##############################################################################
# File: project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/main.py
##############################################################################
#!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient for training models on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
val_in_trans_fns = prj_conf.val_input_trans_fns \
if hasattr(prj_conf, 'val_input_trans_fns') else None
val_out_trans_fns = prj_conf.val_output_trans_fns \
if hasattr(prj_conf, 'val_output_trans_fns') else None
if val_in_trans_fns is None:
val_in_trans_fns = in_trans_fns
if val_out_trans_fns is None:
val_out_trans_fns = out_trans_fns
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if hasattr(prj_conf, 'val_input_dirs'):
val_input_dirs = prj_conf.val_input_dirs
else:
val_input_dirs = prj_conf.input_dirs
if hasattr(prj_conf, 'val_output_dirs'):
val_output_dirs = prj_conf.val_output_dirs
else:
val_output_dirs = prj_conf.output_dirs
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
val_input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
val_output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = val_in_trans_fns,
output_augment_funcs = val_out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()

##############################################################################
# File: project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/config_train_toyset_ID_4.py
##############################################################################
#!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
import pandas as pd
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# this will be used as the name of cache files created for each set
#
# Name for the seed training set. In case you merge multiple data sets into
# a single training set, just specify the name of each subset.
# Here we only have 1 training subset
trn_set_name = ['asvspoof2019_toyset_vocoded_trn_bona']
val_set_name = ['asvspoof2019_toyset_vocoded_val']
# For convenience, specify a path to the toy data set
# because config*.py will be copied into model-*/config_AL_train_toyset/NN
# we need to use ../../../
tmp = os.path.dirname(__file__) + '/../../../DATA/toy_example_vocoded'
# File list for training and development sets
# (text file, one file name per line, without name extension)
# we need to provide one lst for each subset
# trn_list[n] will correspond to trn_set_name[n]
# for training set, we only load bonafide through default data IO
# then, (paired) spoofed data will be loaded through data augmentation function
trn_list = [tmp + '/scp/train_bonafide.lst']
# for development set
val_list = [tmp + '/scp/dev.lst']
# Directories for input data
# We need to provide the path to the directory that saves the input data.
# We assume waveforms for training and development of one subset
# are stored in the same directory.
# Hence, input_dirs[n] is for trn_set_name[n] and val_set_name[n]
#
# If you need to specify a separate val_input_dirs
# val_input_dirs = [[PATH_TO_DEVELOPMENT_SET]]
#
# Each input_dirs[n] is a list,
# for example, input_dirs[n] = [wav, speaker_label, augmented_wav, ...]
#
# Here, input for each file is a single waveform
input_dirs = [[tmp + '/train_dev']]
val_input_dirs = [[tmp + '/train_dev']]
# Dimensions of input features
# What is the dimension of the input feature
# len(input_dims) should be equal to len(input_dirs[n])
#
# Here, input for each file is a single waveform, dimension is 1
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# len(input_exts) should be equal to len(input_dirs[n])
#
# Here, input file extension is .wav
# We use .wav not .flac
input_exts = ['.wav']
# Temporal resolution for input features
# This is not relevant for CM but for other projects
# len(input_reso) should be equal to len(input_dirs[n])
# Here, it is 1 for waveform
input_reso = [1]
# Whether input features should be z-normalized
# This is not relevant for CM but for other projects
# len(input_norm) should be equal to len(input_dirs[n])
# Here, it is False for waveform
# We don't normalize the waveform
input_norm = [False]
# Similar configurations for output features
# Here, we set output to empty because we will load
# the target labels from the protocol rather than from output features
# '.bin' is also a placeholder
output_dirs = [[] for x in input_dirs]
val_output_dirs = [[]]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# ===
# Waveform configuration
# ===
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# The larger truncate_seq is, the more GPU memory is required
# If you don't want truncation, set truncate_seq = None
# Here, we don't use the truncate_seq option of data_io; instead, we do
# truncation in the data_augmentation functions
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# This is used to load protocol(s)
# Multiple protocol files can be specified in the list
#
# Note that these protocols should cover all the
# training, development, and pool set data.
# Otherwise, the code will raise an error
#
# Here, this protocol will cover all the data in the toy set
optional_argument = [tmp + '/protocol.txt']
# ===
# pre-trained SSL model
# ===
# We will load this pre-trained SSL model as the front-end
#
# path to the SSL model (it is downloaded by 01_download.sh)
ssl_front_end_path = os.path.dirname(__file__) \
+ '/../../../SSL_pretrained/xlsr_53_56k.pt'
# dimension of the SSL model output
# this must be provided.
ssl_front_end_out_dim = 1024
# ===
# data augmentation option
# ===
# for training with aligned bonafide-spoofed mini-batches,
# we have to use this customized function to make sure that
# we can load the aligned files
# We will use the functions in data_augment.py to process the loaded mini-batch
import data_augment
# path to the waveform directory (the same as input_dirs above)
wav_path = tmp + '/train_dev'
# path to the protocol of spoofed data (the same as optional_argument)
protocol_path = tmp + '/protocol.txt'
# configuration to use Pandas to parse the protocol
protocol_cols = ['voice', 'trial', '-', 'attack', 'label']
# length to truncate the waveform
trim_len = 64000
# load protocol
protocol_pd = pd.read_csv(protocol_path, sep = ' ', names = protocol_cols)
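# For reference, each protocol line is expected to look like the following
# (hypothetical trial name), matching protocol_cols above:
#   LA_0001 LA_T_1000137 - A01 spoof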
# how many spoofed trials exist for every bonafide trial
# this toy dataset has four vocoded (spoofed) versions of each bonafide trial
num_spoofed_trials = 4
# whether this database provides aligned bonafide-spoofed pairs
flag_database_aligned = False
def spoof_name_loader(bona_name,
wav_path=wav_path,
data_pd = protocol_pd,
num_spoofed = num_spoofed_trials,
flag_database_aligned = flag_database_aligned):
"""
"""
# get the spoofed data list that contains the name of the bona fide
# spoofs can be empty for non-aligned database, in that case,
if flag_database_aligned:
# if aligned we assume the spoofed and bonafide audio file names have
# common part, thus, we can select those trials from the list
# in toy dataset
# bonafide trial has name LA_X_YYYY
# spoofed trials are named as vocoder1_LA_X_YYYY, vocoder2_LA_X_YYYY
idx = data_pd.query('label == "spoof"')['trial'].str.contains(bona_name)
spoofs = data_pd.query('label == "spoof"')[idx]['trial'].to_list()
if len(spoofs) == 0:
print("Warn: cannot find spoofed data for {:s}".format(bona_name))
print("Possible reason: {:s} is spoofed trial".format(bona_name))
print("How-to-resolve: make sure that trn_list={:s}".format(
str(trn_list)
))
print(" in config only contains bona fide data")
else:
        # if not aligned, we randomly sample spoofed trials
idx = data_pd.query('label == "spoof"')['trial'].str.contains('LA_T')
tmp_pd = data_pd.query('label == "spoof"')[idx].sample(n=num_spoofed)
spoofs = tmp_pd['trial'].to_list()
spoof_paths = []
for spoof_name in spoofs:
filepath = wav_path + '/' + spoof_name + '.wav'
if not os.path.isfile(filepath):
print("Cannot find {:s}".format(filepath))
else:
spoof_paths.append(filepath)
return spoof_paths
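# Illustrative call (a sketch; the trial name is hypothetical):
#   spoof_name_loader('LA_T_1000137')
# returns paths such as wav_path + '/vocoder1_LA_T_1000137.wav' for an
# aligned database, or num_spoofed_trials randomly sampled spoofed paths
# otherwise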
####
# wrapper of data augmentation functions
# the wrapper calls the data_augmentation function defined in
# data_augment.py.
# these wrappers will be called by data_io when loading the data
# from disk
####
# wrapper for training set
input_trans_fns = [
[lambda x, y: data_augment.wav_aug_wrapper(
x, y, wav_samp_rate, trim_len, spoof_name_loader)],
[lambda x, y: data_augment.wav_aug_wrapper(
x, y, wav_samp_rate, trim_len, spoof_name_loader)]]
output_trans_fns = [[], []]
# wrapper for development set
# for the development set, we simply truncate the waveforms
val_input_trans_fns = [
[lambda x, y: data_augment.wav_aug_wrapper_val(
x, y, wav_samp_rate, trim_len, spoof_name_loader)],
[lambda x, y: data_augment.wav_aug_wrapper_val(
x, y, wav_samp_rate, trim_len, spoof_name_loader)]]
val_output_trans_fns = [[], []]
#########################################################
## Configuration for inference stage
#########################################################
# This part is not used in this project
# They are placeholders
test_set_name = trn_set_name + val_set_name
# List of test set data
# for convenience, you may directly load test_set list here
test_list = trn_list + val_list
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2
# Directories for output features, which are []
test_output_dirs = [[]] * 2

##############################################################################
# File: project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/config_train_toyset_ID_6.py
##############################################################################
#!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
import pandas as pd
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# this will be used as the name of cache files created for each set
#
# Name for the seed training set. In case you merge multiple data sets into
# a single training set, just specify the name of each subset.
# Here we only have 1 training subset
trn_set_name = ['asvspoof2019_toyset_vocoded_trn_bona']
val_set_name = ['asvspoof2019_toyset_vocoded_val']
# For convenience, specify a path to the toy data set
# because config*.py will be copied into model-*/config_AL_train_toyset/NN
# we need to use ../../../
tmp = os.path.dirname(__file__) + '/../../../DATA/toy_example_vocoded'
# File list for training and development sets
# (text file, one file name per line, without name extension)
# we need to provide one lst for each subset
# trn_list[n] will correspond to trn_set_name[n]
# for training set, we only load bonafide through default data IO
# then, (paired) spoofed data will be loaded through data augmentation function
trn_list = [tmp + '/scp/train_bonafide.lst']
# for development set
val_list = [tmp + '/scp/dev.lst']
# Directories for input data
# We need to provide the path to the directory that saves the input data.
# We assume waveforms for training and development of one subset
# are stored in the same directory.
# Hence, input_dirs[n] is for trn_set_name[n] and val_set_name[n]
#
# If you need to specify a separate val_input_dirs
# val_input_dirs = [[PATH_TO_DEVELOPMENT_SET]]
#
# Each input_dirs[n] is a list,
# for example, input_dirs[n] = [wav, speaker_label, augmented_wav, ...]
#
# Here, input for each file is a single waveform
input_dirs = [[tmp + '/train_dev']]
val_input_dirs = [[tmp + '/train_dev']]
# Dimensions of input features
# What is the dimension of the input feature
# len(input_dims) should be equal to len(input_dirs[n])
#
# Here, input for each file is a single waveform, dimension is 1
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# len(input_exts) should be equal to len(input_dirs[n])
#
# Here, input file extension is .wav
# We use .wav not .flac
input_exts = ['.wav']
# Temporal resolution for input features
# This is not relevant for CM but for other projects
# len(input_reso) should be equal to len(input_dirs[n])
# Here, it is 1 for waveform
input_reso = [1]
# Whether input features should be z-normalized
# This is not relevant for CM but for other projects
# len(input_norm) should be equal to len(input_dirs[n])
# Here, it is False for waveform
# We don't normalize the waveform
input_norm = [False]
# Similar configurations for output features
# Here, we set output to empty because we will load
# the target labels from the protocol rather than from output features
# '.bin' is also a placeholder
output_dirs = [[] for x in input_dirs]
val_output_dirs = [[]]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# ===
# Waveform configuration
# ===
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# The larger truncate_seq is, the more GPU memory is required
# If you don't want truncation, set truncate_seq = None
# Here, we don't use the truncate_seq option of data_io; instead, we do
# truncation in the data_augmentation functions
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# This is used to load protocol(s)
# Multiple protocol files can be specified in the list
#
# Note that these protocols should cover all the
# training, development, and pool set data.
# Otherwise, the code will raise an error
#
# Here, this protocol will cover all the data in the toy set
optional_argument = [tmp + '/protocol.txt']
# ===
# pre-trained SSL model
# ===
# We will load this pre-trained SSL model as the front-end
#
# path to the SSL model (it is downloaded by 01_download.sh)
ssl_front_end_path = os.path.dirname(__file__) \
+ '/../../../SSL_pretrained/xlsr_53_56k.pt'
# dimension of the SSL model output
# this must be provided.
ssl_front_end_out_dim = 1024
# ===
# data augmentation option
# ===
# for training with aligned bonafide-spoofed mini-batches,
# we have to use this customized function to make sure that
# we can load the aligned files
# We will use the functions in data_augment.py to process the loaded mini-batch
import data_augment
# path to the waveform directory (the same as input_dirs above)
wav_path = tmp + '/train_dev'
# path to the protocol of spoofed data (the same as optional_argument)
protocol_path = tmp + '/protocol.txt'
# configuration to use Pandas to parse the protocol
protocol_cols = ['voice', 'trial', '-', 'attack', 'label']
# length to truncate the waveform
trim_len = 64000
# load protocol
protocol_pd = pd.read_csv(protocol_path, sep = ' ', names = protocol_cols)
# how many spoofed trials exist for every bonafide trial
# this toy dataset has four vocoded (spoofed) versions of each bonafide trial
num_spoofed_trials = 4
# whether this database provides aligned bonafide-spoofed pairs
flag_database_aligned = False
def spoof_name_loader(bona_name,
wav_path=wav_path,
data_pd = protocol_pd,
num_spoofed = num_spoofed_trials,
flag_database_aligned = flag_database_aligned):
"""
"""
# get the spoofed data list that contains the name of the bona fide
# spoofs can be empty for non-aligned database, in that case,
if flag_database_aligned:
# if aligned we assume the spoofed and bonafide audio file names have
# common part, thus, we can select those trials from the list
# in toy dataset
# bonafide trial has name LA_X_YYYY
# spoofed trials are named as vocoder1_LA_X_YYYY, vocoder2_LA_X_YYYY
idx = data_pd.query('label == "spoof"')['trial'].str.contains(bona_name)
spoofs = data_pd.query('label == "spoof"')[idx]['trial'].to_list()
if len(spoofs) == 0:
print("Warn: cannot find spoofed data for {:s}".format(bona_name))
print("Possible reason: {:s} is spoofed trial".format(bona_name))
print("How-to-resolve: make sure that trn_list={:s}".format(
str(trn_list)
))
print(" in config only contains bona fide data")
else:
        # if not aligned, we randomly sample spoofed trials
idx = data_pd.query('label == "spoof"')['trial'].str.contains('LA_T')
tmp_pd = data_pd.query('label == "spoof"')[idx].sample(n=num_spoofed)
spoofs = tmp_pd['trial'].to_list()
spoof_paths = []
for spoof_name in spoofs:
filepath = wav_path + '/' + spoof_name + '.wav'
if not os.path.isfile(filepath):
print("Cannot find {:s}".format(filepath))
else:
spoof_paths.append(filepath)
return spoof_paths
####
# wrapper of data augmentation functions
# the wrapper calls the data_augmentation function defined in
# data_augment.py.
# these wrappers will be called by data_io when loading the data
# from disk
####
# wrapper for training set
input_trans_fns = [
[lambda x, y: data_augment.wav_aug_wrapper(
x, y, wav_samp_rate, trim_len, spoof_name_loader)],
[lambda x, y: data_augment.wav_aug_wrapper(
x, y, wav_samp_rate, trim_len, spoof_name_loader)]]
output_trans_fns = [[], []]
# wrapper for development set
# for the development set, we simply truncate the waveforms
val_input_trans_fns = [
[lambda x, y: data_augment.wav_aug_wrapper_val(
x, y, wav_samp_rate, trim_len, spoof_name_loader)],
[lambda x, y: data_augment.wav_aug_wrapper_val(
x, y, wav_samp_rate, trim_len, spoof_name_loader)]]
val_output_trans_fns = [[], []]
#########################################################
## Configuration for inference stage
#########################################################
# This part is not used in this project
# They are placeholders
test_set_name = trn_set_name + val_set_name
# List of test set data
# for convenience, you may directly load test_set list here
test_list = trn_list + val_list
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2
# Directories for output features, which are []
test_output_dirs = [[]] * 2

##############################################################################
# File: project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/config_train_toyset_ID_7.py
##############################################################################
#!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
import pandas as pd
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# this will be used as the name of cache files created for each set
#
# Name for the seed training set. In case you merge multiple data sets into
# a single training set, just specify the name of each subset.
# Here we only have 1 training subset
trn_set_name = ['asvspoof2019_toyset_vocoded_trn_bona']
val_set_name = ['asvspoof2019_toyset_vocoded_val']
# For convenience, specify a path to the toy data set
# because config*.py will be copied into model-*/config_AL_train_toyset/NN
# we need to use ../../../
tmp = os.path.dirname(__file__) + '/../../../DATA/toy_example_vocoded'
# File list for training and development sets
# (text file, one file name per line, without name extension)
# we need to provide one lst for each subset
# trn_list[n] will correspond to trn_set_name[n]
# for training set, we only load bonafide through default data IO
# then, (paired) spoofed data will be loaded through data augmentation function
trn_list = [tmp + '/scp/train_bonafide.lst']
# for development set
val_list = [tmp + '/scp/dev.lst']
# Directories for input data
# We need to provide the path to the directory that saves the input data.
# We assume waveforms for training and development of one subset
# are stored in the same directory.
# Hence, input_dirs[n] is for trn_set_name[n] and val_set_name[n]
#
# If you need to specify a separate val_input_dirs
# val_input_dirs = [[PATH_TO_DEVELOPMENT_SET]]
#
# Each input_dirs[n] is a list,
# for example, input_dirs[n] = [wav, speaker_label, augmented_wav, ...]
#
# Here, input for each file is a single waveform
input_dirs = [[tmp + '/train_dev']]
val_input_dirs = [[tmp + '/train_dev']]
# Dimensions of input features
# What is the dimension of the input feature
# len(input_dims) should be equal to len(input_dirs[n])
#
# Here, input for each file is a single waveform, dimension is 1
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# len(input_exts) should be equal to len(input_dirs[n])
#
# Here, input file extension is .wav
# We use .wav not .flac
input_exts = ['.wav']
# Temporal resolution for input features
# This is not relevant for CM but for other projects
# len(input_reso) should be equal to len(input_dirs[n])
# Here, it is 1 for waveform
input_reso = [1]
# Whether input features should be z-normalized
# This is not relevant for CM but for other projects
# len(input_norm) should be equal to len(input_dirs[n])
# Here, it is False for waveform
# We don't normalize the waveform
input_norm = [False]
# Similar configurations for output features
# Here, we set output to empty because we will load
# the target labels from the protocol rather than from output features
# '.bin' is also a placeholder
output_dirs = [[] for x in input_dirs]
val_output_dirs = [[]]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# ===
# Waveform configuration
# ===
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# The larger truncate_seq is, the more GPU memory is required
# If you don't want truncation, set truncate_seq = None
# Here, we don't use the truncate_seq option of data_io; instead, we do
# truncation in the data_augmentation functions
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# This is used to load protocol(s)
# Multiple protocol files can be specified in the list
#
# Note that these protocols should cover all the
# training, development, and pool set data.
# Otherwise, the code will raise an error
#
# Here, this protocol will cover all the data in the toy set
optional_argument = [tmp + '/protocol.txt']
# ===
# pre-trained SSL model
# ===
# We will load this pre-trained SSL model as the front-end
#
# path to the SSL model (it is downloaded by 01_download.sh)
ssl_front_end_path = os.path.dirname(__file__) \
+ '/../../../SSL_pretrained/xlsr_53_56k.pt'
# dimension of the SSL model output
# this must be provided.
ssl_front_end_out_dim = 1024
# ===
# data augmentation option
# ===
# for training with aligned bonafide-spoofed mini-batches,
# we have to use this customized function to make sure that
# we can load the aligned files
# We will use the functions in data_augment.py to process the loaded mini-batch
import data_augment
# path to the waveform directory (the same as input_dirs above)
wav_path = tmp + '/train_dev'
# path to the protocol of spoofed data (the same as optional_argument)
protocol_path = tmp + '/protocol.txt'
# configuration to use Pandas to parse the protocol
protocol_cols = ['voice', 'trial', '-', 'attack', 'label']
# length to truncate the waveform
trim_len = 64000
# load protocol
protocol_pd = pd.read_csv(protocol_path, sep = ' ', names = protocol_cols)
# how many spoofed trials exist for every bonafide trial
# this toy dataset has four vocoded (spoofed) versions of each bonafide trial
num_spoofed_trials = 4
# whether this database provides aligned bonafide-spoofed pairs
flag_database_aligned = True
def spoof_name_loader(bona_name,
wav_path=wav_path,
data_pd = protocol_pd,
num_spoofed = num_spoofed_trials,
flag_database_aligned = flag_database_aligned):
"""
"""
# get the spoofed data list that contains the name of the bona fide
# spoofs can be empty for non-aligned database, in that case,
if flag_database_aligned:
# if aligned we assume the spoofed and bonafide audio file names have
# common part, thus, we can select those trials from the list
# in toy dataset
# bonafide trial has name LA_X_YYYY
# spoofed trials are named as vocoder1_LA_X_YYYY, vocoder2_LA_X_YYYY
idx = data_pd.query('label == "spoof"')['trial'].str.contains(bona_name)
spoofs = data_pd.query('label == "spoof"')[idx]['trial'].to_list()
if len(spoofs) == 0:
print("Warn: cannot find spoofed data for {:s}".format(bona_name))
print("Possible reason: {:s} is spoofed trial".format(bona_name))
print("How-to-resolve: make sure that trn_list={:s}".format(
str(trn_list)
))
print(" in config only contains bona fide data")
else:
        # if not aligned, we randomly sample spoofed trials
idx = data_pd.query('label == "spoof"')['trial'].str.contains('LA_T')
tmp_pd = data_pd.query('label == "spoof"')[idx].sample(n=num_spoofed)
spoofs = tmp_pd['trial'].to_list()
spoof_paths = []
for spoof_name in spoofs:
filepath = wav_path + '/' + spoof_name + '.wav'
if not os.path.isfile(filepath):
print("Cannot find {:s}".format(filepath))
else:
spoof_paths.append(filepath)
return spoof_paths
####
# wrapper of data augmentation functions
# the wrapper calls the data_augmentation function defined in
# data_augment.py.
# these wrappers will be called by data_io when loading the data
# from disk
####
# wrapper for training set
input_trans_fns = [
[lambda x, y: data_augment.wav_aug_wrapper(
x, y, wav_samp_rate, trim_len, spoof_name_loader)],
[lambda x, y: data_augment.wav_aug_wrapper(
x, y, wav_samp_rate, trim_len, spoof_name_loader)]]
output_trans_fns = [[], []]
# wrapper for development set
# for the development set, we simply truncate the waveforms
val_input_trans_fns = [
[lambda x, y: data_augment.wav_aug_wrapper_val(
x, y, wav_samp_rate, trim_len, spoof_name_loader)],
[lambda x, y: data_augment.wav_aug_wrapper_val(
x, y, wav_samp_rate, trim_len, spoof_name_loader)]]
val_output_trans_fns = [[], []]
#########################################################
## Configuration for inference stage
#########################################################
# This part is not used in this project
# They are placeholders
test_set_name = trn_set_name + val_set_name
# List of test set data
# for convenience, you may directly load test_set list here
test_list = trn_list + val_list
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2
# Directories for output features, which are []
test_output_dirs = [[]] * 2

##############################################################################
# File: project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/config_auto.py
##############################################################################
#!/usr/bin/env python
"""
config.py
This configuration file reads environment variables
for its configuration. It is used for scoring.
It assumes that input data will be waveform files (*.wav)
No need to change settings here
"""
import os
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
trn_set_name = ['']
val_set_name = ['']
trn_list = ['']
val_list = ['']
input_dirs = [['']]
input_dims = [1]
input_exts = ['.wav']
input_reso = [1]
input_norm = [False]
output_dirs = [[]]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
# ASVspoof uses 16000 Hz
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
# For ASVspoof, we don't do truncate here
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
# For ASVspoof, we don't set minimum length of input trial
minimum_len = None
# Optional argument
# We will use this optional_argument to read protocol file
# When evaluating on an eval set without a protocol file, set this to ['']
optional_argument = ['']
# ===
# pre-trained SSL model
# ===
# We will load this pre-trained SSL model as the front-end
# We need this because the model definition is written in
# this file.
# Its weight will be overwritten by the trained CM.
#
# path to the SSL model. It will be loaded as front-end
ssl_front_end_path = os.path.dirname(__file__) \
+ '/../../../SSL_pretrained/xlsr_53_56k.pt'
# dimension of the SSL model output
ssl_front_end_out_dim = 1024
#########################################################
## Configuration for inference stage
#########################################################
# We set environment variables
# No need to change
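# For example (a sketch; the values are hypothetical), the calling script
# is expected to export something like
#   export TEMP_DATA_NAME=asvspoof_eval_set
#   export TEMP_DATA_DIR=/path/to/eval/wav
# before running main.py with this config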
test_set_name = [os.getenv('TEMP_DATA_NAME')]
# List of test set data
# for convenience, you may directly load test_set list here
test_list = [test_set_name[0] + '.lst']
# Directories for input features
# input_dirs = [[path_of_feature_1, path_of_feature_2, ..., ]]
# directory of the evaluation set waveform
test_input_dirs = [[os.getenv('TEMP_DATA_DIR')]]
# Directories for output features, which are [[]]
test_output_dirs = [[]]

##############################################################################
# File: project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-2/model.py
##############################################################################
#!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_metric
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
          input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # put the model on GPU if it is not there yet
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav, flag_fix_ssl):
""" output = front_end(wav, datalength)
input:
------
wav: tensor, (batch, length, 1)
datalength: list of int, length of wav in the mini-batch
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
# if SSL is not fine-tuned
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
# if SSL model is fine-tuned
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
# a simple full-connected network for frame-level feature processing
self.m_frame_level = torch_nn.Sequential(
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.out_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag))
# linear layer to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
# through the frame-level network
# (batch, frame_num, self.out_dim)
feat_ = self.m_frame_level(feat)
# average pooling -> (batch, self.out_dim)
feat_utt = feat_.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Here we use supervised contrastive feature loss
"""
def __init__(self):
super(FeatLossModule, self).__init__()
# similarity metric for two sequences
self.sim_metric_seq = lambda mat1, mat2: torch.bmm(
mat1.permute(1, 0, 2), mat2.permute(1, 2, 0)).mean(0)
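        # shape sketch: for mat1, mat2 of shape (num_seq, length, dim),
        # the permutes give (length, num_seq, dim) and (length, dim, num_seq);
        # bmm yields (length, num_seq, num_seq) frame-wise inner products,
        # and .mean(0) averages them into a (num_seq, num_seq) similarity
        # matrix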
return
def forward(self, data, target):
"""
"""
if data.ndim > 3:
# loss between frame-level feature sequences
return nii_loss_metric.supcon_loss(
data, target, sim_metric=self.sim_metric_seq, length_norm=True)
else:
# loss between embeddings
return nii_loss_metric.supcon_loss(data, target, length_norm=True)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian Dropout parameter
# This is not for contrastive loss or vocoded data
# This is for uncertainty estimation.
####
# dropout rate
self.m_mcdp_rate = 0.5
# whether use Bayesian dropout during inference
self.m_mcdp_flag = True
# How many samples to draw for Bayesian uncertainty estimation.
        # Naively, we would duplicate one input trial 144 times and create a
        # mini-batch; by doing so, we simultaneously do sampling and get 144
        # samples for each input trial.
        # But the mini-batch may be too large in size.
        # Hence, we do the sampling three times, each time drawing 48 samples
        #
        # If your GPU memory is insufficient, try to reduce these numbers
self.m_mcdropout_num = [48, 48, 48]
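        # i.e., 48 + 48 + 48 = 144 Monte-Carlo samples per input trial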
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
# back end
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag)
#####
# Loss function
#####
# we do not use mixup loss for experiments.
        # here, it is used for compatibility.
self.m_ce_loss = torch_nn.CrossEntropyLoss()
# feature loss module
self.m_cr_loss = FeatLossModule()
# weight for the contrastive feature loss
self.m_feat = 0.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
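        # e.g., num_sys=3, num_aug=2, bs=1 gives [1, 1, 0, 0, 0, 0]:
        # one bona fide group followed by (num_sys - 1) spoofed groups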
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # the input is too short; skip the network and output dummy scores
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
# back end
# not relevant to the vocoded training data and contrastive loss,
# this is for another project of uncertainty estimation.
# we will use code as it is
#
# Bayesian dropout sampling
logits_bag = []
for dropnum in self.m_mcdropout_num:
            # Simulate Bayesian sampling in inference:
            # we duplicate the SSL output, pass the copies through
            # dropout, and get different dropout results
# (dropnum * batch, frame, frame_dim)
feat_tmp = feat_vec.repeat(dropnum, 1, 1)
# (dropnum * batch, 2)
logi_tmp, _ = self.m_back_end(feat_tmp)
logits_bag.append(logi_tmp)
# (total_dropnum * batch, 2)
bs = x.shape[0]
total_samp_num = sum(self.m_mcdropout_num)
logits = torch.cat(logits_bag, dim=0).view(total_samp_num, bs, -1)
# compute CM scores, model uncertainty, data uncertainty
# we need to use CM scores to compute EER
scores, epss, ales = nii_bayesian.compute_llr_eps_ale(logits)
targets = self._get_target(filenames)
for filename, target, score, eps, ale in \
zip(filenames, targets, scores, epss, ales):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, score.item(), eps.item(), ale.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# not used in this project
flag_fix_ssl = self.temp_flag == 's2'
# input will be (batchsize, length, 1+num_spoofed, num_aug)
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_aug = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
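        # e.g., x of shape (2, 64000, 5, 1) becomes x_new of shape (10, 64000)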
datalen_tmp = np.repeat(datalength, num_sys * num_aug)
        # build the target vector with _get_target_vec; this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target = self._get_target_vec(num_sys, num_aug, bat_siz,
x.device, torch.long)
        # this is for contrastive loss (ignoring the augmentation)
target_feat = self._get_target_vec(num_sys, 1, bat_siz,
x.device, torch.long)
# front-end & back-end
feat_vec = self.m_front_end(x_new, flag_fix_ssl)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
        # although the loss is implemented as a mixup loss,
# we set gamma to 1.0, so mixup becomes equivalent to a normal
# loss without mixup
loss_ce = self.m_ce_loss(logits, target)
# feature loss
if self.m_feat:
# not implemented for baseline method
return loss_ce
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and type(x) is list and x[0].shape[2] > 1:
# if training with multi-view data
# contrastive feature loss requires multi-view of data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")

##############################################################################
# File: project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-2/data_augment.py
##############################################################################
#!/usr/bin/env python
"""Functions for data augmentation
These functions are written using Numpy.
They should be used before data are cast into torch.Tensor.
For example, use them in config.py:input_trans_fns or output_trans_fns
"""
import os
import sys
import numpy as np
from core_scripts.other_tools import debug as nii_debug
from core_scripts.data_io import wav_tools as nii_wav_tools
from core_scripts.data_io import wav_augmentation as nii_wav_aug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
def wav_aug_wrapper(input_data,
name_info,
wav_samp_rate,
length):
# use this API to randomly trim the input length
input_data = nii_wav_aug.batch_pad_for_multiview(
[input_data], wav_samp_rate, length, random_trim_nosil=True)[0]
return input_data
def wav_aug_wrapper_val(input_data,
name_info,
wav_samp_rate,
length):
    # use this API to pad/trim the input to the target length (no random trim)
input_data = nii_wav_aug.batch_pad_for_multiview(
[input_data], wav_samp_rate, length, random_trim_nosil=False)[0]
return input_data
if __name__ == "__main__":
print("Tools for data augmentation")

##############################################################################
# File: project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-6/model.py
##############################################################################
#!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_metric
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
          input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # put the model on GPU if it is not there yet
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav, flag_fix_ssl):
""" output = front_end(wav, datalength)
input:
------
wav: tensor, (batch, length, 1)
datalength: list of int, length of wav in the mini-batch
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
# if SSL is not fine-tuned
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
# if SSL model is fine-tuned
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
# a simple full-connected network for frame-level feature processing
self.m_frame_level = torch_nn.Sequential(
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.out_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag))
# linear layer to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
# through the frame-level network
# (batch, frame_num, self.out_dim)
feat_ = self.m_frame_level(feat)
# average pooling -> (batch, self.out_dim)
feat_utt = feat_.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Here we use supervised contrastive feature loss
"""
def __init__(self):
super(FeatLossModule, self).__init__()
# similarity metric for two sequences
self.sim_metric_seq = lambda mat1, mat2: torch.bmm(
mat1.permute(1, 0, 2), mat2.permute(1, 2, 0)).mean(0)
return
def forward(self, data, target):
"""
"""
if data.ndim > 3:
# loss between frame-level feature sequences
return nii_loss_metric.supcon_loss(
data, target, sim_metric=self.sim_metric_seq, length_norm=True)
else:
# loss between embeddings
return nii_loss_metric.supcon_loss(data, target, length_norm=True)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
    # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian Dropout parameter
# This is not for contrastive loss or vocoded data
# This is for uncertainty estimation.
####
# dropout rate
self.m_mcdp_rate = 0.5
        # whether to use Bayesian dropout during inference
        self.m_mcdp_flag = True
        # How many samples to draw for Bayesian uncertainty estimation.
        # Naively, we could duplicate each input 144 times in one
        # mini-batch, doing the sampling in one shot and getting 144
        # samples per input.
        # But such a mini-batch may be too large in size.
        # Hence, we do the sampling three times, drawing 48 samples each
        # time.
        #
        # If your GPU memory is insufficient, try to reduce these numbers.
self.m_mcdropout_num = [48, 48, 48]
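        # A worked example (illustrative, assuming batch size 2): in
        # __inference, feat_vec of shape (2, frame, dim) is repeated to
        # (96, frame, dim) in each of the three rounds; the concatenated
        # logits are then viewed as (144, 2, num_class), i.e., 144
        # Monte-Carlo dropout samples per utterance.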
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
# back end
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag)
#####
# Loss function
#####
        # we do not use the mixup loss in these experiments;
        # it is kept here for compatibility.
self.m_ce_loss = nii_loss_metric.MixUpCE()
# feature loss module
self.m_cr_loss = FeatLossModule()
# weight for the contrastive feature loss
self.m_feat = 1.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # skip utterances that are too short
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
# back end
# not relevant to the vocoded training data and contrastive loss,
# this is for another project of uncertainty estimation.
# we will use code as it is
#
# Bayesian dropout sampling
logits_bag = []
for dropnum in self.m_mcdropout_num:
            # Simulate Bayesian sampling during inference.
            # Instead of re-running the SSL front end for every sample, we
            # duplicate its output, pass the copies through the
            # dropout-enabled back end, and obtain a different dropout mask
            # for each copy.
# (dropnum * batch, frame, frame_dim)
feat_tmp = feat_vec.repeat(dropnum, 1, 1)
# (dropnum * batch, 2)
logi_tmp, _ = self.m_back_end(feat_tmp)
logits_bag.append(logi_tmp)
# (total_dropnum * batch, 2)
bs = x.shape[0]
total_samp_num = sum(self.m_mcdropout_num)
logits = torch.cat(logits_bag, dim=0).view(total_samp_num, bs, -1)
# compute CM scores, model uncertainty, data uncertainty
# we need to use CM scores to compute EER
scores, epss, ales = nii_bayesian.compute_llr_eps_ale(logits)
targets = self._get_target(filenames)
for filename, target, score, eps, ale in \
zip(filenames, targets, scores, epss, ales):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, score.item(), eps.item(), ale.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x_tuple, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# not used in this project
flag_fix_ssl = self.temp_flag == 's2'
        # input x_tuple is [input_tensor, target_1, target_2, gamma, feat_class, ...]
x = x_tuple[0]
tar1 = x_tuple[1]
tar2 = x_tuple[2]
gamma = x_tuple[3]
feat_class = x_tuple[4]
        # input_tensor has shape (batchsize, length, num_systems, num_views),
        # where num_systems and num_views are set in data_augment.py
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_view = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
        # target vectors are loaded from data_augment; we don't use _get_target_vec
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target1 = tar1.flatten().to(torch.long)
target2 = tar2.flatten().to(torch.long)
gamma = gamma.flatten()
# front-end & back-end
feat_vec = self.m_front_end(x_new, flag_fix_ssl)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
# although the loss is implemented in mixup
# we set gamma to 1.0, so mixup becomes equivalent to a normal
# loss without mixup
loss_ce = self.m_ce_loss(logits, target1, target2, gamma)
# feature loss
if self.m_feat:
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(
bat_siz, num_sys, num_view, -1, feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_view, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx], feat_class[bat_idx])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx], feat_class[bat_idx])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and type(x) is list and x[0].shape[2] > 1:
# if training with multi-view data
# contrastive feature loss requires multi-view of data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
    def get_embedding(self, x, fileinfo):
        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # the original code called an undefined self._compute_embedding();
        # assuming the intent was the utterance-level embedding, we return
        # the back-end embedding computed from the front-end feature
        feat_vec = self.m_front_end(x, flag_fix_ssl=True)
        _, emb_vec = self.m_back_end(feat_vec)
        return emb_vec
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,709 | 33.142336 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-6/data_augment.py | #!/usr/bin/env python
"""Functions for data augmentation
These functions are written using Numpy.
They should be used before data are cast into torch.tensor.
For example, use them in config.py:input_trans_fns or output_trans_fns
"""
import os
import sys
import numpy as np
from core_scripts.other_tools import debug as nii_debug
from core_scripts.data_io import wav_tools as nii_wav_tools
from core_scripts.data_io import wav_augmentation as nii_wav_aug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
def load_spoof_trials(bona_name, spoof_loader):
"""spoof_audios = load_spoof_utterance
"""
# load the spoofed data
spoofs = spoof_loader(bona_name)
spoof_list = []
for filepath in spoofs:
if os.path.isfile(filepath):
_, data = nii_wav_tools.waveReadAsFloat(filepath)
else:
print("Cannot find {:s}".format(filepath))
sys.exit(1)
# make the shape (length, 1)
spoof_list.append(np.expand_dims(data, axis=1))
return spoof_list
def mixup_wav(input_wav_list, target_list, alpha, beta, mixup_method='wav'):
"""
"""
# output buffer
out_buf, out_tar1, out_tar2, out_gamma = [], [], [], []
# assume input_wav_list[0] is bonafide
bona_idx = 0
bona_wav = input_wav_list[bona_idx]
    # mix bona fide with each spoofed waveform
for spoof_idx, spoof_wav in enumerate(input_wav_list):
if spoof_idx == 0:
# this is bonafide
continue
else:
# mixup
gamma = np.random.beta(alpha, beta)
mixed_wav = nii_wav_aug.morph_wavform(
bona_wav, spoof_wav, gamma, method=mixup_method)
out_buf.append(mixed_wav)
out_tar1.append(target_list[bona_idx])
out_tar2.append(target_list[spoof_idx])
out_gamma.append(gamma)
# return
return out_buf, out_tar1, out_tar2, out_gamma
def wav_aug_wrapper(input_data,
name_info,
wav_samp_rate,
length,
spoof_loader):
# load spoofed trials
spoof_trials = load_spoof_trials(
name_info.seq_name, spoof_loader)
# use this API to randomly trim the input length
batch_data = [input_data] + spoof_trials
batch_data = nii_wav_aug.batch_pad_for_multiview(
batch_data, wav_samp_rate, length, random_trim_nosil=True)
# first target is for bonafide
# the rest are for spoofed
orig_target = [0] * len(batch_data)
orig_target[0] = 1
    # RawBoost augmentation to create a second view of each waveform
aug_list = [nii_wav_aug.RawBoostWrapper12(x) for x in batch_data]
    # to be compatible with the rest of the code, reuse the same targets
tar_list_1, tar_list_2 = orig_target, orig_target
# gamma_list=1.0 will be equivalent to normal CE
gamma_list = [1.0] * len(batch_data)
# assign bonafide and spoofed data to different feature classes
# for supervised contrastive learning loss
new_feat_class = [2] * len(batch_data)
new_feat_class[0] = 1
# merge the original data and mixed data
new_wav_list = [np.concatenate(batch_data, axis=1),
np.concatenate(aug_list, axis=1)]
new_tar_list_1 = [np.array(orig_target), np.array(tar_list_1)]
new_tar_list_2 = [np.array(orig_target), np.array(tar_list_2)]
new_gamma_list = [np.array([1.0] * len(batch_data)), np.array(gamma_list)]
# waveform stack to (length, num_of_wav, num_of_aug)
output = np.stack(new_wav_list, axis=2)
# label stack to (num_of_wav, num_of_aug)
new_tar_1 = np.stack(new_tar_list_1, axis=1)
new_tar_2 = np.stack(new_tar_list_2, axis=1)
# gamma the same shape as label
new_gamma = np.stack(new_gamma_list, axis=1)
# (num_of_wav)
new_feat_class = np.array(new_feat_class)
return [output, new_tar_1, new_tar_2, new_gamma,
new_feat_class, new_feat_class]
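# Output layout sketch (illustrative): with one bona fide trial and N
# spoofed trials, `output` is (length, 1 + N, 2): view 0 holds the original
# waveforms and view 1 their RawBoost-augmented copies; new_tar_1, new_tar_2,
# and new_gamma are (1 + N, 2), and new_feat_class is (1 + N,).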
def wav_aug_wrapper_val(input_data,
name_info,
wav_samp_rate,
length,
spoof_name_loader):
# use this API to randomly trim the input length
input_data = nii_wav_aug.batch_pad_for_multiview(
[input_data], wav_samp_rate, length, random_trim_nosil=False)[0]
return input_data
if __name__ == "__main__":
print("Tools for data augmentation")
| 4,445 | 31.452555 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-4/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage: loaded by main.py (specified via args.module_model)
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_metric
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
          input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # move the model to the input's device / dtype if necessary
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav, flag_fix_ssl):
""" output = front_end(wav, datalength)
input:
------
wav: tensor, (batch, length, 1)
datalength: list of int, length of wav in the mini-batch
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
# if SSL is not fine-tuned
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
# if SSL model is fine-tuned
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
        # a simple fully-connected network for frame-level feature processing
self.m_frame_level = torch_nn.Sequential(
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.out_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag))
# linear layer to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
          feat: tensor, (batch, frame_num, feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
# through the frame-level network
# (batch, frame_num, self.out_dim)
feat_ = self.m_frame_level(feat)
# average pooling -> (batch, self.out_dim)
feat_utt = feat_.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Here we use supervised contrastive feature loss
"""
def __init__(self):
super(FeatLossModule, self).__init__()
# similarity metric for two sequences
self.sim_metric_seq = lambda mat1, mat2: torch.bmm(
mat1.permute(1, 0, 2), mat2.permute(1, 2, 0)).mean(0)
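        # Shape sketch (illustrative, not from the config): for mat1 and
        # mat2 of shape (N, frame, dim), bmm((frame, N, dim), (frame, dim, N))
        # gives per-frame similarities (frame, N, N), and mean(0) averages
        # over frames to an (N, N) similarity matrix between the N sequences.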
return
def forward(self, data, target):
"""
"""
if data.ndim > 3:
# loss between frame-level feature sequences
return nii_loss_metric.supcon_loss(
data, target, sim_metric=self.sim_metric_seq, length_norm=True)
else:
# loss between embeddings
return nii_loss_metric.supcon_loss(data, target, length_norm=True)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
    # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian Dropout parameter
# This is not for contrastive loss or vocoded data
# This is for uncertainty estimation.
####
# dropout rate
self.m_mcdp_rate = 0.5
        # whether to use Bayesian dropout during inference
        self.m_mcdp_flag = True
        # How many samples to draw for Bayesian uncertainty estimation.
        # Naively, we could duplicate each input 144 times in one
        # mini-batch, doing the sampling in one shot and getting 144
        # samples per input.
        # But such a mini-batch may be too large in size.
        # Hence, we do the sampling three times, drawing 48 samples each
        # time.
        #
        # If your GPU memory is insufficient, try to reduce these numbers.
self.m_mcdropout_num = [48, 48, 48]
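        # A worked example (illustrative, assuming batch size 2): in
        # __inference, feat_vec of shape (2, frame, dim) is repeated to
        # (96, frame, dim) in each of the three rounds; the concatenated
        # logits are then viewed as (144, 2, num_class), i.e., 144
        # Monte-Carlo dropout samples per utterance.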
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
# back end
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag)
#####
# Loss function
#####
        # we do not use the mixup loss in these experiments;
        # it is kept here for compatibility.
self.m_ce_loss = nii_loss_metric.MixUpCE()
# feature loss module
self.m_cr_loss = FeatLossModule()
# weight for the contrastive feature loss
self.m_feat = 0.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # skip utterances that are too short
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
# back end
# not relevant to the vocoded training data and contrastive loss,
# this is for another project of uncertainty estimation.
# we will use code as it is
#
# Bayesian dropout sampling
logits_bag = []
for dropnum in self.m_mcdropout_num:
            # Simulate Bayesian sampling during inference.
            # Instead of re-running the SSL front end for every sample, we
            # duplicate its output, pass the copies through the
            # dropout-enabled back end, and obtain a different dropout mask
            # for each copy.
# (dropnum * batch, frame, frame_dim)
feat_tmp = feat_vec.repeat(dropnum, 1, 1)
# (dropnum * batch, 2)
logi_tmp, _ = self.m_back_end(feat_tmp)
logits_bag.append(logi_tmp)
# (total_dropnum * batch, 2)
bs = x.shape[0]
total_samp_num = sum(self.m_mcdropout_num)
logits = torch.cat(logits_bag, dim=0).view(total_samp_num, bs, -1)
# compute CM scores, model uncertainty, data uncertainty
# we need to use CM scores to compute EER
scores, epss, ales = nii_bayesian.compute_llr_eps_ale(logits)
targets = self._get_target(filenames)
for filename, target, score, eps, ale in \
zip(filenames, targets, scores, epss, ales):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, score.item(), eps.item(), ale.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x_tuple, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# not used in this project
flag_fix_ssl = self.temp_flag == 's2'
        # input x_tuple is [input_tensor, target_1, target_2, gamma, feat_class, ...]
x = x_tuple[0]
tar1 = x_tuple[1]
tar2 = x_tuple[2]
gamma = x_tuple[3]
feat_class = x_tuple[4]
        # input_tensor has shape (batchsize, length, num_systems, num_views),
        # where num_systems and num_views are set in data_augment.py
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_view = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
        # target vectors are loaded from data_augment; we don't use _get_target_vec
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target1 = tar1.flatten().to(torch.long)
target2 = tar2.flatten().to(torch.long)
gamma = gamma.flatten()
# front-end & back-end
feat_vec = self.m_front_end(x_new, flag_fix_ssl)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
# although the loss is implemented in mixup
# we set gamma to 1.0, so mixup becomes equivalent to a normal
# loss without mixup
loss_ce = self.m_ce_loss(logits, target1, target2, gamma)
# feature loss
if self.m_feat:
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(
bat_siz, num_sys, num_view, -1, feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_view, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx], feat_class[bat_idx])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx], feat_class[bat_idx])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and type(x) is list and x[0].shape[2] > 1:
# if training with multi-view data
# contrastive feature loss requires multi-view of data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
    def get_embedding(self, x, fileinfo):
        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # the original code called an undefined self._compute_embedding();
        # assuming the intent was the utterance-level embedding, we return
        # the back-end embedding computed from the front-end feature
        feat_vec = self.m_front_end(x, flag_fix_ssl=True)
        _, emb_vec = self.m_back_end(feat_vec)
        return emb_vec
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,709 | 33.142336 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-4/data_augment.py | #!/usr/bin/env python
"""Functions for data augmentation
These functions are written using Numpy.
They should be used before data are cast into torch.tensor.
For example, use them in config.py:input_trans_fns or output_trans_fns
"""
import os
import sys
import numpy as np
from core_scripts.other_tools import debug as nii_debug
from core_scripts.data_io import wav_tools as nii_wav_tools
from core_scripts.data_io import wav_augmentation as nii_wav_aug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
def load_spoof_trials(bona_name, spoof_loader):
"""spoof_audios = load_spoof_utterance
"""
# load the spoofed data
spoofs = spoof_loader(bona_name)
spoof_list = []
for filepath in spoofs:
if os.path.isfile(filepath):
_, data = nii_wav_tools.waveReadAsFloat(filepath)
else:
print("Cannot find {:s}".format(filepath))
sys.exit(1)
# make the shape (length, 1)
spoof_list.append(np.expand_dims(data, axis=1))
return spoof_list
def mixup_wav(input_wav_list, target_list, alpha, beta, mixup_method='wav'):
"""
"""
# output buffer
out_buf, out_tar1, out_tar2, out_gamma = [], [], [], []
# assume input_wav_list[0] is bonafide
bona_idx = 0
bona_wav = input_wav_list[bona_idx]
    # mix bona fide with each spoofed waveform
for spoof_idx, spoof_wav in enumerate(input_wav_list):
if spoof_idx == 0:
# this is bonafide
continue
else:
# mixup
gamma = np.random.beta(alpha, beta)
mixed_wav = nii_wav_aug.morph_wavform(
bona_wav, spoof_wav, gamma, method=mixup_method)
out_buf.append(mixed_wav)
out_tar1.append(target_list[bona_idx])
out_tar2.append(target_list[spoof_idx])
out_gamma.append(gamma)
# return
return out_buf, out_tar1, out_tar2, out_gamma
def wav_aug_wrapper(input_data,
name_info,
wav_samp_rate,
length,
spoof_loader):
# load spoofed trials
spoof_trials = load_spoof_trials(
name_info.seq_name, spoof_loader)
# use this API to randomly trim the input length
batch_data = [input_data] + spoof_trials
batch_data = nii_wav_aug.batch_pad_for_multiview(
batch_data, wav_samp_rate, length, random_trim_nosil=True)
# first target is for bonafide
# the rest are for spoofed
orig_target = [0] * len(batch_data)
orig_target[0] = 1
    # RawBoost augmentation to create a second view of each waveform
aug_list = [nii_wav_aug.RawBoostWrapper12(x) for x in batch_data]
    # to be compatible with the rest of the code, reuse the same targets
tar_list_1, tar_list_2 = orig_target, orig_target
# gamma_list=1.0 will be equivalent to normal CE
gamma_list = [1.0] * len(batch_data)
# assign bonafide and spoofed data to different feature classes
# for supervised contrastive learning loss
new_feat_class = [2] * len(batch_data)
new_feat_class[0] = 1
# merge the original data and mixed data
new_wav_list = [np.concatenate(batch_data, axis=1),
np.concatenate(aug_list, axis=1)]
new_tar_list_1 = [np.array(orig_target), np.array(tar_list_1)]
new_tar_list_2 = [np.array(orig_target), np.array(tar_list_2)]
new_gamma_list = [np.array([1.0] * len(batch_data)), np.array(gamma_list)]
# waveform stack to (length, num_of_wav, num_of_aug)
output = np.stack(new_wav_list, axis=2)
# label stack to (num_of_wav, num_of_aug)
new_tar_1 = np.stack(new_tar_list_1, axis=1)
new_tar_2 = np.stack(new_tar_list_2, axis=1)
# gamma the same shape as label
new_gamma = np.stack(new_gamma_list, axis=1)
# (num_of_wav)
new_feat_class = np.array(new_feat_class)
return [output, new_tar_1, new_tar_2, new_gamma,
new_feat_class, new_feat_class]
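# Output layout sketch (illustrative): with one bona fide trial and N
# spoofed trials, `output` is (length, 1 + N, 2): view 0 holds the original
# waveforms and view 1 their RawBoost-augmented copies; new_tar_1, new_tar_2,
# and new_gamma are (1 + N, 2), and new_feat_class is (1 + N,).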
def wav_aug_wrapper_val(input_data,
name_info,
wav_samp_rate,
length,
spoof_name_loader):
# use this API to randomly trim the input length
input_data = nii_wav_aug.batch_pad_for_multiview(
[input_data], wav_samp_rate, length, random_trim_nosil=False)[0]
return input_data
if __name__ == "__main__":
print("Tools for data augmentation")
| 4,445 | 31.452555 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-7/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage: loaded by main.py (specified via args.module_model)
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_metric
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
          input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # move the model to the input's device / dtype if necessary
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav, flag_fix_ssl):
""" output = front_end(wav, datalength)
input:
------
wav: tensor, (batch, length, 1)
datalength: list of int, length of wav in the mini-batch
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
# if SSL is not fine-tuned
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
# if SSL model is fine-tuned
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
        # a simple fully-connected network for frame-level feature processing
self.m_frame_level = torch_nn.Sequential(
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.out_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag))
# linear layer to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
          feat: tensor, (batch, frame_num, feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
# through the frame-level network
# (batch, frame_num, self.out_dim)
feat_ = self.m_frame_level(feat)
# average pooling -> (batch, self.out_dim)
feat_utt = feat_.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Here we use supervised contrastive feature loss
"""
def __init__(self):
super(FeatLossModule, self).__init__()
# similarity metric for two sequences
self.sim_metric_seq = lambda mat1, mat2: torch.bmm(
mat1.permute(1, 0, 2), mat2.permute(1, 2, 0)).mean(0)
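        # Shape sketch (illustrative, not from the config): for mat1 and
        # mat2 of shape (N, frame, dim), bmm((frame, N, dim), (frame, dim, N))
        # gives per-frame similarities (frame, N, N), and mean(0) averages
        # over frames to an (N, N) similarity matrix between the N sequences.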
return
def forward(self, data, target):
"""
"""
if data.ndim > 3:
# loss between frame-level feature sequences
return nii_loss_metric.supcon_loss(
data, target, sim_metric=self.sim_metric_seq, length_norm=True)
else:
# loss between embeddings
return nii_loss_metric.supcon_loss(data, target, length_norm=True)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
    # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian Dropout parameter
# This is not for contrastive loss or vocoded data
# This is for uncertainty estimation.
####
# dropout rate
self.m_mcdp_rate = 0.5
        # whether to use Bayesian dropout during inference
        self.m_mcdp_flag = True
        # How many samples to draw for Bayesian uncertainty estimation.
        # Naively, we could duplicate each input 144 times in one
        # mini-batch, doing the sampling in one shot and getting 144
        # samples per input.
        # But such a mini-batch may be too large in size.
        # Hence, we do the sampling three times, drawing 48 samples each
        # time.
        #
        # If your GPU memory is insufficient, try to reduce these numbers.
self.m_mcdropout_num = [48, 48, 48]
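        # A worked example (illustrative, assuming batch size 2): in
        # __inference, feat_vec of shape (2, frame, dim) is repeated to
        # (96, frame, dim) in each of the three rounds; the concatenated
        # logits are then viewed as (144, 2, num_class), i.e., 144
        # Monte-Carlo dropout samples per utterance.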
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
# back end
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag)
#####
# Loss function
#####
        # we do not use the mixup loss in these experiments;
        # it is kept here for compatibility.
self.m_ce_loss = nii_loss_metric.MixUpCE()
# feature loss module
self.m_cr_loss = FeatLossModule()
# weight for the contrastive feature loss
self.m_feat = 1.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # skip utterances that are too short
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
# back end
# not relevant to the vocoded training data and contrastive loss,
# this is for another project of uncertainty estimation.
# we will use code as it is
#
# Bayesian dropout sampling
logits_bag = []
for dropnum in self.m_mcdropout_num:
            # Simulate Bayesian sampling during inference.
            # Instead of re-running the SSL front end for every sample, we
            # duplicate its output, pass the copies through the
            # dropout-enabled back end, and obtain a different dropout mask
            # for each copy.
# (dropnum * batch, frame, frame_dim)
feat_tmp = feat_vec.repeat(dropnum, 1, 1)
# (dropnum * batch, 2)
logi_tmp, _ = self.m_back_end(feat_tmp)
logits_bag.append(logi_tmp)
# (total_dropnum * batch, 2)
bs = x.shape[0]
total_samp_num = sum(self.m_mcdropout_num)
logits = torch.cat(logits_bag, dim=0).view(total_samp_num, bs, -1)
# compute CM scores, model uncertainty, data uncertainty
# we need to use CM scores to compute EER
scores, epss, ales = nii_bayesian.compute_llr_eps_ale(logits)
targets = self._get_target(filenames)
for filename, target, score, eps, ale in \
zip(filenames, targets, scores, epss, ales):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, score.item(), eps.item(), ale.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x_tuple, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# not used in this project
flag_fix_ssl = self.temp_flag == 's2'
        # input x_tuple is [input_tensor, target_1, target_2, gamma, feat_class, ...]
x = x_tuple[0]
tar1 = x_tuple[1]
tar2 = x_tuple[2]
gamma = x_tuple[3]
feat_class = x_tuple[4]
        # input_tensor has shape (batchsize, length, num_systems, num_views),
        # where num_systems and num_views are set in data_augment.py
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_view = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
        # target vectors are loaded from data_augment; we don't use _get_target_vec
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target1 = tar1.flatten().to(torch.long)
target2 = tar2.flatten().to(torch.long)
gamma = gamma.flatten()
# front-end & back-end
feat_vec = self.m_front_end(x_new, flag_fix_ssl)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
# although the loss is implemented in mixup
# we set gamma to 1.0, so mixup becomes equivalent to a normal
# loss without mixup
loss_ce = self.m_ce_loss(logits, target1, target2, gamma)
# feature loss
if self.m_feat:
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(
bat_siz, num_sys, num_view, -1, feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_view, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx], feat_class[bat_idx])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx], feat_class[bat_idx])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and type(x) is list and x[0].shape[2] > 1:
# if training with multi-view data
# contrastive feature loss requires multi-view of data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
    def get_embedding(self, x, fileinfo):
        filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
        datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # the original code called an undefined self._compute_embedding();
        # assuming the intent was the utterance-level embedding, we return
        # the back-end embedding computed from the front-end feature
        feat_vec = self.m_front_end(x, flag_fix_ssl=True)
        _, emb_vec = self.m_back_end(feat_vec)
        return emb_vec
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,709 | 33.142336 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-7/data_augment.py | #!/usr/bin/env python
"""Functions for data augmentation
These functions are written using Numpy.
They should be used before data are cast into torch.tensor.
For example, use them in config.py:input_trans_fns or output_trans_fns
"""
import os
import sys
import numpy as np
from core_scripts.other_tools import debug as nii_debug
from core_scripts.data_io import wav_tools as nii_wav_tools
from core_scripts.data_io import wav_augmentation as nii_wav_aug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
def load_spoof_trials(bona_name, spoof_loader):
"""spoof_audios = load_spoof_utterance
"""
# load the spoofed data
spoofs = spoof_loader(bona_name)
spoof_list = []
for filepath in spoofs:
if os.path.isfile(filepath):
_, data = nii_wav_tools.waveReadAsFloat(filepath)
else:
print("Cannot find {:s}".format(filepath))
sys.exit(1)
# make the shape (length, 1)
spoof_list.append(np.expand_dims(data, axis=1))
return spoof_list
def mixup_wav(input_wav_list, target_list, alpha, beta, mixup_method='wav'):
"""
"""
# output buffer
out_buf, out_tar1, out_tar2, out_gamma = [], [], [], []
# assume input_wav_list[0] is bonafide
bona_idx = 0
bona_wav = input_wav_list[bona_idx]
    # mix bona fide with each spoofed waveform
for spoof_idx, spoof_wav in enumerate(input_wav_list):
if spoof_idx == 0:
# this is bonafide
continue
else:
# mixup
gamma = np.random.beta(alpha, beta)
mixed_wav = nii_wav_aug.morph_wavform(
bona_wav, spoof_wav, gamma, method=mixup_method)
out_buf.append(mixed_wav)
out_tar1.append(target_list[bona_idx])
out_tar2.append(target_list[spoof_idx])
out_gamma.append(gamma)
# return
return out_buf, out_tar1, out_tar2, out_gamma
def wav_aug_wrapper(input_data,
name_info,
wav_samp_rate,
length,
spoof_loader):
# load spoofed trials
spoof_trials = load_spoof_trials(
name_info.seq_name, spoof_loader)
# use this API to randomly trim the input length
batch_data = [input_data] + spoof_trials
batch_data = nii_wav_aug.batch_pad_for_multiview(
batch_data, wav_samp_rate, length, random_trim_nosil=True)
# first target is for bonafide
# the rest are for spoofed
orig_target = [0] * len(batch_data)
orig_target[0] = 1
    # RawBoost augmentation to create a second view of each waveform
aug_list = [nii_wav_aug.RawBoostWrapper12(x) for x in batch_data]
    # to be compatible with the rest of the code, reuse the same targets
tar_list_1, tar_list_2 = orig_target, orig_target
# gamma_list=1.0 will be equivalent to normal CE
gamma_list = [1.0] * len(batch_data)
# assign bonafide and spoofed data to different feature classes
# for supervised contrastive learning loss
new_feat_class = [2] * len(batch_data)
new_feat_class[0] = 1
# merge the original data and mixed data
new_wav_list = [np.concatenate(batch_data, axis=1),
np.concatenate(aug_list, axis=1)]
new_tar_list_1 = [np.array(orig_target), np.array(tar_list_1)]
new_tar_list_2 = [np.array(orig_target), np.array(tar_list_2)]
new_gamma_list = [np.array([1.0] * len(batch_data)), np.array(gamma_list)]
# waveform stack to (length, num_of_wav, num_of_aug)
output = np.stack(new_wav_list, axis=2)
# label stack to (num_of_wav, num_of_aug)
new_tar_1 = np.stack(new_tar_list_1, axis=1)
new_tar_2 = np.stack(new_tar_list_2, axis=1)
# gamma the same shape as label
new_gamma = np.stack(new_gamma_list, axis=1)
# (num_of_wav)
new_feat_class = np.array(new_feat_class)
return [output, new_tar_1, new_tar_2, new_gamma,
new_feat_class, new_feat_class]
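# Output layout sketch (illustrative): with one bona fide trial and N
# spoofed trials, `output` is (length, 1 + N, 2): view 0 holds the original
# waveforms and view 1 their RawBoost-augmented copies; new_tar_1, new_tar_2,
# and new_gamma are (1 + N, 2), and new_feat_class is (1 + N,).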
def wav_aug_wrapper_val(input_data,
name_info,
wav_samp_rate,
length,
spoof_name_loader):
# use this API to randomly trim the input length
input_data = nii_wav_aug.batch_pad_for_multiview(
[input_data], wav_samp_rate, length, random_trim_nosil=False)[0]
return input_data
if __name__ == "__main__":
print("Tools for data augmentation")
| 4,445 | 31.452555 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_modules/a_softmax.py | #!/usr/bin/env python
"""
a_softmax layers
copied from https://github.com/Joyako/SphereFace-pytorch
"""
from __future__ import print_function
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_f
from torch.nn import Parameter
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
###################
class AngleLayer(torch_nn.Module):
""" Output layer to produce activation for Angular softmax layer
AngleLayer(in_dim, output_dim, m=4):
in_dim: dimension of input feature vectors
output_dim: dimension of output feature vectors
(i.e., number of classes)
        m: angular-softmax parameter
Method: (|x|cos, phi) = forward(x)
x: (batchsize, input_dim)
cos: (batchsize, output_dim)
phi: (batchsize, output_dim)
Note:
cos[i, j]: cos(\theta) where \theta is the angle between
input feature vector x[i, :] and weight vector w[j, :]
phi[i, j]: -1^k cos(m \theta) - 2k
"""
def __init__(self, in_planes, out_planes, m=4):
super(AngleLayer, self).__init__()
self.in_planes = in_planes
self.out_planes = out_planes
self.weight = Parameter(torch.Tensor(in_planes, out_planes))
self.weight.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
self.m = m
# cos(m \theta) = f(cos(\theta))
self.cos_val = [
lambda x: x**0,
lambda x: x**1,
lambda x: 2*x**2-1,
lambda x: 4*x**3-3*x,
lambda x: 8*x**4-8*x**2+1,
lambda x: 16*x**5-20*x**3+5*x,
]
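        # note: these are the Chebyshev expansions of cos(m * theta) in
        # terms of cos(theta); e.g., for m = 2,
        # cos(2 * theta) = 2 * cos(theta)**2 - 1, which matches cos_val[2]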
def forward(self, input, flag_angle_only=False):
"""
Compute a-softmax activations
input:
------
input tensor (batchsize, input_dim)
flag_angle_only: true: return cos(\theta), phi(\theta)
false: return |x|cos(\theta), |x|phi(\theta)
default: false
output:
-------
tuple of tensor ((batchsize, output_dim), (batchsize, output_dim))
"""
# w (feature_dim, output_dim)
w = self.weight.renorm(2, 1, 1e-5).mul(1e5)
# x_modulus (batchsize)
        # sum over input dims -> x_modulus in shape (batchsize)
        x_modulus = input.pow(2).sum(1).pow(0.5)
        # w_modulus (output_dim)
        # w_modulus should be 1, since w has been normalized
        w_modulus = w.pow(2).sum(0).pow(0.5)
        # W * x = ||W|| * ||x|| * cos(\theta)
# inner_wx (batchsize, output_dim)
inner_wx = input.mm(w)
# cos_theta (batchsize, output_dim)
cos_theta = inner_wx / x_modulus.view(-1, 1) \
/ w_modulus.view(1, -1)
cos_theta = cos_theta.clamp(-1, 1)
# cos(m \theta)
cos_m_theta = self.cos_val[self.m](cos_theta)
with torch.no_grad():
            # theta (batchsize, output_dim)
theta = cos_theta.acos()
# k is deterministic here
# k * pi / m <= theta <= (k + 1) * pi / m
k = (self.m * theta / 3.14159265).floor()
minus_one = k * 0.0 - 1
# phi_theta (batchsize, output_dim)
            # Phi(\theta_{y_i}) = (-1)**k * cos(m * \theta_{y_i}) - 2 * k
phi_theta = (minus_one ** k) * cos_m_theta - 2 * k
if flag_angle_only:
cos_x = cos_theta
phi_x = phi_theta
else:
cos_x = cos_theta * x_modulus.view(-1, 1)
phi_x = phi_theta * x_modulus.view(-1, 1)
        # ((batchsize, output_dim), (batchsize, output_dim))
return cos_x, phi_x
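# Usage sketch (assumed shapes; mirrors the example given in am_softmax.py):
#   l_layer = AngleLayer(input_dim, class_num, m=4)
#   l_loss = AngularSoftmaxWithLoss()
#   data = torch.rand(batchsize, input_dim, requires_grad=True)
#   target = (torch.rand(batchsize) * class_num).clamp(0, class_num-1).long()
#   loss = l_loss(l_layer(data), target)
#   loss.backward()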
class AngularSoftmaxWithLoss(torch_nn.Module):
"""
AngularSoftmaxWithLoss()
This is a loss function.
Method:
loss = forward(input, target)
input: a pair of cos(\theta) and phi(\theta),
calculated by AngularLinear
cos(\theta) and phi(\theta) shape: (batchsize, class_num)
target: target labels (batchsize)
"""
def __init__(self, gamma=0):
super(AngularSoftmaxWithLoss, self).__init__()
self.gamma = gamma
self.iter = 0
self.lambda_min = 5.0
self.lambda_max = 1500.0
self.lamb = 1500.0
def forward(self, input, target):
"""
"""
self.iter += 1
# target (batchsize, 1)
target = target.long().view(-1, 1)
with torch.no_grad():
index = torch.zeros_like(input[0])
# index[i][target[i][j]] = 1
index.scatter_(1, target.data.view(-1, 1), 1)
index = index.bool()
# output (batchsize, output_dim)
# Tricks
# output(\theta_yi)
# = (lambda*cos(\theta_yi) + ((-1)**k * cos(m * \theta_yi) - 2*k))
# /(1 + lambda)
# = cos(\theta_yi)
# - cos(\theta_yi) / (1 + lambda) + Phi(\theta_yi) / (1 + lambda)
self.lamb = max(self.lambda_min,
self.lambda_max / (1 + 0.1 * self.iter))
output = input[0] * 1.0
output[index] -= input[0][index] * 1.0 / (1 + self.lamb)
output[index] += input[1][index] * 1.0 / (1 + self.lamb)
# softmax loss
logit = torch_f.log_softmax(output, dim=1)
# select the ones specified by target
logit = logit.gather(1, target).view(-1)
        # focal-loss-style weighting; gamma = 0 reduces this to plain CE
pt = logit.data.exp()
loss = -1 * (1 - pt) ** self.gamma * logit
loss = loss.mean()
return loss
if __name__ == "__main__":
print("Definition of A-softmax loss")
| 5,663 | 30.120879 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_modules/p2sgrad.py | #!/usr/bin/env python
"""
P2sGrad:
Zhang, X. et al. P2sgrad: Refined gradients for optimizing deep face models.
in Proc. CVPR 9906-9914, 2019
I think the grad defined in Eq.(11) is equivalent to defining an MSE loss
with 0 or 1 as the target:
\mathcal{L}_i = \sum_{j=1}^{K} (\cos\theta_{i,j} - \delta(j == y_i))^2
The difference from a common MSE is that the network output is the cosine
of an angle.
"""
from __future__ import print_function
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_f
from torch.nn import Parameter
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
###################
class P2SActivationLayer(torch_nn.Module):
""" Output layer that produces cos\theta between activation vector x
and class vector w_j
in_dim: dimension of input feature vectors
output_dim: dimension of output feature vectors
(i.e., number of classes)
Usage example:
batchsize = 64
input_dim = 10
class_num = 5
l_layer = P2SActivationLayer(input_dim, class_num)
l_loss = P2SGradLoss()
data = torch.rand(batchsize, input_dim, requires_grad=True)
target = (torch.rand(batchsize) * class_num).clamp(0, class_num-1)
target = target.to(torch.long)
scores = l_layer(data)
loss = l_loss(scores, target)
loss.backward()
"""
def __init__(self, in_dim, out_dim):
super(P2SActivationLayer, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.weight = Parameter(torch.Tensor(in_dim, out_dim))
self.weight.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
return
def forward(self, input_feat):
"""
Compute P2sgrad activation
input:
------
input_feat: tensor (batchsize, input_dim)
output:
-------
tensor (batchsize, output_dim)
"""
# normalize the weight (again)
# w (feature_dim, output_dim)
w = self.weight.renorm(2, 1, 1e-5).mul(1e5)
# normalize the input feature vector
# x_modulus (batchsize)
        # sum over input dims -> x_modulus in shape (batchsize)
        x_modulus = input_feat.pow(2).sum(1).pow(0.5)
        # w_modulus (output_dim)
        # w_modulus should be 1, since w has been normalized
        # (computed for reference only; not used below)
        w_modulus = w.pow(2).sum(0).pow(0.5)
        # W * x = ||W|| * ||x|| * cos(\theta)
# inner_wx (batchsize, output_dim)
inner_wx = input_feat.mm(w)
# cos_theta (batchsize, output_dim)
cos_theta = inner_wx / x_modulus.view(-1, 1)
cos_theta = cos_theta.clamp(-1, 1)
# done
return cos_theta
class P2SGradLoss(torch_nn.Module):
"""P2SGradLoss() MSE loss between output and target one-hot vectors
See usage in __doc__ of P2SActivationLayer
"""
def __init__(self):
super(P2SGradLoss, self).__init__()
self.m_loss = torch_nn.MSELoss()
def forward(self, input_score, target):
"""
input
-----
input_score: tensor (batchsize, class_num)
cos\theta given by P2SActivationLayer(input_feat)
target: tensor (batchsize)
target[i] is the target class index of the i-th sample
output
------
          loss: scalar
"""
# target (batchsize, 1)
target = target.long() #.view(-1, 1)
# filling in the target
# index (batchsize, class_num)
with torch.no_grad():
index = torch.zeros_like(input_score)
# index[i][target[i][j]] = 1
index.scatter_(1, target.data.view(-1, 1), 1)
# MSE between \cos\theta and one-hot vectors
loss = self.m_loss(input_score, index)
return loss
if __name__ == "__main__":
print("Definition of P2SGrad Loss")
| 3,964 | 27.52518 | 77 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_modules/am_softmax.py | #!/usr/bin/env python
"""
additive margin softmax layers
Wang, F., Cheng, J., Liu, W. & Liu, H.
Additive margin softmax for face verification. IEEE Signal Process. Lett. 2018
"""
from __future__ import print_function
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_f
from torch.nn import Parameter
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
###################
class AMAngleLayer(torch_nn.Module):
""" Output layer to produce activation for Angular softmax layer
AMAngleLayer(in_dim, output_dim, s=20, m=0.9):
in_dim: dimension of input feature vectors
output_dim: dimension of output feature vectors
(i.e., number of classes)
s: scaler
m: margin
Method: (|x|cos, phi) = forward(x)
x: (batchsize, input_dim)
cos: (batchsize, output_dim)
phi: (batchsize, output_dim)
Note:
cos[i, j]: cos(\theta) where \theta is the angle between
input feature vector x[i, :] and weight vector w[j, :]
       phi[i, j]: cos(\theta) - m
Usage example:
batchsize = 64
input_dim = 10
class_num = 2
l_layer = AMAngleLayer(input_dim, class_num)
l_loss = AMSoftmaxWithLoss()
data = torch.rand(batchsize, input_dim, requires_grad=True)
target = (torch.rand(batchsize) * class_num).clamp(0, class_num-1)
target = target.to(torch.long)
scores = l_layer(data)
loss = l_loss(scores, target)
loss.backward()
"""
def __init__(self, in_planes, out_planes, s=20, m=0.9):
super(AMAngleLayer, self).__init__()
self.in_planes = in_planes
self.out_planes = out_planes
self.weight = Parameter(torch.Tensor(in_planes, out_planes))
self.weight.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
self.m = m
self.s = s
def forward(self, input, flag_angle_only=False):
"""
Compute am-softmax activations
input:
------
input tensor (batchsize, input_dim)
flag_angle_only: true: return cos(\theta), phi(\theta)
false: return |x|cos(\theta), |x|phi(\theta)
default: false
output:
-------
tuple of tensor ((batchsize, output_dim), (batchsize, output_dim))
"""
# w (feature_dim, output_dim)
w = self.weight.renorm(2, 1, 1e-5).mul(1e5)
# x_modulus (batchsize)
# sum input -> x_modules in shape (batchsize)
x_modulus = input.pow(2).sum(1).pow(0.5)
# w_modules (output_dim)
        # w_modulus should be 1, since w has been normalized
w_modulus = w.pow(2).sum(0).pow(0.5)
        # W * x = ||W|| * ||x|| * cos(\theta)
# inner_wx (batchsize, output_dim)
inner_wx = input.mm(w)
# cos_theta (batchsize, output_dim)
cos_theta = inner_wx / x_modulus.view(-1, 1)
cos_theta = cos_theta.clamp(-1, 1)
if flag_angle_only:
cos_x = cos_theta
phi_x = cos_theta
else:
cos_x = self.s * cos_theta
phi_x = self.s * (cos_theta - self.m)
        # ((batchsize, output_dim), (batchsize, output_dim))
return cos_x, phi_x
class AMSoftmaxWithLoss(torch_nn.Module):
"""
AMSoftmaxWithLoss()
See usage in __doc__ of AMAngleLayer
"""
def __init__(self):
super(AMSoftmaxWithLoss, self).__init__()
self.m_loss = torch_nn.CrossEntropyLoss()
def forward(self, input, target):
"""
input:
------
          input: tuple of tensors ((batchsize, out_dim), (batchsize, out_dim))
output from AMAngleLayer
target: tensor (batchsize)
tensor of target index
output:
------
loss: scalar
"""
# target (batchsize)
target = target.long() #.view(-1, 1)
# create an index matrix, i.e., one-hot vectors
with torch.no_grad():
index = torch.zeros_like(input[0])
# index[i][target[i][j]] = 1
index.scatter_(1, target.data.view(-1, 1), 1)
index = index.bool()
# use the one-hot vector as index to select
# input[0] -> cos
# input[1] -> phi
        # if target_i = j, output[i][j] = phi[i][j], otherwise cos[i][j]
#
output = input[0] * 1.0
output[index] -= input[0][index] * 1.0
output[index] += input[1][index] * 1.0
# cross entropy loss
loss = self.m_loss(output, target)
return loss
if __name__ == "__main__":
print("Definition of Am-softmax loss")
| 4,893 | 28.305389 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_modules/grad_rev.py | #!/usr/bin/env python
"""
grad_rev.py
Definition of gradient reverse layer
Copied from https://cyberagent.ai/blog/research/11863/
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#
class GradientReversalFunction(torch.autograd.Function):
""" https://cyberagent.ai/blog/research/11863/
"""
@staticmethod
def forward(ctx, x, scale):
ctx.save_for_backward(scale)
return x
@staticmethod
def backward(ctx, grad):
scale, = ctx.saved_tensors
return scale * -grad, None
class GradientReversal(torch_nn.Module):
""" https://cyberagent.ai/blog/research/11863/
"""
def __init__(self, scale: float):
super(GradientReversal, self).__init__()
self.scale = torch.tensor(scale)
def forward(self, x):
return GradientReversalFunction.apply(x, self.scale)
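if __name__ == "__main__":
    print("Definition of gradient reverse layer")
    # Hedged numerical check (the scale value is an arbitrary
    # assumption): forward is the identity, backward negates and
    # scales the gradient, so x.grad should be -0.5 everywhere.
    layer = GradientReversal(scale=0.5)
    x = torch.ones(3, requires_grad=True)
    y = layer(x)
    y.sum().backward()
    print(y.detach(), x.grad)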
| 1,078 | 22.977778 | 60 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_modules/oc_softmax.py | #!/usr/bin/env python
"""
one class
One-class learning towards generalized voice spoofing detection
Zhang, You and Jiang, Fei and Duan, Zhiyao
arXiv preprint arXiv:2010.13995
"""
from __future__ import print_function
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_f
from torch.nn import Parameter
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
###################
class OCAngleLayer(torch_nn.Module):
""" Output layer to produce activation for one-class softmax
Usage example:
batchsize = 64
input_dim = 10
class_num = 2
l_layer = OCAngleLayer(input_dim)
l_loss = OCSoftmaxWithLoss()
data = torch.rand(batchsize, input_dim, requires_grad=True)
target = (torch.rand(batchsize) * class_num).clamp(0, class_num-1)
target = target.to(torch.long)
scores = l_layer(data)
loss = l_loss(scores, target)
loss.backward()
"""
def __init__(self, in_planes, w_posi=0.9, w_nega=0.2, alpha=20.0):
super(OCAngleLayer, self).__init__()
self.in_planes = in_planes
self.w_posi = w_posi
self.w_nega = w_nega
self.out_planes = 1
self.weight = Parameter(torch.Tensor(in_planes, self.out_planes))
#self.weight.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
torch_nn.init.kaiming_uniform_(self.weight, 0.25)
self.weight.data.renorm_(2,1,1e-5).mul_(1e5)
self.alpha = alpha
def forward(self, input, flag_angle_only=False):
"""
Compute oc-softmax activations
input:
------
input tensor (batchsize, input_dim)
output:
-------
tuple of tensor ((batchsize, output_dim), (batchsize, output_dim))
"""
# w (feature_dim, output_dim)
w = self.weight.renorm(2, 1, 1e-5).mul(1e5)
# x_modulus (batchsize)
# sum input -> x_modules in shape (batchsize)
x_modulus = input.pow(2).sum(1).pow(0.5)
# w_modules (output_dim)
        # w_modulus should be 1, since w has been normalized
# w_modulus = w.pow(2).sum(0).pow(0.5)
        # W * x = ||W|| * ||x|| * cos(\theta)
# inner_wx (batchsize, 1)
inner_wx = input.mm(w)
# cos_theta (batchsize, output_dim)
cos_theta = inner_wx / x_modulus.view(-1, 1)
cos_theta = cos_theta.clamp(-1, 1)
if flag_angle_only:
pos_score = cos_theta
neg_score = cos_theta
else:
pos_score = self.alpha * (self.w_posi - cos_theta)
neg_score = -1 * self.alpha * (self.w_nega - cos_theta)
#
return pos_score, neg_score
class OCSoftmaxWithLoss(torch_nn.Module):
"""
OCSoftmaxWithLoss()
"""
def __init__(self):
super(OCSoftmaxWithLoss, self).__init__()
self.m_loss = torch_nn.Softplus()
def forward(self, inputs, target):
"""
input:
------
          input: tuple of tensors ((batchsize, out_dim), (batchsize, out_dim))
output from OCAngle
inputs[0]: positive class score
inputs[1]: negative class score
target: tensor (batchsize)
tensor of target index
output:
------
loss: scalar
"""
        # Assume target is binary, positive = 1, negative = 0
#
# Equivalent to select the scores using if-elese
# if target = 1, use inputs[0]
# else, use inputs[1]
output = inputs[0] * target.view(-1, 1) + \
inputs[1] * (1-target.view(-1, 1))
loss = self.m_loss(output).mean()
return loss
if __name__ == "__main__":
print("Definition of Am-softmax loss")
| 3,886 | 28.007463 | 76 | py |
matscipy | matscipy-master/discover_version.py | #
# Copyright 2022 Lars Pastewka
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# This is the most minimal-idiotic way of discovering the version that I
# could come up with. It deals with the following issues:
# * If we are installed, we can get the version from package metadata,
# either via importlib.metadata or from pkg_resources. This also holds for
# wheels that contain the metadata. We are good! Yay!
# * If we are not installed, there are two options:
# - We are working within the source git repository. Then
# git describe --tags --always
# yields a reasonable version descriptor, but that is unfortunately not
# PEP 440 compliant (see https://peps.python.org/pep-0440/). We need to
# mangle the version string to yield something compatible.
# - If we install from a source tarball, we need to parse PKG-INFO manually.
#
import re
import subprocess
class CannotDiscoverVersion(Exception):
pass
def get_version_from_pkg_info():
"""
Discover version from PKG-INFO file.
"""
    with open('PKG-INFO', 'r') as f:
        for l in f:
            if l.startswith('Version:'):
                return l[8:].strip()
    raise CannotDiscoverVersion("No line starting with 'Version:' in 'PKG-INFO'.")
def get_version_from_git():
"""
Discover version from git repository.
"""
git_describe = subprocess.run(
['git', 'describe', '--tags', '--dirty', '--always'],
stdout=subprocess.PIPE)
if git_describe.returncode != 0:
raise CannotDiscoverVersion('git execution failed.')
version = git_describe.stdout.decode('latin-1').strip()
dirty = version.endswith('-dirty')
# Make version PEP 440 compliant
if dirty:
version = version.replace('-dirty', '')
    version = version.lstrip('v')  # Remove leading 'v' if it exists
version = version.replace('-', '.dev', 1)
version = version.replace('-', '+', 1)
if dirty:
version += '.dirty'
return version
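# Worked example of the mangling above (illustrative, not output of this
# script): 'v0.7.0-10-gabc1234-dirty' becomes '0.7.0.dev10+gabc1234.dirty'.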
try:
version = get_version_from_git()
except CannotDiscoverVersion:
version = get_version_from_pkg_info()
#
# Print version to screen
#
print(version)
| 3,204 | 32.736842 | 82 | py |
matscipy | matscipy-master/maintenance/copyright.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
from collections import defaultdict
from datetime import datetime
from subprocess import Popen, PIPE
root = os.path.dirname(sys.argv[0])
def read_authors(fn):
return {email.strip('<>'): name for name, email in
[line.rsplit(maxsplit=1) for line in open(fn, 'r')]}
def parse_git_log(log, authors):
committers = defaultdict(set)
author = None
date = None
for line in log.decode('latin1').split('\n'):
if line.startswith('commit'):
if date is not None and author is not None:
committers[author].add(date.year)
elif line.startswith('Author:'):
email = line.rsplit('<', maxsplit=1)[1][:-1]
        elif line.startswith('Date:'):
            date = datetime.strptime(line[5:].rsplit(maxsplit=1)[0].strip(),
                                     '%a %b %d %H:%M:%S %Y')
            try:
                author = authors[email]
            except KeyError:
                author = email
elif 'copyright' in line.lower() or 'license' in line.lower():
date = None
if date is not None:
committers[author].add(date.year)
return committers
def pretty_years(years):
def add_to_year_string(s, pprev_year, prev_year):
if pprev_year == prev_year:
# It is a single year
if s is None:
return f'{prev_year}'
else:
return f'{s}, {prev_year}'
else:
# It is a range
if s is None:
return f'{pprev_year}-{prev_year}'
else:
return f'{s}, {pprev_year}-{prev_year}'
years = sorted(years)
prev_year = pprev_year = years[0]
s = None
for year in years[1:]:
if year - prev_year > 1:
s = add_to_year_string(s, pprev_year, prev_year)
pprev_year = year
prev_year = year
return add_to_year_string(s, pprev_year, prev_year)
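# Illustrative example (not executed here):
# pretty_years([2014, 2015, 2017, 2018, 2019]) returns
# '2014-2015, 2017-2019'.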
authors = read_authors('{}/../AUTHORS'.format(root))
process = Popen(['git', 'log', '--follow', sys.argv[1]], stdout=PIPE,
stderr=PIPE)
stdout, stderr = process.communicate()
committers = parse_git_log(stdout, authors)
prefix = 'Copyright'
for name, years in committers.items():
print('{} {} {}'.format(prefix, pretty_years(years), name))
prefix = ' ' * len(prefix)
print()
| 3,148 | 31.132653 | 76 | py |
matscipy | matscipy-master/maintenance/replace_header.py | #
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
file_lines = open(sys.argv[1], 'r').readlines()
header_lines = sys.stdin.readlines()
while file_lines[0].startswith('#'):
file_lines = file_lines[1:]
file_lines.insert(0, '#\n')
for header_line in header_lines[::-1]:
file_lines.insert(0, '# {}'.format(header_line).strip() + '\n')
file_lines.insert(0, '#\n')
open(sys.argv[1], 'w').writelines(file_lines)
| 1,124 | 31.142857 | 71 | py |
matscipy | matscipy-master/matscipy/hessian_finite_differences.py | #
# Copyright 2014-2015, 2017, 2021 Lars Pastewka (U. Freiburg)
# 2018, 2020 Jan Griesser (U. Freiburg)
# 2014, 2020 James Kermode (Warwick U.)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Deprecated module."""
from numpy import deprecate
from .numerical import numerical_hessian
@deprecate(new_name="numerical.numerical_hessian")
def fd_hessian(atoms, dx=1e-5, indices=None):
"""
Compute the hessian matrix from Jacobian of forces via central differences.
Parameters
----------
atoms: ase.Atoms
        Atomic configuration in a local or global minimum.
dx: float
Displacement increment
indices:
Compute the hessian only for these atom IDs
"""
return numerical_hessian(atoms, dx=dx, indices=indices)
| 1,542 | 30.489796 | 79 | py |
matscipy | matscipy-master/matscipy/precon.py | """Hessian preconditioner."""
import numpy as np
from ase.optimize.precon import Precon
from ase.calculators.calculator import equal
class HessianPrecon(Precon):
"""Preconditioner for dense Hessian."""
def __init__(self,
c_stab=0.01,
move_tol=0.1,
P=None,
old_positions=None):
self.P = P
self.c_stab = c_stab
self.move_tol = move_tol
self.old_positions = old_positions
def make_precon(self, atoms):
has_moved = not equal(atoms.positions,
self.old_positions,
atol=self.move_tol)
initialized = self.P is not None and self.old_positions is not None
if not initialized or has_moved:
P = atoms.calc.get_property("hessian", atoms).todense()
di = np.diag_indices_from(P)
P[di] += self.c_stab
D, Q = np.linalg.eigh(P)
if np.any(D < 0):
self.P = np.array(Q @ np.diag(np.abs(D)) @ Q.T)
else:
self.P = np.array(P)
self.old_positions = atoms.positions.copy()
def Pdot(self, x):
return self.P.dot(x)
def solve(self, x):
return np.linalg.solve(self.P, x)
def copy(self):
return HessianPrecon(self.c_stab, self.move_tol, None, None)
def asarray(self):
return self.P.copy()
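# Hedged usage sketch (assumes a calculator that exposes a "hessian"
# property, as some matscipy calculators do):
#
# from ase.optimize.precon import PreconLBFGS
# opt = PreconLBFGS(atoms, precon=HessianPrecon())
# opt.run(fmax=1e-4)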
| 1,429 | 27.6 | 75 | py |
matscipy | matscipy-master/matscipy/socketcalc.py | #
# Copyright 2015-2016, 2020 James Kermode (Warwick U.)
# 2019 James Brixey (Warwick U.)
# 2016 Henry Lambert (King's College London)
# 2015 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import shutil
import subprocess
import socket
import socketserver
from io import StringIO
import time
import threading
from queue import Queue
import numpy as np
from matscipy.elasticity import full_3x3_to_Voigt_6_stress
from matscipy.logger import quiet, screen
from ase.atoms import Atoms
from ase.io.extxyz import read_xyz, write_xyz
from ase.io.vasp import write_vasp
from ase.io.castep import write_castep_cell, write_param
from ase.calculators.calculator import Calculator
from ase.calculators.vasp import Vasp
from ase.calculators.castep import Castep
MSG_LEN_SIZE = 8
MSG_END_MARKER = b'done.\n'
MSG_END_MARKER_SIZE = len(MSG_END_MARKER)
MSG_INT_SIZE = 6
MSG_FLOAT_SIZE = 25
MSG_FLOAT_FORMAT = '%25.16f'
MSG_INT_FORMAT = '%6d'
ATOMS_REQUESTS = {ord('A'): 'REFTRAJ', ord('X'): 'XYZ'}
RESULTS_REQUESTS = {ord('R'): 'REFTRAJ', ord('Y'): 'XYZ'}
ZERO_ATOMS_DATA = {'REFTRAJ': b' 242 0\n 0\n 0.0000000000000000 0.0000000000000000 0.0000000000000000\n 0.0000000000000000 0.0000000000000000 0.0000000000000000\n 0.0000000000000000 0.0000000000000000 0.0000000000000000\n',
'XYZ': b' 2500\nlabel=0 cutoff_factor=1.20000000 nneightol=1.20000000 Lattice="0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000" Properties=species:S:1:pos:R:3:Z:I:1\n'}
CLIENT_TIMEOUT = 60
MAX_POS_DIFF = 1.0 # angstrom
MAX_CELL_DIFF = 1e-3 # angstrom
MAX_POS_DIFF_CASTEP = 1.0 # angstrom
MAX_CELL_DIFF_CASTEP = 1.0 # angstrom
def pack_atoms_to_reftraj_str(at, label):
data = ''
data += MSG_INT_FORMAT % label + '\n'
data += MSG_INT_FORMAT % len(at) + '\n'
for i in range(3):
data += (3*MSG_FLOAT_FORMAT) % tuple(at.cell[:, i]) + '\n'
s = at.get_scaled_positions()
for i in range(len(at)):
data += (3*MSG_FLOAT_FORMAT) % tuple(s[i, :]) + '\n'
    # precede message by its length
data_length = ('%8d' % len(data)).encode('ascii')
data = data_length + data.encode('ascii')
return data
def pack_atoms_to_xyz_str(at, label):
at.info['label'] = label
    buffer = StringIO()
    write_xyz(buffer, at)
    data = buffer.getvalue()
    buffer.close()
    # precede message by its length
data_length = ('%8d' % len(data)).encode('ascii')
data = data_length + data.encode('ascii')
return data
def unpack_reftraj_str_to_atoms(data):
lines = data.split(b'\n')
label = int(lines[0])
n_atoms = int(lines[1])
    at = Atoms(symbols=['X']*n_atoms, cell=np.eye(3))  # 'X' is a placeholder species
at.info['label'] = label
    for i in range(3):
        at.cell[:, i] = [float(x) for x in lines[i + 2].split()]
    for i, line in enumerate(lines[5:5 + n_atoms]):
t = [float(x) for x in line.split()]
at.positions[i, :] = np.dot(t, at.cell)
return at
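# Hedged round-trip sketch for the REFTRAJ wire format (illustrative,
# assumes ase; MSG_LEN_SIZE strips the 8-byte length prefix that the
# request handler consumes separately):
#
# at = Atoms('Si', positions=[[0., 0., 0.]], cell=np.eye(3) * 5.43)
# msg = pack_atoms_to_reftraj_str(at, label=1)
# at2 = unpack_reftraj_str_to_atoms(msg[MSG_LEN_SIZE:])
# assert at2.info['label'] == 1 and len(at2) == 1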
def pack_results_to_reftraj_output_str(at):
    data = ''
    # label line mirrors unpack_reftraj_output_str_to_results() below
    data += MSG_INT_FORMAT % at.info['label'] + '\n'
    data += MSG_INT_FORMAT % len(at) + '\n'
    data += MSG_FLOAT_FORMAT % at.info['energy'] + '\n'
    force = at.get_array('force')
    virial = at.info['virial']
    for i in range(len(at)):
        data += (3*MSG_FLOAT_FORMAT) % tuple(force[i, :]) + '\n'
    # NB: this order (xx, yy, zz, xy, yz, xz) is not Voigt order
    data += (6*MSG_FLOAT_FORMAT) % (virial[0,0], virial[1,1], virial[2,2],
                                    virial[0,1], virial[1,2], virial[0,2])
    # precede message by its length
    data_length = ('%8d' % len(data)).encode('ascii')
    data = data_length + data.encode('ascii')
    return data
def unpack_reftraj_output_str_to_results(data):
lines = data.strip().split(b'\n')
label = int(lines[0])
natoms = int(lines[1])
energy = float(lines[2])
force = np.zeros((natoms,3))
for i, line in enumerate(lines[3:-1]):
force[i, :] = [float(f) for f in line.split()]
v6 = [float(v) for v in lines[-1].split()]
virial = np.zeros((3,3))
# NB: not in Voigt order (xx, yy, zz, yz, xz, xy)
virial[0,0], virial[1,1], virial[2,2], virial[0,1], virial[1,2], virial[0,2] = v6
virial[1,0] = virial[0,1]
virial[2,1] = virial[1,2]
virial[2,0] = virial[0,2]
return (label, (natoms, energy, force, virial))
def unpack_xyz_str_to_results(data):
    buffer = StringIO(data.decode('latin-1'))
    at = next(read_xyz(buffer))
    buffer.close()
label = at.info['label']
return (label, at)
class AtomsRequestHandler(socketserver.StreamRequestHandler):
def handle(self):
ip, port = self.client_address
task = None
# receive request code and client ID
request_str = self.rfile.read(MSG_LEN_SIZE)
request = request_str[0]
client_id = int(request_str[1:])
if client_id > self.server.njobs-1:
            raise RuntimeError('Unknown client ID %d outside of range 0 <= ID <= %d' %
                               (client_id, self.server.njobs-1))
self.server.logger.pr('"%s" request from %s:%d client %d' % (chr(request), ip, port, client_id))
#print 'input queue lengths ', ''.join(['%d:%d ' % (i,q.qsize()) for (i,q) in enumerate(input_qs)])
#print 'output queue length %d' % output_q.qsize()
if request in ATOMS_REQUESTS:
# client is ready for Atoms (in either REFTRAJ or XYZ format)
data, fmt, label, at = self.server.input_qs[client_id].get()
assert ATOMS_REQUESTS[request] == fmt
if data == b'shutdown' or data == b'restart':
task = data
data = ZERO_ATOMS_DATA[fmt]
self.wfile.write(data)
elif request in RESULTS_REQUESTS:
# results are available from client in REFTRAJ or XYZ format
data_size = int(self.rfile.read(MSG_LEN_SIZE))
data = self.rfile.read(data_size)
fmt = RESULTS_REQUESTS[request]
self.server.output_q.put((client_id, fmt, data))
self.server.input_qs[client_id].task_done()
else:
raise RuntimeError('Unknown request code "%s"' % request)
# say goodbye to this client
self.wfile.write(MSG_END_MARKER)
if (request == ord('A') or request == ord('X')) and task == b'restart':
# if we're restarting a client, get the next thing out of the queue
# and re-initialise. Restart won't do anything until shutdown
# of old client has completed.
data, fmt, label, at = self.server.input_qs[client_id].get()
self.server.logger.pr('"%s" request from client %d triggering restart for calculation with label %d' %
(request, client_id, label))
self.server.clients[client_id].start_or_restart(at, label, restart=True)
class AtomsServerSync(socketserver.TCPServer):
allow_reuse_address = True
def __init__(self, server_address, RequestHandlerClass, clients,
bind_and_activate=True, max_attempts=3, bgq=False, logger=screen):
self.njobs = len(clients)
# allow up to twice as many threads as sub-block jobs
self.request_queue_size = 2*self.njobs
self.max_attempts = max_attempts
self.bgq = bgq # If True, we're running on IBM Blue Gene/Q platform
self.logger = logger
socketserver.TCPServer.__init__(self,
server_address,
RequestHandlerClass,
bind_and_activate)
self.clients = clients
for client in self.clients:
client.server = self # FIXME circular reference
# record all input in the order in which it is put()
self.input_q = Queue()
# we also need an input Queue for each client: this is so that we can
# exploit wavefunction reuse by sending consecutive clusters belonging
# to the same atom to the same QM partition
self.input_qs = [Queue() for i in range(self.njobs) ]
self.output_q = Queue()
def server_activate(self):
socketserver.TCPServer.server_activate(self)
self.ip, self.port = self.server_address
if self.bgq:
# If we're on a Blue Gene, note that IP address returned
# by server.server_address is not the correct one for CNs
# to talk to FEN, so we discard it, and use the InfiniBand
# address returned by get_hostname_ip()
import bgqtools
hostname, self.ip = bgqtools.get_hostname_ip()
else:
hostname = socket.gethostname()
self.logger.pr('AtomsServer running on %s %s:%d with njobs=%d' %
(hostname, self.ip, self.port, self.njobs))
def shutdown_clients(self):
self.logger.pr('shutting down all clients')
wait_threads = []
for client_id, client in enumerate(self.clients):
if (client.process is not None and client.process.poll() is None and
                (client.wait_thread is None or not client.wait_thread.is_alive())):
wait_threads.append(client.shutdown(block=False))
self.handle_request() # dispatch the shutdown request via socket
# wait for them all to finish shutting down
for wait_thread in wait_threads:
            if wait_thread is None or not wait_thread.is_alive():
continue
wait_thread.join()
self.logger.pr('all client shutdowns complete')
def shutdown(self):
self.shutdown_clients()
self.server_close()
def put(self, at, client_id, label, force_restart=False):
self.logger.pr('Putting Atoms to client %d label %d' % (client_id, label))
# allow client to modify atoms (e.g. sort them)
at, fmt, first_time = self.clients[client_id].preprocess(at, label, force_restart)
# store what we actually did- `at` may have been modified by preprocess()
self.input_q.put((label, client_id, at))
if fmt == 'REFTRAJ':
data = pack_atoms_to_reftraj_str(at, label)
elif fmt == 'XYZ':
data = pack_atoms_to_xyz_str(at, label)
else:
raise ValueError('Unknown format "%s"' % fmt)
self.input_qs[client_id].put((data, fmt, label, at))
if first_time:
# throw away what we just put(), as it's in the input files.
# note that we don't call task_done() until results come in
discard = self.input_qs[client_id].get()
def join_all(self):
self.logger.pr('AtomsServer waiting for input queues to empty')
for input_q in self.input_qs:
input_q.join()
self.logger.pr('all AtomsServer queues drained.')
def get_results(self):
self.logger.pr('AtomsServer getting results')
results = {}
for attempt in range(self.max_attempts):
rejects = []
self.join_all()
self.logger.pr('AtomsServer.get_results() attempt %d of %d jobs finished' %
(attempt+1, self.max_attempts))
while self.output_q.unfinished_tasks:
client_id, fmt, data = self.output_q.get()
if fmt == 'REFTRAJ':
label, res = unpack_reftraj_output_str_to_results(data)
elif fmt == 'XYZ':
label, res = unpack_xyz_str_to_results(data)
else:
raise ValueError('get_results() got unknown format "%s"' % fmt)
if label > 0: # WARNING: labels must start from 1, or first calc never passes test
# calculation converged, save the results
self.logger.pr('calculation label %d client %d CONVERGED' % (label, client_id))
results[label] = res
else:
# calculation did not converge, we need to repeat it
self.logger.pr('calculation label %d client %d DID NOT CONVERGE' % (label, client_id))
rejects.append(-label)
self.output_q.task_done()
self.logger.pr('AtomsServer.get_results() rejects=%r' % rejects)
self.logger.pr('AtomsServer.get_results() sorted(results.keys())=%r' % sorted(results.keys()))
# collect all input task so we can see if anything is missing
input = {}
while self.input_q.unfinished_tasks:
label, client_id, at = self.input_q.get()
input[label] = (client_id, at)
self.input_q.task_done()
self.logger.pr('AtomsServer.get_results() sorted(input.keys())=%r' % sorted(input.keys()))
# resubmit any failed calculations
for label in rejects:
client_id, at = input[label]
self.logger.pr('Resubmiting calculation label %d client_id %d' % (label, client_id))
self.put(at, client_id, label, force_restart=True)
assert len(results) + len(rejects) == len(input)
# if all calculations converged we are done
if len(rejects) == 0:
break
else:
raise RuntimeError('max_attempts (%d) exceeded without all calculations completing successfully' %
self.max_attempts)
assert(len(results) == len(input))
assert(len(rejects) == 0)
results_atoms = []
for (inp_label, label) in zip(sorted(input.keys()), sorted(results.keys())):
assert inp_label == label
client_id, inp_at = input[inp_label]
res = results[label]
if isinstance(res, Atoms):
at = res
else:
(natoms, energy, force, virial) = res
assert len(inp_at) == natoms
at = inp_at.copy() # FIXME could possibly store results inplace, but need to think about sorting
at.info['label'] = label
at.info['energy'] = energy
at.set_array('force', force)
at.info['virial'] = virial
# allow client to modify results (e.g. reverse sort order)
at = self.clients[client_id].postprocess(at, label)
results_atoms.append(at)
self.logger.pr('AtomsServer processed %d results' % len(results))
return results_atoms
class AtomsServerAsync(socketserver.ThreadingMixIn, AtomsServerSync):
"""
Asynchronous (threaded) version of AtomsServer
"""
def shutdown(self):
self.shutdown_clients()
return socketserver.TCPServer.shutdown(self)
def shutdown_clients(self):
self.logger.pr('shutting down all clients')
wait_threads = []
for client_id, client in enumerate(self.clients):
if (client.process is not None and client.process.poll() is None and
                (client.wait_thread is None or not client.wait_thread.is_alive())):
wait_threads.append(client.shutdown(block=False))
# wait for them all to finish shutting down
for wait_thread in wait_threads:
            if wait_thread is None or not wait_thread.is_alive():
continue
wait_thread.join()
self.logger.pr('all client shutdowns complete')
AtomsServer = AtomsServerAsync # backwards compatibility
class Client(object):
"""
Represents a single Client job
Used by AtomsServer to start, restart and shutdown clients
running on the Compute Nodes.
"""
def __init__(self, client_id, exe, env=None, npj=1, ppn=1,
block=None, corner=None, shape=None,
jobname='socketcalc', rundir=None,
fmt='REFTRAJ', parmode=None, mpirun='mpirun',
mpirun_args=['-np'], logger=screen,
max_pos_diff=MAX_POS_DIFF,
max_cell_diff=MAX_CELL_DIFF):
self.client_id = client_id
self.process = None # handle for the runjob process
self.log = None # stdout file
self.wait_thread = None # used by shutdown(block=False)
self.last_atoms = None # used to check if we can continue from previous task
        self.lock = threading.Lock() # avoid concurrency issues
if env is None:
env = {}
self.env = env # environment
self.exe = exe # executable
self.npj = npj # nodes per job
self.ppn = ppn # processes per node
self.block, self.corner, self.shape = block, corner, shape
self.jobname = jobname
self.fmt = fmt
self.parmode = parmode
self.mpirun = mpirun
self.mpirun_args = mpirun_args
self.logger = logger
self.max_pos_diff = max_pos_diff
self.max_cell_diff = max_cell_diff
self.rundir = rundir or os.getcwd()
self.subdir = os.path.join(self.rundir, '%s-%03d' % (jobname, self.client_id))
if not os.path.exists(self.subdir):
self.logger.pr('Making subdir %s' % self.subdir)
os.mkdir(self.subdir)
def extra_args(self, label=None):
"""
Return list of additional command line arguments to be passed to client
"""
args = [self.server.ip, str(self.server.port), str(self.client_id)]
if label is not None:
args.append(str(label))
return args
def start(self, label=None):
"""
Start an individual client.
Raises RuntimeError if this client is already running.
"""
if self.process is not None:
            raise RuntimeError('client %d is already running' % self.client_id)
runjob_args = []
popen_args = {}
if self.parmode == 'cobalt':
# Convert env to "--envs KEY=value" arguments for runjob
envargs = []
            for (k, v) in self.env.items():
envargs.extend(['--envs', '%s=%s' % (k, v) ])
runjob_args += ['runjob', '--block', self.block]
if self.corner is not None:
runjob_args += ['--corner', self.corner]
if self.shape is not None:
runjob_args += ['--shape', self.shape]
runjob_args += (['-n', str(self.npj*self.ppn), '-p', str(self.ppn)] + envargs +
['--cwd', self.subdir, ':'])
elif self.parmode == 'mpi':
runjob_args += [self.mpirun]
for mpirun_arg in self.mpirun_args:
runjob_args += [mpirun_arg]
if mpirun_arg in ['-n', '-np']:
runjob_args += [str(self.npj*self.ppn)]
popen_args['cwd'] = self.subdir
popen_args['env'] = os.environ # for mpi, let mpirun inherit environment of script
else:
popen_args['cwd'] = self.subdir
popen_args['env'] = self.env
runjob_args += [self.exe]
runjob_args += self.extra_args(label)
self.logger.pr('starting client %d args %r' % (self.client_id, runjob_args))
self.log = open(os.path.join(self.rundir, '%s-%03d.output' % (self.jobname, self.client_id)), 'a')
# send stdout and stderr to same file
self.process = subprocess.Popen(runjob_args, stdout=self.log, stderr=self.log, **popen_args)
def shutdown(self, block=True):
"""Request a client to shutdown.
If block=True, does not return until shutdown is complete. If
block=False, waits for the client to shutdown in a new
thread. Check self.waits_thread.isAlive() to see when shutdown
has finished. (This function also returns a handle to the wait
thread when block=False).
"""
if self.process is None:
self.logger.pr('client %d (requested to shutdown) has never been started' % self.client_id)
return
if self.process.poll() is not None:
self.logger.pr('client %d is already shutdown' % self.client_id)
return
        if (self.wait_thread is not None and self.wait_thread.is_alive()):
raise RuntimeError('client %d is already in the process of shutting down' % self.client_id)
input_q = self.server.input_qs[self.client_id]
input_q.put((b'shutdown', self.fmt, -1, None))
if block:
self.wait_for_shutdown()
else:
self.wait_thread = threading.Thread(target=self.wait_for_shutdown)
self.wait_thread.start()
return self.wait_thread
def wait_for_shutdown(self):
"""
Block until a client has shutdown.
Typically called automatically by shutdown() or
start_or_restart().
Shutdown should previously have been initiated by queuing a
'shutdown' or 'restart' request. Waits CLIENT_TIMEOUT for
graceful shutdown. If client is still alive, a SIGTERM signal
is sent. If this has had no effect after a further
CLIENT_TIMEOUT, then a SIGKILL is sent. Does not return until
the SIGKILL has taken effect.
This function also marks shutdown task as complete in
servers's input_q for this client.
"""
wait_thread = threading.Thread(target=self.process.wait)
self.logger.pr('waiting for client %d to shutdown' % self.client_id)
wait_thread.start()
wait_thread.join(CLIENT_TIMEOUT)
        if wait_thread.is_alive():
self.logger.pr('client %d did not shutdown gracefully in %d seconds - sending SIGTERM' %
(self.client_id, CLIENT_TIMEOUT))
self.process.terminate()
wait_thread.join(CLIENT_TIMEOUT)
            if wait_thread.is_alive():
self.logger.pr('client %d did not respond to SIGTERM - sending SIGKILL' % self.client_id)
self.process.kill()
wait_thread.join() # no timeout for kill
else:
self.logger.pr('client %d responded to SIGTERM' % self.client_id)
else:
self.logger.pr('client %d shutdown within timeout' % self.client_id)
self.logger.pr('client %d shutdown complete - exit code %r' % (self.client_id, self.process.poll()))
self.log.close()
self.process = None
self.log = None
self.server.input_qs[self.client_id].task_done()
self.logger.pr('wait_for_shutdown done')
def start_or_restart(self, at, label, restart=False):
"""
Start or restart a client
If restart=True, wait for previous client to shutdown first.
Calls write_input_files() followed by start().
"""
if restart:
self.wait_for_shutdown()
self.write_input_files(at, label)
self.start(label)
def preprocess(self, at, label, force_restart=False):
"""
Prepare client for a calculation.
Starts client if this is the first task for it, or schedules a
restart if new configuration is not compatible with the last
one submitted to the queue (see is_compatible() method).
        May be extended in subclasses to e.g. sort the atoms by
atomic number. If Atoms object needs to be changed, a copy
should be returned rather than updating it inplace.
Returns (at, first_time).
"""
first_time = self.process is None
restart_reqd = (not first_time and (force_restart or
(not self.is_compatible(self.last_atoms, at, label))))
# keep a copy of last config queued for this client.
# acquire a lock in case multiple put() calls to the same client
# occur concurrently.
try:
self.lock.acquire()
self.last_atoms = at.copy()
finally:
self.lock.release()
if restart_reqd:
# put a shutdown command into the queue, ahead of this config.
# once it gets completed, restart_client() will be called as below
self.logger.pr('restart scheduled for client %d label %d' % (self.client_id, label))
self.server.input_qs[self.client_id].put((b'restart', self.fmt, -1, None))
if first_time:
self.start_or_restart(at, label, restart=False)
return at, self.fmt, first_time
def postprocess(self, at, label):
"""
Post-process results of calculation.
        May be overridden in subclasses to e.g. reverse sort order
applied in preprocess().
"""
return at
def is_compatible(self, old_at, new_at, label):
"""
Check if new_at and old_at are compatible.
Returns True if calculation can be continued, or False
if client must be restarted before it can process new_at.
"""
        # base-class default: any configuration is compatible
        return True
def write_input_files(self, at, label):
raise NotImplementedError('to be implemented in subclasses')
class QUIPClient(Client):
"""
Subclass of Client for running QUIP calculations.
Initial input files are written in extended XYZ format, and
subsequent communication is via sockets, in either REFTRAJ
or XYZ format.
"""
def __init__(self, client_id, exe, env=None, npj=1, ppn=1,
block=None, corner=None, shape=None,
jobname='socketcalc', rundir=None,
fmt='REFTRAJ', parmode=None, mpirun='mpirun',
mpirun_args=['-np'], logger=screen,
max_pos_diff=MAX_POS_DIFF,
max_cell_diff=MAX_CELL_DIFF,
param_files=None):
Client.__init__(self, client_id, exe, env, npj, ppn,
block, corner, shape, jobname, rundir, fmt, parmode,
mpirun, mpirun_args, logger, max_pos_diff,
max_cell_diff)
self.param_files = param_files
def write_input_files(self, at, label):
write_xyz(os.path.join(self.subdir, 'atoms.%d.xyz' % self.client_id), at)
# copy in parameter files
if self.param_files is not None:
for param_file in self.param_files:
param_file_basename = os.path.basename(param_file)
shutil.copyfile(param_file, os.path.join(self.subdir, param_file_basename))
_chdir_lock = threading.Lock()
class QMClient(Client):
"""
Abstract subclass of Client for QM calculations
"""
def is_compatible(self, old_at, new_at, label):
# first time, anything goes
if old_at is None:
return True
if not Client.is_compatible(self, old_at, new_at, label):
return False
if len(old_at) != len(new_at):
self.logger.pr('is_compatible() on client %d label %d got number of atoms mismatch: %d != %d' % (self.client_id,
label,
len(old_at),
len(new_at)))
return False # number of atoms must match
if abs(old_at.cell - new_at.cell).max() > self.max_cell_diff:
self.logger.pr('is_compatible() on client %d label %d got cell mismatch: %r != %r' % (self.client_id,
label,
old_at.cell,
new_at.cell))
return False # cells must match
# RMS difference in positions must be less than max_pos_diff
old_p = old_at.get_positions()
new_p = new_at.get_positions()
old_z = old_at.get_chemical_symbols()
new_z = new_at.get_chemical_symbols()
if 'index' in old_at.arrays:
old_index = old_at.get_array('index')
new_index = new_at.get_array('index')
# if termination exists, undo ordering differences due to cluster hopping
if ('termindex_%d' % self.client_id) in old_at.arrays:
old_termindex = old_at.get_array('termindex_%d' % self.client_id)
new_termindex = new_at.get_array('termindex_%d' % self.client_id)
                a1s = sorted([(old_index[i], old_z[i], list(old_p[i]))
                              for i in range(len(old_at)) if old_termindex[i] == 0])
                a2s = sorted([(new_index[i], new_z[i], list(new_p[i]))
                              for i in range(len(new_at)) if new_termindex[i] == 0])
else:
a1s = sorted([(old_index[i], old_z[i], list(old_p[i])) for i in range(len(old_at)) ])
a2s = sorted([(new_index[i], new_z[i], list(new_p[i])) for i in range(len(new_at)) ])
old_p = np.r_[[p for (i, z, p) in a1s]]
new_p = np.r_[[p for (i, z, p) in a2s]]
old_z = np.r_[[z for (i, z, p) in a1s]]
new_z = np.r_[[z for (i, z, p) in a2s]]
if not np.all(old_z == new_z):
self.logger.pr('is_compatible() on client %d label %d got atomic number mismatch: %r != %r' % (self.client_id,
label,
old_z, new_z))
return False # atomic numbers must match
# undo jumps across PBC - approach is that of QUIP's undo_pbc_jumps() routine
old_g = np.linalg.inv(old_at.cell.T).T
d = new_p.T - old_p.T - (np.dot(old_at.cell, np.floor(np.dot(old_g, (new_p - old_p).T)+0.5)))
rms_diff = np.sqrt((d**2).mean())
self.logger.pr('is_compatible() on client %d label %d got RMS position difference %.3f' % (self.client_id, label, rms_diff))
if rms_diff > self.max_pos_diff:
self.logger.pr('is_compatible() on client %d label %d got RMS position difference %.3f > max_pos_diff=%.3f' %
(self.client_id, label, rms_diff, self.max_pos_diff))
return False
return True
class VaspClient(QMClient):
"""
Subclass of Client for running VASP calculations.
Initial input files are written in POSCAR, INCAR, POTCAR and KPOINTS
    formats, and subsequent communication is via sockets in REFTRAJ format.
"""
def __init__(self, client_id, exe, env=None, npj=1, ppn=1,
block=None, corner=None, shape=None,
jobname='socketcalc', rundir=None,
fmt='REFTRAJ', parmode=None, mpirun='mpirun',
mpirun_args=['-np'], logger=screen,
max_pos_diff=MAX_POS_DIFF,
max_cell_diff=MAX_CELL_DIFF,
**vasp_args):
Client.__init__(self, client_id, exe, env, npj, ppn,
block, corner, shape, jobname, rundir,
fmt, parmode, mpirun, mpirun_args, logger,
max_pos_diff, max_cell_diff)
if 'ibrion' not in vasp_args:
self.logger.pr('No ibrion key in vasp_args, setting ibrion=13')
vasp_args['ibrion'] = 13
if 'nsw' not in vasp_args:
self.logger.pr('No nsw key in vasp_args, setting nsw=1000000')
vasp_args['nsw'] = 1000000
self.vasp_args = vasp_args
def preprocess(self, at, label, force_restart=False):
self.logger.pr('vasp client %d preprocessing atoms label %d' % (self.client_id, label))
# make a copy and then sort atoms in the same way that vasp
# calculator will when it writes POSCAR. We use a new
# calculator and store the sort order in the Atoms so it can
# be reversed when results are ready.
vasp = Vasp(**self.vasp_args)
vasp.initialize(at)
at = at.copy()
order = np.array(range(len(at)))
at.set_array('vasp_sort_order', order)
at = at[vasp.resort]
# finally, call the parent method
return Client.preprocess(self, at, label, force_restart)
def postprocess(self, at, label):
self.logger.pr('vasp client %d postprocessing atoms label %d' % (self.client_id, label))
# call the parent method first
at = Client.postprocess(self, at, label)
# restore original atom ordering
at = at[at.arrays['vasp_sort_order'].tolist()]
return at
def write_input_files(self, at, label):
global _chdir_lock
# For LOTF Simulations active number of quantum
# atoms vary and must wait to this stage in order for
# magnetic moments to be set properly. If magnetic moments
# not set defaults to 0.
self.vasp_args['magmom'] = at.get_initial_magnetic_moments()
vasp = Vasp(**self.vasp_args)
vasp.initialize(at)
# chdir not thread safe, so acquire global lock before using it
orig_dir = os.getcwd()
try:
_chdir_lock.acquire()
os.chdir(self.subdir)
if os.path.exists('OUTCAR'):
n = 1
while os.path.exists('OUTCAR.%d' % n):
n += 1
shutil.copyfile('OUTCAR', 'OUTCAR.%d' % n)
shutil.copyfile('POSCAR', 'POSCAR.%d' % n)
write_vasp('POSCAR', vasp.atoms_sorted,
symbol_count=vasp.symbol_count,
vasp5='5' in self.exe)
vasp.write_incar(at)
vasp.write_potcar()
vasp.write_kpoints()
finally:
os.chdir(orig_dir)
_chdir_lock.release()
class CastepClient(QMClient):
"""
Subclass of Client for running CASTEP calculations.
Initial input files are written in .cell and .param
formats, and subsequent communication is via sockets in REFTRAJ format.
"""
def __init__(self, client_id, exe, env=None, npj=1, ppn=1,
block=None, corner=None, shape=None,
jobname='socketcalc', rundir=None,
fmt='REFTRAJ', parmode=None, mpirun='mpirun',
mpirun_args=['-np'], logger=screen,
max_pos_diff=MAX_POS_DIFF_CASTEP,
max_cell_diff=MAX_CELL_DIFF_CASTEP,
**castep_args):
Client.__init__(self, client_id, exe, env, npj, ppn,
block, corner, shape, jobname, rundir,
fmt, parmode, mpirun, mpirun_args, logger,
max_pos_diff, max_cell_diff)
if 'task' not in castep_args:
self.logger.pr('No task key in castep_args, setting task=MD')
castep_args['task'] = 'MD'
if 'md_ensemble' not in castep_args:
self.logger.pr('No md_ensemble key in castep_args, setting md_ensemble=SKT')
castep_args['md_ensemble'] = 'SKT'
if 'md_num_iter' not in castep_args:
self.logger.pr('No md_num_iter key in castep_args, setting md_num_iter=1000000')
castep_args['md_num_iter'] = 1000000
castep_args['_rename_existing_dir'] = False
self.castep_args = castep_args
self.logger.pr('constructing Castep instance with args %r' % castep_args)
self.castep = Castep(directory=self.subdir, **castep_args)
self._orig_devel_code = ''
if self.castep.param.devel_code.value is not None:
self._orig_devel_code = self.castep.param.devel_code.value.strip()+'\n'
def preprocess(self, at, label, force_restart=False):
self.logger.pr('Castep client %d preprocessing atoms label %d' % (self.client_id, label))
# make a copy and then sort atoms by atomic number
# in the same way that Castep will internally. We store the sort
# order in the Atoms so it can be reversed when results are ready.
at = at.copy()
order = np.array(range(len(at)))
at.set_array('castep_sort_order', order)
resort = order[np.argsort(at.get_atomic_numbers())]
#print 'resort = ', resort
#print at.get_scaled_positions()[resort[0]]
at = at[resort]
#print at.get_scaled_positions()[0]
#print 'castep_sort_order', at.get_array('castep_sort_order')
# finally, call the parent method (potentially writing input files)
return Client.preprocess(self, at, label, force_restart)
def postprocess(self, at, label):
self.logger.pr('Castep client %d postprocessing atoms label %d' % (self.client_id, label))
# call the parent method first
at = Client.postprocess(self, at, label)
# restore original atom ordering
at = at[at.arrays['castep_sort_order'].tolist()]
return at
def write_input_files(self, at, label):
global _chdir_lock
devel_code = self._orig_devel_code
devel_code += ('SOCKET_IP=%s\nSOCKET_PORT=%d\nSOCKET_CLIENT_ID=%d\nSOCKET_LABEL=%d' % \
(self.server.ip, self.server.port, self.client_id, label))
self.castep.param.devel_code = devel_code
# chdir not thread safe, so acquire global lock before using it
orig_dir = os.getcwd()
try:
_chdir_lock.acquire()
os.chdir(self.subdir)
cellf = open('castep.cell', 'w')
write_castep_cell(cellf, at, castep_cell=self.castep.cell)
cellf.close()
write_param('castep.param', self.castep.param, force_write=True)
finally:
os.chdir(orig_dir)
_chdir_lock.release()
def extra_args(self, label=None):
return ['castep']
class SocketCalculator(Calculator):
"""
ASE-compatible calculator which communicates with remote
force engines via sockets using a (synchronous) AtomsServer.
"""
implemented_properties = ['energy', 'forces', 'stress']
default_parameters = {}
name = 'SocketCalculator'
def __init__(self, client, ip=None, atoms=None, port=0, logger=screen, bgq=False):
Calculator.__init__(self)
self.client = client
if ip is None:
ip = '127.0.0.1' # default to localhost
self.logger = logger
self.bgq=bgq
self.server = AtomsServerSync((ip, port), AtomsRequestHandler,
[self.client], logger=self.logger,
bgq=self.bgq)
self._label = 1
self.atoms = atoms
def calculate(self, atoms, properties, system_changes):
Calculator.calculate(self, atoms, properties, system_changes)
if system_changes: # if anything at all changed (could be made more fine-grained)
self.logger.pr('calculation triggered with properties={0}, system_changes={1}'.format(properties,
system_changes))
self.server.put(atoms, 0, self._label)
if self._label != 1:
# send atoms over socket, unless first time
self.logger.pr('socket calculator sending Atoms label={0}'.format(self._label))
self.server.handle_request()
# wait for results to be ready
self.logger.pr('socket calculator waiting for results label={0}'.format(self._label))
self.server.handle_request()
self._label += 1
[results] = self.server.get_results()
# we always compute energy, forces and stresses, regardless of what was requested
stress = -(results.info['virial']/results.get_volume())
self.results = {'energy': results.info['energy'],
'forces': results.arrays['force'],
'stress': full_3x3_to_Voigt_6_stress(stress)}
else:
self.logger.pr('calculation avoided with properties={0}, system_changes={1}'.format(properties,
system_changes))
def shutdown(self):
self.server.shutdown()
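# Hedged usage sketch (executable path, parameter file and client
# arguments are illustrative assumptions):
#
# client = QUIPClient(client_id=0,
#                     exe='/path/to/socket-enabled-quip',
#                     param_files=['params.xml'])
# calc = SocketCalculator(client)
# atoms.calc = calc
# print(atoms.get_potential_energy())
# calc.shutdown()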
| 41,228 | 39.861249 | 290 | py |
matscipy | matscipy-master/matscipy/atomic_strain.py | #
# Copyright 2014, 2020 Lars Pastewka (U. Freiburg)
# 2015, 2017 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Compute deformation gradient tensor and D^2_min measure for non-affine
displacements.
See: Falk, Langer, Phys. Rev. E 57, 7192 (1998)
"""
import numpy as np
import scipy
from matscipy.neighbours import mic, neighbour_list
###
def get_XIJ(nat, i_now, dr_now, dr_old):
"""
Calculates the X_{ij} matrix
"""
# Do an element-wise outer product
dr_dr = dr_now.reshape(-1,3,1)*dr_old.reshape(-1,1,3)
xij = np.zeros([nat,3,3])
for i in range(3):
for j in range(3):
# For each atom, sum over all neighbors
xij[:,i,j] = np.bincount(i_now, weights=dr_dr[:,i,j])
return xij
def get_YIJ(nat, i_now, dr_old):
"""
Calculates the Y_{ij} matrix
"""
# Just do an element-wise outer product
dr_dr = dr_old.reshape(-1,3,1)*dr_old.reshape(-1,1,3)
yij = np.zeros([nat,3,3])
for i in range(3):
for j in range(3):
# For each atom, sum over all neighbors
yij[:,i,j] = np.bincount(i_now, weights=dr_dr[:,i,j])
return yij
def array_inverse(A):
"""
Compute inverse for each matrix in a list of matrices.
This is faster than calling numpy.linalg.inv for each matrix.
"""
A = np.ascontiguousarray(A, dtype=float)
n_eq = A.shape[1]
identity = np.eye(n_eq)
def lapack_inverse(a):
b = np.copy(identity)
lu, piv, x, info = scipy.linalg.lapack.dgesv(a, b)
if info > 0:
raise np.linalg.LinAlgError('Singular matrix')
return x
return np.array([lapack_inverse(a) for a in A])
def get_delta_plus_epsilon_dgesv(nat, i_now, dr_now, dr_old):
"""
Calculate delta_ij+epsilon_ij, i.e. the deformation gradient matrix
"""
XIJ = get_XIJ(nat, i_now, dr_now, dr_old)
YIJ = get_YIJ(nat, i_now, dr_old)
YIJ_invert = array_inverse(YIJ)
# Perform sum_k X_ik Y_jk^-1
epsilon = np.sum(XIJ.reshape(-1,3,1,3)*YIJ_invert.reshape(-1,1,3,3), axis=3)
return epsilon
def get_delta_plus_epsilon(nat, i_now, dr_now, dr_old):
"""
Calculate delta_ij+epsilon_ij, i.e. the deformation gradient matrix
"""
epsilon = []
for i in range(nat):
mask = i_now==i
        x, residuals, rank, s = np.linalg.lstsq(dr_old[mask], dr_now[mask],
                                                rcond=None)
epsilon += [ x.T ]
return np.array(epsilon)
def get_D_square_min(atoms_now, atoms_old, i_now, j_now, delta_plus_epsilon=None):
"""
Calculate the D^2_min norm of Falk and Langer
"""
nat = len(atoms_now)
assert len(atoms_now) == len(atoms_old)
pos_now = atoms_now.positions
pos_old = atoms_old.positions
    # Compute current and old distance vectors. Note that current distance
    # vectors cannot be taken from the neighbor calculation, because neighbors
    # are calculated from the sheared cell while these distances need to come
    # from the unsheared cell. Taking the distance from the unsheared cell
    # makes periodic boundary conditions (and flipping of cell) a lot easier.
dr_now = mic(pos_now[i_now] - pos_now[j_now], atoms_now.cell)
dr_old = mic(pos_old[i_now] - pos_old[j_now], atoms_old.cell)
# Sanity check: Shape needs to be identical!
assert dr_now.shape == dr_old.shape
if delta_plus_epsilon is None:
# Get minimum strain tensor
delta_plus_epsilon = get_delta_plus_epsilon(nat, i_now, dr_now, dr_old)
# Spread epsilon out for each neighbor index
delta_plus_epsilon_n = delta_plus_epsilon[i_now]
# Compute D^2_min (residual of the least squares fit)
residual_n = np.sum(
(
dr_now-
np.sum(delta_plus_epsilon_n.reshape(-1,3,3)*dr_old.reshape(-1,1,3),
axis=2)
)**2,
axis=1)
# For each atom, sum over all neighbors
residual = np.bincount(i_now, weights=residual_n)
return delta_plus_epsilon, residual
def atomic_strain(atoms_now, atoms_old, cutoff=None, neighbours=None):
"""
Calculate deformation gradient tensor and D^2_min measure for non-affine
displacements.
    See: Falk, Langer, Phys. Rev. E 57, 7192 (1998)
Parameters:
-----------
atoms_now : ase.Atoms
Current atomic configuration
atoms_old : ase.Atoms
Reference atomic configuration
cutoff : float
Neighbor list cutoff.
neighbours : ( array_like, array_like )
Neighbor list. Automatically computed if not provided.
Returns:
--------
delta_plus_epsilon : array
3x3 deformation gradient tensor for each atom.
residual : array
D^2_min norm for each atom
"""
if neighbours is None:
if cutoff is None:
raise ValueError('Please provide either neighbor list or neighbor '
'list cutoff.')
# Get neighbours
i_now, j_now = neighbour_list("ij", atoms_now, cutoff)
elif cutoff is not None:
raise ValueError('Please provide either neighbor list or neighbor '
'list cutoff, not both.')
else:
i_now, j_now = neighbours
# Get strain gradient tensor and D square values
delta_plus_epsilon, residual = get_D_square_min(atoms_now, atoms_old, i_now,
j_now)
return delta_plus_epsilon, residual
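if __name__ == '__main__':
    # Hedged example (assumes ase is installed): deform a small fcc
    # crystal affinely; the fitted deformation gradient should match the
    # applied one and D^2_min should be numerically zero.
    from ase.build import bulk
    atoms_old = bulk('Cu', 'fcc', a=3.6, cubic=True).repeat((3, 3, 3))
    atoms_now = atoms_old.copy()
    F = np.eye(3)
    F[0, 1] = 0.01  # simple shear
    atoms_now.set_cell(np.dot(atoms_old.get_cell(), F.T), scale_atoms=True)
    dpe, d2min = atomic_strain(atoms_now, atoms_old, cutoff=3.0)
    print('max D^2_min =', d2min.max())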
| 6,118 | 29.748744 | 82 | py |
matscipy | matscipy-master/matscipy/molecules.py | #
# Copyright 2022 Lucas Frérot (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Classes that deal with interactions defined by connectivity."""
import re
import numpy as np
from ase.geometry import find_mic, get_angles, get_dihedrals
from ase import Atoms
class Molecules:
"""Similar to ase.Atoms, but for molecular data."""
_dtypes = {
"bonds": np.dtype([('type', np.int32), ('atoms', np.int32, 2)]),
"angles": np.dtype([('type', np.int32), ('atoms', np.int32, 3)]),
"dihedrals": np.dtype([('type', np.int32), ('atoms', np.int32, 4)]),
}
def __init__(self,
bonds_connectivity=None,
bonds_types=None,
angles_connectivity=None,
angles_types=None,
dihedrals_connectivity=None,
dihedrals_types=None):
"""
Initialize with connectivity data.
Parameters
----------
bonds_connectivity : ArrayLike
Array defining bonds with atom ids.
Expected shape is ``(nbonds, 2)``.
bonds_types : ArrayLike
Array defining the bond types. Expected shape is ``nbonds``.
angles_connectivity : ArrayLike
Array defining angles with atom ids.
Expected shape is ``(nangles, 3)``.
angles_types : ArrayLike
Array defining the angle types. Expected shape is ``nangles``.
dihedrals_connectivity : ArrayLike
            Array defining dihedrals with atom ids.
            Expected shape is ``(ndihedrals, 4)``.
dihedrals_types : ArrayLike
Array defining the dihedral types.
Expected shape is ``ndihedrals``.
"""
default_type = 1
# Defining data arrays
for data, dtype in self._dtypes.items():
self.__dict__[data] = np.array([], dtype=dtype)
if bonds_connectivity is not None:
self.bonds.resize(len(bonds_connectivity))
self.bonds["atoms"][:] = bonds_connectivity
self.bonds["type"][:] = bonds_types \
if bonds_types is not None else default_type
if angles_connectivity is not None:
self.angles.resize(len(angles_connectivity))
self.angles["atoms"][:] = angles_connectivity
self.angles["type"][:] = angles_types \
if angles_types is not None else default_type
if dihedrals_connectivity is not None:
self.dihedrals.resize(len(dihedrals_connectivity))
self.dihedrals["atoms"][:] = dihedrals_connectivity
self.dihedrals["type"][:] = dihedrals_types \
if dihedrals_types is not None else default_type
def get_distances(self, atoms) -> np.ndarray:
"""Compute distances for all bonds."""
positions = [
atoms.positions[self.bonds["atoms"][:, i]]
for i in range(2)
]
# Return distances only
return find_mic(positions[1] - positions[0],
atoms.cell, atoms.pbc)[1]
def get_angles(self, atoms) -> np.ndarray:
"""Compute angles (degrees) for all angles."""
positions = [
atoms.positions[self.angles["atoms"][:, i]]
for i in range(3)
]
# WARNING: returns angles in degrees
return get_angles(positions[1] - positions[0],
positions[2] - positions[0],
atoms.cell, atoms.pbc)
def get_dihedrals(self, atoms) -> np.ndarray:
"""Compute angles (degrees) for all dihedrals."""
positions = [
atoms.positions[self.dihedrals["atoms"][:, i]]
for i in range(4)
]
return get_dihedrals(positions[1] - positions[0],
positions[2] - positions[1],
positions[3] - positions[2],
atoms.cell, atoms.pbc)
@staticmethod
def from_atoms(atoms: Atoms):
"""Construct a Molecules object from ase.Atoms object."""
kwargs = {}
def parse_tuples(regex, permutation, label):
all_tuples = np.zeros((0, len(permutation)), np.int32)
types = np.array([], np.int32)
tuples = atoms.arrays[label]
bonded = np.where(tuples != '_')[0]
for i, per_atom in zip(bonded, tuples[bonded]):
per_atom = np.array(regex.findall(per_atom), np.int32)
new_tuples = np.array([
np.full(per_atom.shape[0], i, np.int32),
*(per_atom[:, :-1].T)
])
all_tuples = np.append(all_tuples,
new_tuples[permutation, :].T,
axis=0)
types = np.append(types, per_atom[:, -1])
kwargs[f'{label}_connectivity'] = all_tuples
kwargs[f'{label}_types'] = types
if 'bonds' in atoms.arrays:
bre = re.compile(r'(\d+)\((\d+)\)')
parse_tuples(bre, (0, 1), 'bonds')
if 'angles' in atoms.arrays:
are = re.compile(r'(\d+)-(\d+)\((\d+)\)')
parse_tuples(are, (0, 1, 2), 'angles')
if 'dihedrals' in atoms.arrays:
dre = re.compile(r'(\d+)-(\d+)-(\d+)\((\d+)\)')
parse_tuples(dre, (0, 1, 2, 3), 'dihedrals')
return Molecules(**kwargs)
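if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the library API):
    # a water-like molecule with two O-H bonds and one H-O-H angle.
    water = Atoms("OH2",
                  positions=[[0.0, 0.0, 0.0],
                             [0.96, 0.0, 0.0],
                             [-0.24, 0.93, 0.0]],
                  cell=[10.0, 10.0, 10.0], pbc=True)
    mol = Molecules(bonds_connectivity=[[0, 1], [0, 2]],
                    angles_connectivity=[[0, 1, 2]])
    print("bond lengths:", mol.get_distances(water))    # both ~0.96 A
    print("H-O-H angle (deg):", mol.get_angles(water))  # ~104.5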
| 6,141 | 36.224242 | 76 | py |
matscipy | matscipy-master/matscipy/spatial_correlation_function.py | #
# Copyright 2016, 2021 Lars Pastewka (U. Freiburg)
# 2016, 2018 Richard Jana (KIT & U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
Compute the spatial correlation of a given function. Distances larger
than a cut-off are calculated by Fourier transformation, while shorter
distances are calculated directly.
coords.shape=(n_atoms,3)
cell_vectors=[[x1,y1,z1],[x2,y2,z2],[x3,y3,z3]]
options:
#1)dimensions of correlation [dim=]
a: along all 3 dimensions -> total distance: None (default)
b: along only one dimension, ignoring other components: 0..2
#2)mode to assign the atomic values to the FFT grid points [delta=]
a: assign value to the nearest grid point: simple (default)
b: assign value to the 8 nearest grid points, distributed
proportional to their distance: else
#3)normalisation by variance of values [norm=]
a: off: False (default)
b: on: True
'''
import numpy as np
from math import floor, ceil
from matscipy.neighbours import neighbour_list
from ase import Atoms
def betrag(vec):
    # "betrag" is German for magnitude: returns the Euclidean norm
    return np.sqrt((vec**2).sum())
def max_rad(cell_vectors):
#calculate length cutoff from cell vectors
x = cell_vectors[:,0]
y = cell_vectors[:,1]
z = cell_vectors[:,2]
r = np.zeros(3)
nor = np.zeros(3)
nor = np.cross(y,z)
r[0] = np.abs((x*nor).sum()/betrag(x)/betrag(nor))*betrag(x)/2.0
nor = np.cross(z,x)
r[1] = np.abs((y*nor).sum()/betrag(y)/betrag(nor))*betrag(y)/2.0
nor = np.cross(x,y)
r[2] = np.abs((z*nor).sum()/betrag(z)/betrag(nor))*betrag(z)/2.0
return r.min()
def spatial_correlation_function(atoms, values, length_cutoff=None,
output_gridsize=None, FFT_cutoff=None,
approx_FFT_gridsize=None, dim=None,
delta='simple', norm=False):
# Make sure values are floats
values = np.asarray(values, dtype=float)
xyz = atoms.get_positions()
abc = atoms.get_scaled_positions() % 1.0
cell_vectors = atoms.cell.T
n_atoms = len(xyz)
if length_cutoff is None:
length_cutoff = np.floor(max_rad(cell_vectors))
if FFT_cutoff is None:
FFT_cutoff = 7.5
if output_gridsize is None:
output_gridsize = 0.1
if approx_FFT_gridsize is None:
approx_FFT_gridsize = 1.0
n_lattice_points = np.array(np.ceil(cell_vectors.diagonal()
/approx_FFT_gridsize),
dtype=int)
FFT_gridsize = cell_vectors.diagonal()/n_lattice_points
if delta == 'simple':
# calc lattice values (add to nearest lattice point)
Q = np.zeros(shape=(n_lattice_points))
for _abc, _q in zip(abc, values):
x,y,z = np.array(_abc*n_lattice_points, dtype=int) \
%n_lattice_points
Q[x,y,z] += _q
else:
        # proportional distribution over the 8 neighbouring grid points
        Q = np.zeros(shape=(n_lattice_points))
        a1, a2, a3 = cell_vectors.T
        for _abc, _q in zip(abc, values):
x,y,z = _abc*(n_lattice_points-1)
aes = np.array([np.floor(x),np.ceil(x)] \
).reshape(-1, 1, 1, 1)/(n_lattice_points[0]-1)
bes = np.array([np.floor(y),np.ceil(y)] \
).reshape( 1,-1, 1, 1)/(n_lattice_points[1]-1)
ces = np.array([np.floor(z),np.ceil(z)] \
).reshape( 1, 1,-1, 1)/(n_lattice_points[2]-1)
            # cartesian position of the atom; replaces the undefined
            # cartesianN() helper
            cart = _abc[0]*a1 + _abc[1]*a2 + _abc[2]*a3
            octo = (aes*a1.reshape(1,1,1,-1)
                    + bes*a2.reshape(1,1,1,-1)
                    + ces*a3.reshape(1,1,1,-1)
                    - cart.reshape(1,1,1,-1))
            octo = 1./(np.sqrt((octo**2).sum(axis=3)))
            # slice bounds must be integers
            Q[int(np.floor(x)):int(np.ceil(x))+1,
              int(np.floor(y)):int(np.ceil(y))+1,
              int(np.floor(z)):int(np.ceil(z))+1] += octo/octo.sum()*_q
# FFT
Q_schlange = np.fft.fftn(Q)
C_schlange = Q_schlange*Q_schlange.conjugate()
C = np.fft.ifftn(C_schlange)*n_lattice_points.prod() \
/n_atoms/n_atoms
C = np.fft.ifftshift(C)
if dim is None:
# distance mapping (for floor/ceil convention see *i*fftshift
# definition)
a = np.reshape(np.arange(-floor(n_lattice_points[0]/2.),
ceil(n_lattice_points[0]/2.),1)
/n_lattice_points[0],(-1, 1, 1, 1))
b = np.reshape(np.arange(-floor(n_lattice_points[1]/2.),
ceil(n_lattice_points[1]/2.),1)
/n_lattice_points[1],( 1,-1, 1, 1))
c = np.reshape(np.arange(-floor(n_lattice_points[2]/2.),
ceil(n_lattice_points[2]/2.),1)
/n_lattice_points[2],( 1, 1,-1, 1))
a1, a2, a3 = cell_vectors.T
r = a*a1.reshape(1,1,1,-1)+b*a2.reshape(1,1,1,-1) \
+c*a3.reshape(1,1,1,-1)
dist = np.sqrt((r**2).sum(axis=3))
elif 0 <= dim <3:
# directional SCFs
# for floor/ceil convention see *i*fftshift definition
a = np.reshape(np.arange(-floor(n_lattice_points[0]/2.),
ceil(n_lattice_points[0]/2.),1)
/n_lattice_points[0],(-1, 1, 1, 1))
b = np.reshape(np.arange(-floor(n_lattice_points[1]/2.),
ceil(n_lattice_points[1]/2.),1)
/n_lattice_points[1],( 1,-1, 1, 1))
c = np.reshape(np.arange(-floor(n_lattice_points[2]/2.),
ceil(n_lattice_points[2]/2.),1)
/n_lattice_points[2],( 1, 1,-1, 1))
a1, a2, a3 = cell_vectors.T
r = a*a1.reshape(1,1,1,-1) +b*a2.reshape(1,1,1,-1) \
+c*a3.reshape(1,1,1,-1)
dist = np.abs(r[:,:,:,dim]) # use indices to access directions
else:
        raise ValueError('invalid correlation direction: ' + str(dim))
nbins = int(length_cutoff/output_gridsize)
bins = np.arange(0, length_cutoff+length_cutoff/nbins,
length_cutoff/nbins)
SCF, edges = np.histogram(np.ravel(dist), bins=bins,
weights=np.ravel(np.real(C)))
n, edges = np.histogram(np.reshape(dist,(-1,1)), bins=bins)
n[n==0] = 1
SCF /= n
# Alternative to the above three lines:
# SCF *= atoms.get_volume()/np.prod(n_lattice_points) / slice_volume
if norm:
v_2_mean = (values**2).mean()
v_mean_2 = (values.mean())**2
SCF = (SCF-v_mean_2)/(v_2_mean-v_mean_2)
return SCF, (edges[1:]+edges[:-1])/2
def spatial_correlation_function_near(atoms, values, gridsize=None,
cutoff=None, norm=False):
if gridsize is None:
gridsize = 0.1
if cutoff is None:
cutoff = 7.5
    # close range exact calculation
    n_atoms = len(atoms)
    nbins = int(cutoff/gridsize)+1
    # histogram edges spaced by `gridsize` (previously undefined)
    bins = np.linspace(0.0, cutoff, nbins)
    index1, index2, dist = neighbour_list('ijd', atoms, cutoff=cutoff)
SCF_near, edges = np.histogram(dist, bins=bins,
weights=values[index1]
*values[index2])
slice_volume = 4*np.pi/3 * (edges[1:]**3-edges[:-1]**3)
SCF_near *= atoms.get_volume()/n_atoms**2 / slice_volume
if norm:
v_2_mean = (values**2).mean()
v_mean_2 = (values.mean())**2
SCF_near = (SCF_near-v_mean_2)/(v_2_mean-v_mean_2)
return SCF_near, (edges[1:]+edges[:-1])/2
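if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the library API):
    # correlate an uncorrelated random per-atom scalar on a small fcc
    # crystal; the normalised SCF should decay towards zero with distance.
    from ase.build import bulk as build_bulk
    atoms = build_bulk("Cu", cubic=True) * (6, 6, 6)
    values = np.random.rand(len(atoms))
    scf, r = spatial_correlation_function(atoms, values, norm=True)
    scf_near, r_near = spatial_correlation_function_near(atoms, values,
                                                         norm=True)
    print(r[:3], scf[:3])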
| 8,104 | 37.966346 | 72 | py |
matscipy | matscipy-master/matscipy/deformation.py | #
# Copyright 2017 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tools related to homogenously deformed volumes.
"""
import numpy as np
###
def get_shear_distance(a):
"""
Returns the distance a volume has moved during simple shear. Considers
either Lees-Edwards boundary conditions or sheared cells.
"""
cx, cy, cz = a.cell
if 'shear_dx' in a.info:
assert abs(cx[1]) < 1e-12, 'cx[1] = {0}'.format(cx[1])
assert abs(cx[2]) < 1e-12, 'cx[2] = {0}'.format(cx[2])
        assert abs(cy[0]) < 1e-12, 'cy[0] = {0}'.format(cy[0])
assert abs(cy[2]) < 1e-12, 'cy[2] = {0}'.format(cy[2])
assert abs(cz[0]) < 1e-12, 'cz[0] = {0}'.format(cz[0])
assert abs(cz[1]) < 1e-12, 'cz[1] = {0}'.format(cz[1])
dx, dy, dz = a.info['shear_dx']
else:
assert abs(cx[1]) < 1e-12, 'cx[1] = {0}'.format(cx[1])
assert abs(cx[2]) < 1e-12, 'cx[2] = {0}'.format(cx[2])
assert abs(cy[0]) < 1e-12, 'cy[0] = {0}'.format(cy[0])
assert abs(cy[2]) < 1e-12, 'cy[2] = {0}'.format(cy[2])
dx, dy, sz = cz
return dx, dy
###
class RemoveSimpleShearDeformation:
"""
Remove a homogeneous cell deformation given an (iterable) trajectory
object. This will take proper care of cells that are instantaneously
flipped from +0.5 strain to -0.5 strain during simple shear, as e.g.
generated by LAMMPS.
"""
def __init__(self, traj):
self.traj = traj
self.last_d = [ ]
self.sheared_cells = [ ]
self.unsheared_cells = [ ]
def _fill_cell_info_upto(self, i):
        # Nothing to do if the cell info is already cached up to frame i.
        if i < len(self.last_d):
            return
        # Iterate over the full trajectory up to frame i and generate a
        # list of cell vectors.
if len(self.last_d) == 0:
i0 = 0
last_dx, last_dy = get_shear_distance(self.traj[0])
dx = last_dx
dy = last_dy
else:
i0 = len(self.last_d)
last_dx, last_dy = self.last_d[i0-1]
dx, dy, dummy = self.sheared_cells[i0-1][2]
for a in self.traj[i0:i+1]:
sx, sy, sz = a.cell.diagonal()
cur_dx, cur_dy = get_shear_distance(a)
while cur_dx-last_dx < -sx/2:
cur_dx += sx
dx += cur_dx-last_dx
while cur_dy-last_dy < -sy/2:
cur_dy += sy
dy += cur_dy-last_dy
# Store last shear distance
last_dx = cur_dx
last_dy = cur_dy
# Store cells and shear distance
self.last_d += [ ( last_dx, last_dy ) ]
self.sheared_cells += [ np.array([[sx,0,0],[0,sy,0],[dx,dy,sz]]) ]
self.unsheared_cells += [ np.array([sx,sy,sz]) ]
def __getitem__(self, i=-1):
if i < 0:
i = len(self) + i
if i < 0 or i >= len(self):
raise IndexError('Trajectory index out of range.')
self._fill_cell_info_upto(i)
a = self.traj[i]
# Set true cell shape
a.set_cell(self.sheared_cells[i], scale_atoms=False)
# Unshear
a.set_cell(self.unsheared_cells[i], scale_atoms=True)
# Wrap to cell
a.set_scaled_positions(a.get_scaled_positions()%1.0)
a.info['true_cell'] = self.sheared_cells[i]
return a
def __len__(self):
return len(self.traj)
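if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the library API):
    # build a tiny in-memory "trajectory" of progressively sheared cells
    # and recover the unsheared frames.
    from ase import Atoms
    traj = [Atoms("Cu2", positions=[[0.0, 0.0, 0.0], [1.8, 1.8, 1.8]],
                  cell=[[3.6, 0.0, 0.0], [0.0, 3.6, 0.0],
                        [0.4 * step, 0.0, 3.6]], pbc=True)
            for step in range(3)]
    unsheared = RemoveSimpleShearDeformation(traj)
    last = unsheared[-1]
    print(last.cell)               # diagonal (unsheared) cell
    print(last.info['true_cell'])  # original sheared cell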
| 4,220 | 30.266667 | 78 | py |
matscipy | matscipy-master/matscipy/visualise.py | #
# Copyright 2014-2015 James Kermode (Warwick U.)
# 2014-2015 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Interface from ASE to the chemview Jupyter visualiser.
Your Jupyter notebook will need to contain
from chemview import enable_notebook
enable_notebook()
"""
###
import itertools
import numpy as np
from ase.data import covalent_radii
from matscipy.neighbours import neighbour_list
from chemview import MolecularViewer
###
def view(a, colour=None, bonds=True, cell=True,
scale=10.0, cutoff_scale=1.2,
cmap=None, vmin=None, vmax=None):
topology = {}
topology['atom_types'] = a.get_chemical_symbols()
if bonds:
n = a.numbers
maxn = n.max()
cutoffs = np.zeros([maxn+1, maxn+1])
for n1, n2 in itertools.product(n, n):
cutoffs[n1, n2] = cutoff_scale*(covalent_radii[n1]+covalent_radii[n2])
# Construct a bond list
i, j, S = neighbour_list('ijS',
a, cutoffs,
np.array(a.numbers, dtype=np.int32))
m = np.logical_and(i<j, (S==0).all(axis=1))
i = i[m]
j = j[m]
topology['bonds'] = [(x, y) for x, y in zip(i, j)]
colorlist = None
if colour is not None:
colour = np.array(colour, dtype=np.float64)
if cmap is None:
from matplotlib.cm import jet
cmap = jet
if vmin is None:
vmin = np.min(colour)
if vmax is None:
vmax = np.max(colour)
colour = (colour - vmin)/(vmax - vmin)
        # %x formatting requires integers; scale to the 0-255 range
        colorlist = ['0x%02x%02x%02x' % (int(r*255), int(g*255), int(b*255))
                     for (r, g, b, alpha) in cmap(colour)]
mv = MolecularViewer(a.positions/scale,
topology=topology)
mv.ball_and_sticks(colorlist=colorlist)
if cell:
O = np.zeros(3, dtype=np.float32)
La, Lb, Lc = a.cell.astype(np.float32)/scale
start = np.r_[O, O, O,
O + Lb, O + Lc, O + La,
O + Lc, O + La, O + Lb,
O + Lb + Lc, O + La + Lc, O + La + Lb]
end = np.r_[O + La, O + Lb, O + Lc,
O + Lb + La, O + Lc + Lb, O + La + Lc,
O + Lc + La, O + La + Lb, O + Lb + Lc,
O + Lb + Lc + La, O + La + Lc + Lb, O + La + Lb + Lc]
rgb = [0xFF0000, 0x00FF00, 0x0000FF]*4
mv.add_representation('lines', {'startCoords': start,
'endCoords': end,
'startColors': rgb,
'endColors': rgb})
return mv
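# Usage sketch (inside a Jupyter notebook, after calling enable_notebook()
# as described in the module docstring; values are illustrative):
#
#     from ase.build import bulk
#     a = bulk('Cu', cubic=True) * (3, 3, 3)
#     mv = view(a, colour=a.positions[:, 2])  # colour atoms by z coordinate
#     mv  # display the MolecularViewer widget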
| 3,388 | 32.22549 | 82 | py |
matscipy | matscipy-master/matscipy/dislocation.py | #
# Copyright 2019, 2021 Lars Pastewka (U. Freiburg)
# 2018-2023 Petr Grigorev (Warwick U.)
# 2020 James Kermode (Warwick U.)
# 2019 Arnaud Allera (U. Lyon 1)
# 2019 Wolfram G. Nöhring (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tools for studying structure and movement of dislocations."""
import numpy as np
from scipy.optimize import minimize
from ase.lattice.cubic import (BodyCenteredCubic, FaceCenteredCubic,
Diamond, SimpleCubicFactory)
from ase.constraints import FixAtoms, StrainFilter
from ase.optimize import FIRE
from ase.build import bulk
from ase.calculators.lammpslib import LAMMPSlib
from ase.units import GPa # unit conversion
from ase.io import read
from matscipy.neighbours import neighbour_list, mic
from matscipy.elasticity import fit_elastic_constants
def make_screw_cyl(alat, C11, C12, C44,
cylinder_r=10, cutoff=5.5,
hard_core=False,
center=[0., 0., 0.],
l_extend=[0., 0., 0.],
symbol='W'):
"""Makes screw dislocation using atomman library
Parameters
----------
alat : float
Lattice constant of the material.
C11 : float
C11 elastic constant of the material.
C12 : float
C12 elastic constant of the material.
C44 : float
C44 elastic constant of the material.
cylinder_r : float
radius of cylinder of unconstrained atoms around the
dislocation in angstrom
cutoff : float
Potential cutoff for Marinica potentials for FS cutoff = 4.4
    hard_core : bool
        If True, the displacement field is reversed (-u), creating a hard
        core instead of the default soft core.
    center : tuple of floats
        The position of the dislocation core and the center of the
        cylinder with FixAtoms condition
    l_extend : list of floats
        Extension of the box, used for creation of the initial
        dislocation position with a box equivalent to the final position
symbol : string
Symbol of the element to pass to ase.lattice.cubic.SimpleCubicFactory
default is "W" for tungsten
Returns
-------
disloc : ase.Atoms object
screw dislocation cylinder.
bulk : ase.Atoms object
bulk disk used to generate dislocation
u : np.array
displacement per atom.
"""
from atomman import ElasticConstants
from atomman.defect import Stroh
# Create a Stroh object with junk data
stroh = Stroh(ElasticConstants(C11=141, C12=110, C44=98),
np.array([0, 0, 1]))
axes = np.array([[1, 1, -2],
[-1, 1, 0],
[1, 1, 1]])
c = ElasticConstants(C11=C11, C12=C12, C44=C44)
burgers = alat * np.array([1., 1., 1.])/2.
# Solving a new problem with Stroh.solve
stroh.solve(c, burgers, axes=axes)
# test the solution that it does not crash
# pos_test = uc.set_in_units(np.array([12.4, 13.5, -10.6]), 'angstrom')
# disp = stroh.displacement(pos_test)
# print("displacement =", uc.get_in_units(disp, 'angstrom'), 'angstrom')
unit_cell = BodyCenteredCubic(directions=axes.tolist(),
size=(1, 1, 1), symbol=symbol,
pbc=(False, False, True),
latticeconstant=alat)
# make the dislocation core center of the box
disloCenterX = alat * np.sqrt(6.)/6.0
disloCenterY = alat * np.sqrt(2.)/6.0
unit_cell.positions[:, 0] -= disloCenterX
unit_cell.positions[:, 1] -= disloCenterY
# shift to move the fixed atoms boundary condition for the
# configuration with shifted dislocation core
shift_x = 2.0 * center[0]
shift_y = 2.0 * center[1]
l_shift_x = 2.0 * l_extend[0]
l_shift_y = 2.0 * l_extend[1]
# size of the cubic cell as a 112 direction
Lx = int(round((cylinder_r + 3.*cutoff + shift_x + l_shift_x)
/ (alat * np.sqrt(6.))))
# size of the cubic cell as a 110 direction
Ly = int(round((cylinder_r + 3.*cutoff + shift_y + l_shift_y)
/ (alat * np.sqrt(2.))))
# factor 2 to make sure odd number of images is translated
# it is important for the correct centering of the dislocation core
bulk = unit_cell * (2*Lx, 2*Ly, 1)
# make 0, 0, at the center
bulk.positions[:, 0] -= Lx * alat * np.sqrt(6.)
bulk.positions[:, 1] -= Ly * alat * np.sqrt(2.)
# wrap
# bulk.set_scaled_positions(bulk.get_scaled_positions())
# apply shear here:
# bulk.cell *= D
# bulk.positions *= D
x, y, z = bulk.positions.T
radius_x_y_zero = np.sqrt(x**2 + y**2)
mask_zero = radius_x_y_zero < cylinder_r + 2.*cutoff
radius_x_y_center = np.sqrt((x - center[0])**2 + (y - center[1])**2)
mask_center = radius_x_y_center < cylinder_r + 2.*cutoff
radius_x_y_l_shift = np.sqrt((x - l_extend[0])**2 + (y - l_extend[1])**2)
mask_l_shift = radius_x_y_l_shift < cylinder_r + 2.*cutoff
final_mask = mask_center | mask_zero | mask_l_shift
# leave only atoms inside the cylinder
bulk = bulk[final_mask]
disloc = bulk.copy()
# calculate and apply the displacements for atomic positions
u = stroh.displacement(bulk.positions - center)
u = -u if hard_core else u
disloc.positions += u
x, y, z = disloc.positions.T
radius_x_y_zero = np.sqrt(x**2 + y**2)
mask_zero = radius_x_y_zero > cylinder_r
radius_x_y_center = np.sqrt((x - center[0])**2 + (y - center[1])**2)
mask_center = radius_x_y_center > cylinder_r
radius_x_y_l_shift = np.sqrt((x - l_extend[0])**2 + (y - l_extend[1])**2)
mask_l_shift = radius_x_y_l_shift > cylinder_r
fix_mask = mask_center & mask_zero & mask_l_shift
# leave only atoms inside the cylinder
fix_atoms = FixAtoms(mask=fix_mask)
disloc.set_constraint(fix_atoms)
# make an "region" array to map bulk and fixed atoms
# all atoms are "MM" by default
region = np.full_like(disloc, "MM")
region[fix_mask] = np.full_like(disloc[fix_mask], "fixed")
disloc.new_array("region", region)
# center the atoms to avoid "lost atoms" error by lammps
center_shift = np.diagonal(bulk.cell).copy()
center_shift[2] = 0.0 # do not shift along z direction
disloc.positions += center_shift / 2.0
bulk.positions += center_shift / 2.0
return disloc, bulk, u
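# Usage sketch (illustrative numbers loosely resembling tungsten; the
# elastic constants are in GPa, as returned by get_elastic_constants()):
#
#     disloc, bulk_cyl, u = make_screw_cyl(3.165, 523.0, 203.0, 160.0,
#                                          cylinder_r=20, cutoff=4.4)
#     disloc.write("screw_cyl.xyz")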
def make_edge_cyl(alat, C11, C12, C44,
cylinder_r=10, cutoff=5.5,
symbol='W'):
'''
makes edge dislocation using atomman library
cylinder_r - radius of cylinder of unconstrained atoms around the
dislocation in angstrom
cutoff - potential cutoff for Marinica potentials for FS cutoff = 4.4
symbol : string
Symbol of the element to pass to ase.lattice.cubic.SimpleCubicFactory
default is "W" for tungsten
'''
from atomman import ElasticConstants
from atomman.defect import Stroh
# Create a Stroh object with junk data
stroh = Stroh(ElasticConstants(C11=141, C12=110, C44=98),
np.array([0, 0, 1]))
axes = np.array([[1, 1, 1],
[1, -1, 0],
[1, 1, -2]])
c = ElasticConstants(C11=C11, C12=C12, C44=C44)
burgers = alat * np.array([1., 1., 1.])/2.
# Solving a new problem with Stroh.solve
# Does not work with the new version of atomman
stroh.solve(c, burgers, axes=axes)
unit_cell = BodyCenteredCubic(directions=axes.tolist(),
                                  size=(1, 1, 1), symbol=symbol,
pbc=(False, False, True),
latticeconstant=alat)
bulk = unit_cell.copy()
# shift to make the zeros of the cell between the atomic planes
# and under the midplane on Y axes
X_midplane_shift = (1.0/3.0)*alat*np.sqrt(3.0)/2.0
Y_midplane_shift = 0.25*alat*np.sqrt(2.0)
bulk_shift = [X_midplane_shift,
Y_midplane_shift,
0.0]
bulk.positions += bulk_shift
tot_r = cylinder_r + cutoff + 0.01
Lx = int(round(tot_r/(alat*np.sqrt(3.0)/2.0)))
Ly = int(round(tot_r/(alat*np.sqrt(2.))))
# factor 2 to make sure odd number of images is translated
# it is important for the correct centering of the dislocation core
bulk = bulk * (2*Lx, 2*Ly, 1)
center_shift = [Lx * alat * np.sqrt(3.0)/2.,
Ly * alat * np.sqrt(2.),
0.0]
bulk.positions -= center_shift
ED = bulk.copy()
disp = stroh.displacement(ED.positions)
ED.positions += disp
x, y, z = ED.positions.T
radius_x_y_zero = np.sqrt(x**2 + y**2)
mask = radius_x_y_zero < tot_r
ED = ED[mask]
bulk = bulk[mask]
bulk.write("before.xyz")
ED.write("after_disp.xyz")
x, y, z = ED.positions.T
radius_x_y_zero = np.sqrt(x**2 + y**2)
mask_zero = radius_x_y_zero > cylinder_r
fix_atoms = FixAtoms(mask=mask_zero)
ED.set_constraint(fix_atoms)
x, y, z = bulk.positions.T
# move lower left segment
bulk.positions[(y < 0.0) & (x < X_midplane_shift)] -= [alat * np.sqrt(3.0) / 2.0, 0.0, 0.0]
# make the dislocation extra half plane center
bulk.positions += [(1.0/3.0)*alat*np.sqrt(3.0)/2.0, 0.0, 0.0]
return ED, bulk
def plot_vitek(dislo, bulk,
alat=3.16, plot_axes=None, xyscale=10):
"""
Plots vitek map from ase configurations.
Parameters
----------
dislo : ase.Atoms
Dislocation configuration.
bulk : ase.Atoms
Corresponding bulk configuration for calculation of displacements.
alat : float
Lattice parameter for calculation of neighbour list cutoff.
plot_axes : matplotlib.Axes.axes object
Existing axes to plot on, allows to pass existing matplotlib axes
have full control of the graph outside the function.
Makes possible to plot multiple differential displacement
maps using subplots.
Default is None, then new graph is created by plt.subplots()
Description of parameter `plot_axes`.
xyscale : float
xyscale of the graph
Returns
-------
None
"""
from atomman import load
from atomman.defect import differential_displacement
lengthB = 0.5*np.sqrt(3.)*alat
burgers = np.array([0.0, 0.0, lengthB])
base_system = load("ase_Atoms", bulk)
disl_system = load("ase_Atoms", dislo)
neighborListCutoff = 0.95 * alat
# plot window is +-10 angstroms from center in x,y directions,
# and one Burgers vector thickness along z direction
x, y, _ = bulk.positions.T
plot_range = np.array([[x.mean() - xyscale, x.mean() + xyscale],
[y.mean() - xyscale, y.mean() + xyscale],
[-0.1, alat * 3.**(0.5) / 2.]])
# This scales arrows such that b/2 corresponds to the
# distance between atoms on the plot
plot_scale = 1.885618083
_ = differential_displacement(base_system, disl_system,
burgers,
cutoff=neighborListCutoff,
xlim=plot_range[0],
ylim=plot_range[1],
zlim=plot_range[2],
matplotlib_axes=plot_axes,
plot_scale=plot_scale)
def show_NEB_configurations(images, bulk, xyscale=7,
show=True, core_positions=None):
"""
Plots Vitek differential displacement maps for the list of images
for example along the NEB path.
Parameters
----------
images : list of ase.Atoms
List of configurations with dislocations.
bulk : ase.Atoms
Corresponding bulk configuration for calculation of displacements.
xyscale : float
xyscale of the graph
show : bool
Show the figure after plotting. Default is True.
core_positions : list
[x, y] position of dislocation core to plot
Returns
-------
figure
If the show is False else returns None
"""
import matplotlib.pyplot as plt
n_images = len(images)
fig2 = plt.figure(figsize=(n_images * 4, 4))
for i, image in enumerate(images):
ax1 = fig2.add_subplot(1, n_images, i + 1)
plot_vitek(image, bulk, plot_axes=ax1, xyscale=xyscale)
if core_positions is not None:
x, y = core_positions[i]
ax1.scatter(x, y, marker="+", s=200, c='C1')
if show:
fig2.show()
return None
else:
return fig2
def show_configuration(disloc, bulk, u, fixed_mask=None):
"""shows the displacement fixed atoms."""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(16, 4))
ax1 = fig.add_subplot(131)
ax1.set_title(r"z displacement, $\AA$")
sc = ax1.scatter(bulk.positions[:, 0], bulk.positions[:, 1], c=u.T[2])
ax1.axvline(0.0, color="red", linestyle="dashed")
ax1.set_xlabel(r"x, $\AA$")
ax1.set_ylabel(r"y, $\AA$")
plt.colorbar(sc)
ax2 = fig.add_subplot(132)
ax2.set_title(r"x displacement, $\AA$")
sc = ax2.scatter(bulk.positions[:, 0], bulk.positions[:, 1], c=u.T[0])
ax2.set_xlabel(r"x, $\AA$")
ax2.set_ylabel(r"y, $\AA$")
plt.colorbar(sc, format="%.1e")
ax3 = fig.add_subplot(133)
ax3.set_title(r"y displacement, $\AA$")
sc = ax3.scatter(bulk.positions[:, 0], bulk.positions[:, 1], c=u.T[1])
plt.colorbar(sc, format="%.1e")
ax3.set_xlabel(r"x, $\AA$")
ax3.set_ylabel(r"y, $\AA$")
if fixed_mask is not None:
ax1.scatter(bulk.positions[fixed_mask, 0],
bulk.positions[fixed_mask, 1], c="k")
ax2.scatter(bulk.positions[fixed_mask, 0],
bulk.positions[fixed_mask, 1], c="k")
ax3.scatter(bulk.positions[fixed_mask, 0],
bulk.positions[fixed_mask, 1], c="k")
plt.show()
return None
def get_elastic_constants(pot_path=None,
calculator=None,
delta=1e-2,
symbol="W"):
"""
    return lattice parameter, and cubic elastic constants: C11, C12, C44
using matscipy function
pot_path - path to the potential
symbol : string
Symbol of the element to pass to ase.lattice.cubic.SimpleCubicFactory
default is "W" for tungsten
"""
unit_cell = bulk(symbol, cubic=True)
if (pot_path is not None) and (calculator is None):
# create lammps calculator with the potential
lammps = LAMMPSlib(lmpcmds=["pair_style eam/fs",
"pair_coeff * * %s W" % pot_path],
atom_types={'W': 1}, keep_alive=True)
calculator = lammps
unit_cell.calc = calculator
# simple calculation to get the lattice constant and cohesive energy
# alat0 = W.cell[0][1] - W.cell[0][0]
sf = StrainFilter(unit_cell)
# or UnitCellFilter(W)
# -> to minimise wrt pos, cell
opt = FIRE(sf)
opt.run(fmax=1e-4) # max force in eV/A
alat = unit_cell.cell.lengths()[0]
# print("a0 relaxation %.4f --> %.4f" % (a0, a))
# e_coh = W.get_potential_energy()
# print("Cohesive energy %.4f" % e_coh)
Cij, Cij_err = fit_elastic_constants(unit_cell,
symmetry="cubic",
delta=delta)
Cij = Cij/GPa # unit conversion to GPa
elasticMatrix3x3 = Cij[:3, :3]
# average of diagonal elements: C11, C22, C33
C11 = elasticMatrix3x3.diagonal().mean()
# make mask to extract non diagonal elements
mask = np.ones((3, 3), dtype=bool)
np.fill_diagonal(mask, False)
# average of all non diagonal elements from 1 to 3
C12 = elasticMatrix3x3[mask].mean()
# average of diagonal elements from 4 till 6: C44, C55, C66,
C44 = Cij[3:, 3:].diagonal().mean()
# A = 2.*C44/(C11 - C12)
if (pot_path is not None) and (calculator is None):
lammps.lmp.close()
return alat, C11, C12, C44
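# Usage sketch (``my_calculator`` is a placeholder for any ASE calculator):
#
#     alat, C11, C12, C44 = get_elastic_constants(calculator=my_calculator,
#                                                 symbol="W")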
def make_barrier_configurations(elastic_param=None,
pot_path=None, calculator=None,
cylinder_r=10, hard_core=False, **kwargs):
"""Creates the initial and final configurations for the NEB calculation
    The positions in the FixAtoms constrained region are the average of the
    final and initial configurations.
Parameters
----------
pot_path : string
Path to the potential file.
    calculator : ase calculator
        ASE calculator used to obtain the lattice constant and elastic
        constants when `pot_path` is not given.
cylinder_r : float
Radius of cylinder of unconstrained atoms around the
dislocation in angstrom.
hard_core : bool
Type of the core hard or soft.
If hard is chosen the displacement field is reversed.
**kwargs :
Keyword arguments to pass to make_screw_cyl() function.
Returns
-------
disloc_ini : ase.Atoms
Initial dislocation configuration.
disloc_fin : ase.Atoms
Final dislocation configuration.
bulk : ase.Atoms
Perfect bulk configuration for Vitek displacement maps
"""
if pot_path is not None:
alat, C11, C12, C44 = get_elastic_constants(pot_path=pot_path)
# get the cutoff from the potential file
with open(pot_path) as potfile:
for i, tmp_str in enumerate(potfile):
if i == 4: # read the last number in the fifth line
cutoff = float(tmp_str.split()[-1])
break
elif calculator is not None:
alat, C11, C12, C44 = get_elastic_constants(calculator=calculator)
cutoff = 5.0 # the value for training data for GAP from paper
elif elastic_param is not None:
alat, C11, C12, C44 = elastic_param
cutoff = 5.5
cent_x = np.sqrt(6.0) * alat / 3.0
center = [cent_x, 0.0, 0.0]
disloc_ini, bulk_ini, __ = make_screw_cyl(alat, C11, C12, C44,
cylinder_r=cylinder_r,
cutoff=cutoff,
hard_core=hard_core,
l_extend=center, **kwargs)
disloc_fin, __, __ = make_screw_cyl(alat, C11, C12, C44,
cylinder_r=cylinder_r,
cutoff=cutoff,
hard_core=hard_core,
center=center, **kwargs)
# get the fixed atoms constrain
FixAtoms = disloc_ini.constraints[0]
# get the indices of fixed atoms
fixed_atoms_indices = FixAtoms.get_indices()
# make the average position of fixed atoms
# between initial and the last position
ini_fix_pos = disloc_ini.get_positions()[fixed_atoms_indices]
fin_fix_pos = disloc_fin.get_positions()[fixed_atoms_indices]
new_av_pos = (ini_fix_pos + fin_fix_pos)/2.0
positions = disloc_ini.get_positions()
positions[fixed_atoms_indices] = new_av_pos
disloc_ini.set_positions(positions, apply_constraint=False)
positions = disloc_fin.get_positions()
positions[fixed_atoms_indices] = new_av_pos
disloc_fin.set_positions(positions, apply_constraint=False)
return disloc_ini, disloc_fin, bulk_ini
def make_screw_cyl_kink(alat, C11, C12, C44, cylinder_r=40,
kink_length=26, kind="double", **kwargs):
"""
Function to create kink configuration based on make_screw_cyl() function.
Double kink configuration is in agreement with
quadrupoles in terms of formation energy.
    Single kink configurations provide a correct and stable structure,
    but their formation energy is not directly accessible.
Parameters
----------
alat : float
Lattice constant of the material.
C11 : float
C11 elastic constant of the material.
C12 : float
C12 elastic constant of the material.
C44 : float
C44 elastic constant of the material.
cylinder_r : float
radius of cylinder of unconstrained atoms around the
dislocation in angstrom
kink_length : int
Length of the cell per kink along b in unit of b, must be even.
kind : string
kind of the kink: right, left or double
**kwargs :
Keyword arguments to pass to make_screw_cyl() function.
Returns
-------
kink : ase.atoms
kink configuration
reference_straight_disloc : ase.atoms
reference straight dislocation configuration
large_bulk : ase.atoms
large bulk cell corresponding to the kink configuration
"""
b = np.sqrt(3.0) * alat / 2.0
cent_x = np.sqrt(6.0) * alat / 3.0
(disloc_ini,
disloc_fin,
bulk_ini) = make_barrier_configurations((alat, C11, C12, C44),
cylinder_r=cylinder_r,
**kwargs)
if kind == "double":
large_bulk = bulk_ini * [1, 1, 2 * kink_length]
reference_straight_disloc = disloc_ini * [1, 1, 2 * kink_length]
if kink_length % 2:
print("WARNING: length is not even!")
kink = disloc_ini * [1, 1, kink_length // 2]
middle_kink = disloc_fin * [1, 1, kink_length]
middle_kink.positions += np.array((0.0, 0.0, kink.get_cell()[2][2]))
kink.constraints[0].index = np.append(kink.constraints[0].index,
middle_kink.constraints[0].get_indices() + len(kink))
kink.extend(middle_kink)
kink.cell[2][2] += middle_kink.cell[2][2]
upper_kink = disloc_ini * [1, 1, kink_length // 2]
upper_kink.positions += np.array((0.0, 0.0, kink.get_cell()[2][2]))
kink.constraints[0].index = np.append(kink.constraints[0].index,
upper_kink.constraints[0].get_indices() + len(kink))
kink.extend(upper_kink)
kink.cell[2][2] += upper_kink.cell[2][2]
elif kind == "right":
large_bulk = bulk_ini * [1, 1, kink_length]
reference_straight_disloc = disloc_ini * [1, 1, kink_length]
kink = disloc_ini * [1, 1, kink_length // 2]
upper_disloc = disloc_fin * [1, 1, kink_length // 2]
upper_disloc.positions += np.array((0.0, 0.0, kink.cell[2][2]))
kink.extend(upper_disloc)
kink.constraints[0].index = np.append(kink.constraints[0].index,
upper_disloc.constraints[0].get_indices() + len(kink))
kink.cell[2][2] += upper_disloc.cell[2][2]
# we have to adjust the cell to make the kink vector periodic
# here we remove two atomic rows. it is nicely explained in the paper
_, _, z = large_bulk.positions.T
right_kink_mask = z < large_bulk.cell[2][2] - 2.0 * b / 3 - 0.01
kink = kink[right_kink_mask]
cell = kink.cell.copy()
# right kink is created when the kink vector is in positive x direction
# assuming (x, y, z) is right group of vectors
cell[2][0] += cent_x
cell[2][2] -= 2.0 * b / 3.0
kink.set_cell(cell, scale_atoms=False)
# make sure all the atoms are removed and cell is modified
# for the bulk as well.
large_bulk.cell[2][0] += cent_x
large_bulk.cell[2][2] -= 2.0 * b / 3.0
large_bulk = large_bulk[right_kink_mask]
for constraint in kink.constraints:
large_bulk.set_constraint(constraint)
elif kind == "left":
large_bulk = bulk_ini * [1, 1, kink_length]
reference_straight_disloc = disloc_ini * [1, 1, kink_length]
kink = disloc_fin * [1, 1, kink_length // 2]
upper_disloc = disloc_ini * [1, 1, kink_length // 2]
upper_disloc.positions += np.array((0.0, 0.0, kink.cell[2][2]))
kink.extend(upper_disloc)
kink.constraints[0].index = np.append(kink.constraints[0].index,
upper_disloc.constraints[0].get_indices() + len(kink))
kink.cell[2][2] += upper_disloc.cell[2][2]
# we have to adjust the cell to make the kink vector periodic
# here we remove one atomic row. it is nicely explained in the paper
_, _, z = large_bulk.positions.T
left_kink_mask = z < large_bulk.cell[2][2] - 1.0 * b / 3 - 0.01
kink = kink[left_kink_mask]
cell = kink.cell.copy()
# left kink is created when the kink vector is in negative x direction
# assuming (x, y, z) is right group of vectors
cell[2][0] -= cent_x
cell[2][2] -= 1.0 * b / 3.0
kink.set_cell(cell, scale_atoms=False)
# make sure all the atoms are removed and cell is modified
# for the bulk as well.
large_bulk.cell[2][0] -= cent_x
large_bulk.cell[2][2] -= 1.0 * b / 3.0
large_bulk = large_bulk[left_kink_mask]
for constraint in kink.constraints:
large_bulk.set_constraint(constraint)
else:
raise ValueError('Kind must be "right", "left" or "double"')
return kink, reference_straight_disloc, large_bulk
def slice_long_dislo(kink, kink_bulk, b):
"""Function to slice a long dislocation configuration to perform
dislocation structure and core position analysis
Parameters
----------
kink : ase.Atoms
kink configuration to slice
kink_bulk : ase.Atoms
corresponding bulk configuration to perform mapping for slicing
b : float
burgers vector b should be along z direction
Returns
-------
sliced_kink : list of [sliced_bulk, sliced_kink]
sliced configurations 1 b length each
disloc_z_positions : float
positions of each sliced configuration (center along z)
"""
if not len(kink) == len(kink_bulk):
raise ValueError('"kink" and "kink_bulk" must be same size')
n_slices = int(np.round(kink.cell[2][2] / b * 3))
atom_z_positions = kink_bulk.positions.T[2]
kink_z_length = kink_bulk.cell[2][2]
sliced_kink = []
disloc_z_positions = []
for slice_id in range(n_slices):
shift = slice_id * b / 3.0
upper_bound = 5.0 * b / 6.0 + shift
lower_bound = -b / 6.0 + shift
if upper_bound < kink_z_length:
mask = np.logical_and(atom_z_positions < upper_bound,
atom_z_positions > lower_bound)
bulk_slice = kink_bulk.copy()[mask]
kink_slice = kink.copy()[mask]
else: # take into account PBC at the end of the box
upper_mask = atom_z_positions < (upper_bound - kink_z_length)
mask = np.logical_or(upper_mask,
atom_z_positions > lower_bound)
bulk_slice = kink_bulk.copy()[mask]
kink_slice = kink.copy()[mask]
# move the bottom atoms on top of the box
kink_slice.positions[upper_mask[mask]] += np.array(kink_bulk.cell[2])
bulk_slice.positions[upper_mask[mask]] += np.array((kink_bulk.cell[2]))
# print(kink_bulk[mask].positions.T[2].max())
# print(kink_bulk[mask].positions.T[2].min())
bulk_slice.positions -= np.array((0.0, 0.0, shift))
kink_slice.positions -= np.array((0.0, 0.0, shift))
bulk_slice.cell = kink_bulk.cell
bulk_slice.cell[2][2] = b
bulk_slice.cell[2][0] = 0
kink_slice.cell = kink_bulk.cell
kink_slice.cell[2][2] = b
kink_slice.cell[2][0] = 0
sliced_kink.append([bulk_slice, kink_slice])
disloc_z_positions.append(b / 3.0 + shift)
disloc_z_positions = np.array(disloc_z_positions)
return sliced_kink, disloc_z_positions
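# Usage sketch: slice a kink configuration built with make_screw_cyl_kink()
# into one-Burgers-vector segments for core position analysis:
#
#     kink, straight_ref, kink_bulk = make_screw_cyl_kink(alat, C11, C12, C44)
#     b = np.sqrt(3.0) * alat / 2.0
#     slices, z_positions = slice_long_dislo(kink, kink_bulk, b)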
def compare_configurations(dislo, bulk, dislo_ref, bulk_ref,
alat, cylinder_r=None, print_info=True, remap=True,
bulk_neighbours=None, origin=(0., 0.)):
"""Compares two dislocation configurations based on the gradient of
the displacements along the bonds.
Parameters
----------
dislo : ase.Atoms
Dislocation configuration.
bulk : ase.Atoms
Corresponding bulk configuration for calculation of displacements.
dislo_ref : ase.Atoms
Reference dislocation configuration.
bulk_ref : ase.Atoms
Corresponding reference bulk configuration
for calculation of displacements.
alat : float
        Lattice parameter for calculation of neighbour list cutoff.
cylinder_r : float or None
        Radius of the region of comparison around the dislocation core itself.
If None makes global comparison based on the radius of
`dislo` configuration, else compares the regions with `cylinder_r`
around the dislocation core position.
print_info : bool
Flag to switch print statement about the type of the comparison
remap: bool
        Flag to switch remapping of atoms between deformed and reference
        configurations on or off. Only set this to False if the atom
        order is the same!
bulk_neighbours:
Optionally pass in bulk neighbours as a tuple (bulk_i, bulk_j)
origin: tuple
Optionally pass in coordinate origin (x0, y0)
Returns
-------
float
The Du norm of the differences per atom.
"""
x0, y0 = origin
x, y, __ = bulk.get_positions().T
x -= x0
y -= y0
radius = np.sqrt(x ** 2 + y ** 2)
if cylinder_r is None:
cutoff_radius = radius.max() - 10.
if print_info:
print("Making a global comparison with radius %.2f" % cutoff_radius)
else:
cutoff_radius = cylinder_r
if print_info:
print("Making a local comparison with radius %.2f" % cutoff_radius)
cutoff_mask = radius < cutoff_radius
second_NN_distance = alat
if bulk_neighbours is None:
bulk_i, bulk_j = neighbour_list('ij', bulk_ref, second_NN_distance)
else:
bulk_i, bulk_j = bulk_neighbours
I_core, J_core = np.array([(i, j) for i, j in zip(bulk_i, bulk_j) if cutoff_mask[i]]).T
if remap:
mapping = {}
for i in range(len(bulk)):
mapping[i] = np.linalg.norm(bulk_ref.positions -
bulk.positions[i], axis=1).argmin()
else:
mapping = dict(zip(list(range(len(bulk))),
list(range(len(bulk)))))
u_ref = dislo_ref.positions - bulk_ref.positions
u = dislo.positions - bulk.positions
u_extended = np.zeros(u_ref.shape)
u_extended[list(mapping.values()), :] = u
du = u_extended - u_ref
Du = np.linalg.norm(np.linalg.norm(mic(du[J_core, :] - du[I_core, :],
bulk.cell), axis=1))
return Du
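# Usage sketch: quantify how far a relaxed core deviates from the Stroh
# prediction within 20 A of the core (identical atom ordering assumed,
# hence remap=False):
#
#     err = compare_configurations(dislo_relaxed, bulk_cyl,
#                                  dislo_stroh, bulk_cyl,
#                                  alat, cylinder_r=20.0, remap=False)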
def cost_function(pos, dislo, bulk, cylinder_r, elastic_param,
hard_core=False, print_info=True, remap=True,
bulk_neighbours=None, origin=(0, 0)):
"""Cost function for fitting analytical displacement field
and detecting dislocation core position. Uses `compare_configurations`
function for the minimisation of the core position.
Parameters
----------
pos : list of float
Positions of the core to build the analytical solution [x, y].
dislo : ase.Atoms
Dislocation configuration.
bulk : ase.Atoms
Corresponding bulk configuration for calculation of displacements.
cylinder_r : float or None
        Radius of the region of comparison around the dislocation core itself.
If None makes global comparison based on the radius of
`dislo` configuration, else compares the regions with `cylinder_r`
around the dislocation core position.
elastic_param : list of float
List containing alat, C11, C12, C44
hard_core : bool
type of the core True for hard
print_info : bool
Flag to switch print statement about the type of the comparison
bulk_neighbours: tuple or None
Optionally pass in neighbour list for bulk reference config to save
computing it each time.
origin: tuple
Optionally pass in coordinate origin (x0, y0)
Returns
-------
float
Error for optimisation (result from `compare_configurations` function)
"""
from atomman import ElasticConstants
from atomman.defect import Stroh
    # Create a Stroh object with junk data
stroh = Stroh(ElasticConstants(C11=141, C12=110, C44=98),
np.array([0, 0, 1]))
axes = np.array([[1, 1, -2],
[-1, 1, 0],
[1, 1, 1]])
alat, C11, C12, C44 = elastic_param
c = ElasticConstants(C11=C11, C12=C12, C44=C44)
burgers = alat * np.array([1., 1., 1.])/2.
# Solving a new problem with Stroh.solve
stroh.solve(c, burgers, axes=axes)
x0, y0 = origin
center = (pos[0], pos[1], 0.0)
u = stroh.displacement(bulk.positions - center)
u = -u if hard_core else u
dislo_guess = bulk.copy()
dislo_guess.positions += u
err = compare_configurations(dislo, bulk,
dislo_guess, bulk,
alat, cylinder_r=cylinder_r,
print_info=print_info, remap=remap,
bulk_neighbours=bulk_neighbours,
origin=origin)
return err
def fit_core_position(dislo_image, bulk, elastic_param, hard_core=False,
core_radius=10, current_pos=None, bulk_neighbours=None,
origin=(0, 0)):
"""
    Use `cost_function()` to fit atomic positions to the Stroh solution.
    `scipy.optimize.minimize` with Powell's method is used to perform the fit.
Parameters
----------
dislo_image: ase.atoms.Atoms
bulk: ase.atoms.Atoms
elastic_param: array-like
[alat, C11, C12, C44]
hard_core: bool
core_radius: float
current_pos: array-like
array [core_x, core_y] containing initial guess for core position
bulk_neighbours: tuple
        cache of bulk neighbours to speed up calculation. Should be a
        tuple (bulk_I, bulk_J) as returned by
        `matscipy.neighbours.neighbour_list('ij', bulk, alat)`.
origin: tuple
Optionally pass in coordinate origin (x0, y0)
Returns
-------
core_pos - array [core_x, core_y]
"""
if current_pos is None:
current_pos = origin
res = minimize(cost_function, current_pos, args=(
dislo_image, bulk, core_radius, elastic_param,
hard_core, False, False, bulk_neighbours, origin),
method='Powell', options={'xtol': 1e-2, 'ftol': 1e-2})
return res.x
def fit_core_position_images(images, bulk, elastic_param,
bulk_neighbours=None,
origin=(0, 0)):
"""
Call fit_core_position() for a list of Atoms objects, e.g. NEB images
Parameters
----------
images: list
list of Atoms object for dislocation configurations
bulk: ase.atoms.Atoms
bulk reference configuration
elastic_param: list
as for `fit_core_position()`.
bulk_neighbours:
as for `fit_core_position()`.
origin: tuple
Optionally pass in coordinate origin (x0, y0)
Returns
-------
core_positions: array of shape `(len(images), 2)`
"""
core_positions = []
core_position = images[0].info.get('core_position', origin)
for dislo in images:
dislo_tmp = dislo.copy()
core_position = fit_core_position(dislo_tmp, bulk, elastic_param,
current_pos=dislo.info.get(
'core_position', core_position),
bulk_neighbours=bulk_neighbours,
origin=origin)
dislo.info['core_position'] = core_position
core_positions.append(core_position)
return np.array(core_positions)
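# Usage sketch (``images`` would come from an NEB run and ``bulk`` is the
# matching reference cylinder; caching the bulk neighbour list avoids
# recomputing it for every image):
#
#     bulk_nbrs = neighbour_list('ij', bulk, alat)
#     core_xy = fit_core_position_images(images, bulk,
#                                        (alat, C11, C12, C44),
#                                        bulk_neighbours=bulk_nbrs)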
def screw_cyl_tetrahedral(alat, C11, C12, C44,
scan_r=15,
symbol="W",
imp_symbol='H',
hard_core=False,
center=(0., 0., 0.)):
"""Generates a set of tetrahedral positions with `scan_r` radius.
Applies the screw dislocation displacement for creating an initial guess
for the H positions at dislocation core.
Parameters
----------
alat : float
Lattice constant of the material.
C11 : float
C11 elastic constant of the material.
C12 : float
C12 elastic constant of the material.
C44 : float
C44 elastic constant of the material.
scan_r : float
Radius of the region to create tetrahedral positions.
symbol : string
        Symbol of the element to pass to ase.lattice.cubic.SimpleCubicFactory
        default is "W" for tungsten
    imp_symbol : string
        Symbol of the element used to create the Atoms object
        default is "H" for hydrogen
    hard_core : bool
        Type of the dislocation core: if True then -u
        (sign of displacement is flipped) is applied.
Default is False i.e. soft core is created.
center : tuple of floats
Coordinates of dislocation core (center) (x, y, z).
Default is (0., 0., 0.)
Returns
-------
ase.Atoms object
Atoms object with predicted tetrahedral
positions around dislocation core.
"""
from atomman import ElasticConstants
from atomman.defect import Stroh
axes = np.array([[1, 1, -2],
[-1, 1, 0],
[1, 1, 1]])
unit_cell = BodyCenteredCubic(directions=axes.tolist(),
size=(1, 1, 1), symbol=symbol,
pbc=(False, False, True),
latticeconstant=alat)
BCCTetras = BodyCenteredCubicTetrahedralFactory()
impurities = BCCTetras(directions=axes.tolist(),
size=(1, 1, 1),
symbol=imp_symbol,
pbc=(False, False, True),
latticeconstant=alat)
impurities = impurities[impurities.positions.T[2] < alat*1.2]
impurities.set_cell(unit_cell.get_cell())
impurities.wrap()
disloCenterY = alat * np.sqrt(2.)/6.0
disloCenterX = alat * np.sqrt(6.)/6.0
impurities.positions[:, 0] -= disloCenterX
impurities.positions[:, 1] -= disloCenterY
# size of the cubic cell as a 112 direction
Lx = int(round((scan_r)/(alat * np.sqrt(6.))))
# size of the cubic cell as a 110 direction
Ly = int(round((scan_r) / (alat * np.sqrt(2.))))
    # factor 2 to make sure odd number of images is translated
# it is important for the correct centering of the dislocation core
bulk_tetra = impurities * (2*(Lx + 1), 2*(Ly + 1), 1)
# make 0, 0, at the center
bulk_tetra.positions[:, 0] -= (Lx + 1) * alat * np.sqrt(6.) - center[0]
bulk_tetra.positions[:, 1] -= (Ly + 1) * alat * np.sqrt(2.) - center[1]
x, y, z = bulk_tetra.positions.T
radius_x_y_zero = np.sqrt(x**2 + y**2)
mask_zero = radius_x_y_zero < scan_r
radius_x_y_center = np.sqrt((x - center[0])**2 + (y - center[1])**2)
mask_center = radius_x_y_center < scan_r
final_mask = mask_center | mask_zero
# leave only atoms inside the cylinder
bulk_tetra = bulk_tetra[final_mask]
# Create a Stroh object with junk data
stroh = Stroh(ElasticConstants(C11=141, C12=110, C44=98),
np.array([0, 0, 1]))
c = ElasticConstants(C11=C11, C12=C12, C44=C44)
burgers = alat * np.array([1., 1., 1.])/2.
# Solving a new problem with Stroh.solve
stroh.solve(c, burgers, axes=axes)
dislo_tetra = bulk_tetra.copy()
impurities_u = stroh.displacement(bulk_tetra.positions - center)
impurities_u = -impurities_u if hard_core else impurities_u
dislo_tetra.positions += impurities_u
return dislo_tetra
def screw_cyl_octahedral(alat, C11, C12, C44,
scan_r=15,
symbol="W",
imp_symbol='H',
hard_core=False,
center=(0., 0., 0.)):
"""Generates a set of octahedral positions with `scan_r` radius.
Applies the screw dislocation displacement for creating an initial guess
for the H positions at dislocation core.
Parameters
----------
alat : float
Lattice constant of the material.
C11 : float
C11 elastic constant of the material.
C12 : float
C12 elastic constant of the material.
C44 : float
C44 elastic constant of the material.
    scan_r : float
        Radius of the region to create octahedral positions.
    symbol : string
        Symbol of the element to pass to ase.lattice.cubic.SimpleCubicFactory
        default is "W" for tungsten
    imp_symbol : string
        Symbol of the element used to create the Atoms object
        default is "H" for hydrogen
    hard_core : bool
        Type of the dislocation core: if True then -u
        (sign of displacement is flipped) is applied.
Default is False i.e. soft core is created.
center : tuple of floats
Coordinates of dislocation core (center) (x, y, z).
Default is (0., 0., 0.)
Returns
-------
ase.Atoms object
        Atoms object with predicted octahedral
        positions around dislocation core.
"""
# TODO: Make one function for impurities and pass factory to it:
# TODO: i.e. octahedral or terahedral
from atomman import ElasticConstants
from atomman.defect import Stroh
axes = np.array([[1, 1, -2],
[-1, 1, 0],
[1, 1, 1]])
unit_cell = BodyCenteredCubic(directions=axes.tolist(),
size=(1, 1, 1), symbol=symbol,
pbc=(False, False, True),
latticeconstant=alat)
BCCOctas = BodyCenteredCubicOctahedralFactory()
impurities = BCCOctas(directions=axes.tolist(),
size=(1, 1, 1), symbol=imp_symbol,
pbc=(False, False, True),
latticeconstant=alat)
impurities = impurities[impurities.positions.T[2] < alat*1.2]
impurities.set_cell(unit_cell.get_cell())
impurities.wrap()
disloCenterY = alat * np.sqrt(2.)/6.0
disloCenterX = alat * np.sqrt(6.)/6.0
impurities.positions[:, 0] -= disloCenterX
impurities.positions[:, 1] -= disloCenterY
L = int(round(2.0*scan_r/(alat*np.sqrt(2.)))) + 1
bulk_octa = impurities * (L, L, 1)
# make 0, 0, at the center
bulk_octa.positions[:, 0] -= L * alat * np.sqrt(6.)/2. - center[0]
bulk_octa.positions[:, 1] -= L * alat * np.sqrt(2.)/2. - center[1]
x, y, z = bulk_octa.positions.T
radius_x_y_zero = np.sqrt(x**2 + y**2)
mask_zero = radius_x_y_zero < scan_r
radius_x_y_center = np.sqrt((x - center[0])**2 + (y - center[1])**2)
mask_center = radius_x_y_center < scan_r
final_mask = mask_center | mask_zero
# leave only atoms inside the cylinder
bulk_octa = bulk_octa[final_mask]
# Create a Stroh object with junk data
stroh = Stroh(ElasticConstants(C11=141, C12=110, C44=98),
np.array([0, 0, 1]))
c = ElasticConstants(C11=C11, C12=C12, C44=C44)
burgers = alat * np.array([1., 1., 1.])/2.
# Solving a new problem with Stroh.solve
stroh.solve(c, burgers, axes=axes)
dislo_octa = bulk_octa.copy()
impurities_u = stroh.displacement(bulk_octa.positions - center)
impurities_u = -impurities_u if hard_core else impurities_u
dislo_octa.positions += impurities_u
return dislo_octa
class BodyCenteredCubicTetrahedralFactory(SimpleCubicFactory):
"""A factory for creating tetrahedral lattices in bcc structure"""
xtal_name = "bcc_tetrahedral"
bravais_basis = [[0.0, 0.5, 0.25],
[0.0, 0.5, 0.75],
[0.0, 0.25, 0.5],
[0.0, 0.75, 0.5],
[0.5, 0.0, 0.75],
[0.25, 0.0, 0.5],
[0.75, 0.0, 0.5],
[0.5, 0.0, 0.25],
[0.5, 0.25, 0.0],
[0.5, 0.75, 0.0],
[0.25, 0.5, 0.0],
[0.75, 0.5, 0.0]]
class BodyCenteredCubicOctahedralFactory(SimpleCubicFactory):
"""A factory for creating octahedral lattices in bcc structure"""
xtal_name = "bcc_octahedral"
bravais_basis = [[0.5, 0.5, 0.0],
[0.0, 0.0, 0.5],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5]]
def dipole_displacement_angle(W_bulk, dislo_coord_left, dislo_coord_right,
shift=0.0, mode=1.0):
"""
Generates a simple displacement field for two dislocations in a dipole
configuration uding simple Voltera solution as u = b/2 * angle
"""
burgers = W_bulk.cell[2][2]
shifted_positions = W_bulk.positions + shift - dislo_coord_left
x, y, __ = shifted_positions.T
displacement_left = np.arctan2(y, x) * burgers / (2.0 * np.pi)
shifted_positions = W_bulk.positions + shift - dislo_coord_right
x, y, __ = shifted_positions.T
displacement_right = np.arctan2(y, mode*x) * burgers / (2.0 * np.pi)
# make two easy core configurations
u_dipole = np.zeros_like(W_bulk.positions)
u_dipole.T[2] = displacement_left - mode*displacement_right
return u_dipole
def get_u_img(W_bulk,
dislo_coord_left,
dislo_coord_right,
n_img=10, n1_shift=0, n2_shift=0):
"""
    Function for getting the displacement field for images of quadrupole cells
used by `make_screw_quadrupole`
"""
u_img = np.zeros_like(W_bulk.positions)
C1_quadrupole, C2_quadrupole, __ = W_bulk.get_cell()
for n1 in range(-n_img, n_img + 1):
for n2 in range(-n_img, n_img + 1):
shift = n1 * C1_quadrupole + n2 * C2_quadrupole
if n1 != n1_shift or n2 != n2_shift:
u_img += dipole_displacement_angle(W_bulk,
dislo_coord_left + shift,
dislo_coord_right + shift,
shift=n2_shift * C2_quadrupole + n1_shift * C1_quadrupole)
return u_img
def make_screw_quadrupole(alat,
left_shift=0,
right_shift=0,
n1u=5,
symbol="W"):
r"""Generates a screw dislocation dipole configuration
for effective quadrupole arrangement. Works for BCC systems.
Parameters
----------
alat : float
Lattice parameter of the system in Angstrom.
    left_shift : float, optional
        Shift of the left dislocation core in number of distances to the next
        equivalent dislocation core position, needed for creation of the final
        configuration for NEB. Default is 0.
    right_shift : float, optional
        Shift of the right dislocation core in number of distances to the next
        equivalent dislocation core position, needed for creation of the final
        configuration for NEB. Default is 0.
    n1u : int, odd number
        odd number! Length of the cell, i.e. a doubled distance between the
        cores, along x. Main parameter used to calculate the cell vectors.
    symbol : string
        Symbol of the element to pass to ase.lattice.cubic.SimpleCubicFactory
default is "W" for tungsten
Returns
-------
disloc_quadrupole : ase.Atoms
Resulting quadrupole configuration.
W_bulk : ase.Atoms
Perfect system.
    dislo_coord_left : list of float
        Coordinates of left dislocation core [x, y]
    dislo_coord_right : list of float
        Coordinates of right dislocation core [x, y]
Notes
-----
Calculation of cell vectors
+++++++++++++++++++++++++++
From [1]_ we take:
- Unit vectors for the cell are:
.. math::
u = \frac{1}{3}[1 \bar{2} 1];
.. math::
v = \frac{1}{3}[2 \bar{1} \bar{1}];
.. math::
z = b = \frac{1}{2}[1 1 1];
- Cell vectors are:
.. math::
C_1 = n^u_1 u + n^v_1 v + C^z_1 z;
.. math::
C_2 = n^u_2 u + n^v_2 v + C^z_2 z;
.. math::
C_3 = z
- For quadrupole arrangement n1u needs to be odd number,
for 135 atoms cell we take n1u=5
    - To have the quadrupole as close as possible to a square one has to take:
.. math::
2 n^u_2 + n^v_2 = n^u_1
.. math::
n^v_2 \approx \frac{n^u_1}{\sqrt{3}}
- for n1u = 5:
.. math::
n^v_2 \approx \frac{n^u_1}{\sqrt{3}} = 2.89 \approx 3.0
.. math::
n^u_2 = \frac{1}{2} (n^u_1 - n^v_2) \approx \frac{1}{2} (5-3)=1
    - Following [2]_ the cell geometry is optimized by adding tilt components
      Cz1 and Cz2 for our case of n1u = 3n - 1:
Easy core
.. math::
C^z_1 = + \frac{1}{3}
.. math::
C^z_2 = + \frac{1}{6}
Hard core
.. math::
C^z_1 = + \frac{1}{3}
.. math::
C^z_2 = + \frac{1}{6}
    (the identical hard-core values may be a typo in the paper; check the
    original!)
References:
+++++++++++
.. [1] Ventelon, L. & Willaime, F. J 'Core structure and Peierls potential
of screw dislocations in alpha-Fe from first principles: cluster versus
dipole approaches' Computer-Aided Mater Des (2007) 14(Suppl 1): 85.
https://doi.org/10.1007/s10820-007-9064-y
.. [2] Cai W. (2005) Modeling Dislocations Using a Periodic Cell.
In: Yip S. (eds) Handbook of Materials Modeling. Springer, Dordrecht
https://link.springer.com/chapter/10.1007/978-1-4020-3286-8_42
"""
unit_cell = BodyCenteredCubic(directions=[[1, -2, 1],
[2, -1, -1],
[1, 1, 1]],
symbol=symbol,
pbc=(True, True, True),
latticeconstant=alat, debug=0)
unit_cell_u, unit_cell_v, unit_cell_z = unit_cell.get_cell()
# calculate the cell vectors according to the Ventelon paper
    # the real configuration depends on rounding, check it here
n2v = int(np.rint(n1u/np.sqrt(3.0)))
    # when the n1u - n2v difference is odd it is impossible to have a
    # perfect arrangement of translations along x with C2 equal to 0.5*n1u.
    # The choice between np.ceil() and np.trunc() rounding gives a different
    # configuration but the same quality of quadrupole arrangement (tested)
n2u = np.ceil((n1u - n2v) / 2.)
n1v = 0
print("Not rounded values of C2 componets: ")
print("n2u: %.2f, n2v: %.2f" % ((n1u - n2v) / 2., n1u/np.sqrt(3.0)))
print("Calculated cell vectors from n1u = %i" % n1u)
print("n1v = %i" % n1v)
print("n2u = %i" % n2u)
print("n2v = %i" % n2v)
bulk = unit_cell.copy()*[n1u, n2v, 1]
# add another periodic shift in x direction to c2 vector
# for proper periodicity (n^u_2=1) of the effective quadrupole arrangement
bulk.cell[1] += n2u * unit_cell_u
C1_quadrupole, C2_quadrupole, C3_quadrupole = bulk.get_cell()
# calculation of dislocation cores positions
# distance between centers of triangles along x
# move to odd/even number -> get to upward/downward triangle
x_core_dist = alat * np.sqrt(6.)/6.0
# distance between centers of triangles along y
y_core_dist = alat * np.sqrt(2.)/6.0
# separation of the cores in a 1ux1v cell
nx_left = 2
nx_right = 5
if n2v % 2 == 0: # check if the number of cells in y direction is even
# Even: then introduce cores between two equal halves of the cell
ny_left = -2
ny_right = -1
else: # Odd: introduce cores between two equal halves of the cell
ny_left = 4
ny_right = 5
nx_left += 2.0 * left_shift
nx_right += 2.0 * right_shift
dislo_coord_left = np.array([nx_left * x_core_dist,
ny_left * y_core_dist,
0.0])
dislo_coord_right = np.array([nx_right * x_core_dist,
ny_right * y_core_dist,
0.0])
# calculation of the shifts of the initial cores coordinates for the final
# quadrupole arrangements
# different x centering preferences for odd and even values
if n2v % 2 == 0: # check if the number of cells in y direction is even
# Even:
dislo_coord_left += (n2u - 1) * unit_cell_u
dislo_coord_right += (n2u - 1 + np.trunc(n1u/2.0)) * unit_cell_u
else: # Odd:
dislo_coord_left += n2u * unit_cell_u
dislo_coord_right += (n2u + np.trunc(n1u/2.0)) * unit_cell_u
dislo_coord_left += np.trunc(n2v/2.0) * unit_cell_v
dislo_coord_right += np.trunc(n2v/2.0) * unit_cell_v
u_quadrupole = dipole_displacement_angle(bulk,
dislo_coord_left,
dislo_coord_right)
# get the image contribution from the dipoles around
# (default value of N images to scan n_img=10)
u_img = get_u_img(bulk,
dislo_coord_left,
dislo_coord_right)
u_sum = u_quadrupole + u_img
    # calculate the field of the neighbouring cell to estimate
    # linear u_err along C1 and C2 (see Cai paper)
# u_err along C2
n1_shift = 0
n2_shift = 1
shift = n1_shift*C1_quadrupole + n2_shift*C2_quadrupole
u_quadrupole_shifted = dipole_displacement_angle(bulk,
dislo_coord_left + shift,
dislo_coord_right + shift,
shift=shift)
u_img_shifted = get_u_img(bulk,
dislo_coord_left,
dislo_coord_right,
n1_shift=n1_shift, n2_shift=n2_shift)
u_sum_shifted = u_quadrupole_shifted + u_img_shifted
delta_u = u_sum - u_sum_shifted
delta_u_C2 = delta_u.T[2].mean()
print("delta u c2: %.2f " % delta_u_C2)
# u_err along C1
n1_shift = 1
n2_shift = 0
shift = n1_shift*C1_quadrupole + n2_shift*C2_quadrupole
u_quadrupole_shifted = dipole_displacement_angle(bulk,
dislo_coord_left + shift,
dislo_coord_right + shift,
shift=shift)
u_img_shifted = get_u_img(bulk,
dislo_coord_left,
dislo_coord_right,
n1_shift=n1_shift, n2_shift=n2_shift)
u_sum_shifted = u_quadrupole_shifted + u_img_shifted
delta_u = u_sum - u_sum_shifted
delta_u_C1 = delta_u.T[2].mean()
print("delta u c1: %.3f" % delta_u_C1)
x_scaled, y_scaled, __ = bulk.get_scaled_positions(wrap=False).T
u_err_C2 = (y_scaled - 0.5)*delta_u_C2
u_err_C1 = delta_u_C1*(x_scaled - 0.5)
u_err = u_err_C1 + u_err_C2
    # Calculate the u_tilt to accommodate the stress (see Cai paper)
burgers = bulk.cell[2][2]
u_tilt = 0.5 * burgers * (y_scaled - 0.5)
final_u = u_sum
final_u.T[2] += u_err - u_tilt
disloc_quadrupole = bulk.copy()
disloc_quadrupole.positions += final_u
# tilt the cell according to the u_tilt
disloc_quadrupole.cell[1][2] -= burgers/2.0
bulk.cell[1][2] -= burgers/2.0
return disloc_quadrupole, bulk, dislo_coord_left, dislo_coord_right
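# Usage sketch (hedged; alat ~ 3.165 A for W is an assumed value, not taken
# from this module). n1u=5 yields the 135-atom quadrupole cell of [1]_:
#
# >>> quad, W_bulk, core_l, core_r = make_screw_quadrupole(
# ...     alat=3.165, n1u=5, symbol="W")  # doctest: +SKIP
# >>> len(quad)  # doctest: +SKIP
# 135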
def make_screw_quadrupole_kink(alat, kind="double",
n1u=5, kink_length=20, symbol="W"):
"""Generates kink configuration using make_screw_quadrupole() function
works for BCC structure.
The method is based on paper
https://doi.org/10.1016/j.jnucmat.2008.12.053
Parameters
----------
alat : float
Lattice parameter of the system in Angstrom.
kind : string
kind of the kink: right, left or double
n1u : int
Number of lattice vectors for the quadrupole cell
(make_screw_quadrupole() function)
kink_length : int
Length of the cell per kink along b in unit of b, must be even.
    symbol : string
        Symbol of the element to pass to ase.lattice.cubic.SimpleCubicFactory.
        Default is "W" for tungsten.
Returns
-------
kink : ase.atoms
kink configuration
reference_straight_disloc : ase.atoms
reference straight dislocation configuration
large_bulk : ase.atoms
large bulk cell corresponding to the kink configuration
"""
b = np.sqrt(3.0) * alat / 2.0
cent_x = np.sqrt(6.0) * alat / 3.0
(ini_disloc_quadrupole,
W_bulk, _, _) = make_screw_quadrupole(alat, n1u=n1u,
left_shift=0.0,
right_shift=0.0,
symbol=symbol)
(fin_disloc_quadrupole,
W_bulk, _, _) = make_screw_quadrupole(alat, n1u=n1u,
left_shift=1.0,
right_shift=1.0,
symbol=symbol)
reference_straight_disloc = ini_disloc_quadrupole * [1, 1, kink_length]
large_bulk = W_bulk * [1, 1, kink_length]
__, __, z = large_bulk.positions.T
if kind == "left":
# we have to adjust the cell to make the kink vector periodic
        # here we remove one atomic row; it is nicely explained in the paper
left_kink_mask = z < large_bulk.get_cell()[2][2] - 1.0 * b / 3.0 - 0.01
large_bulk.cell[2][0] -= cent_x
large_bulk.cell[2][2] -= 1.0 * b / 3.0
large_bulk = large_bulk[left_kink_mask]
kink = fin_disloc_quadrupole * [1, 1, kink_length // 2]
upper_kink = ini_disloc_quadrupole * [1, 1, kink_length // 2]
upper_kink.positions += np.array((0.0, 0.0, kink.cell[2][2]))
kink.cell[2][2] += upper_kink.cell[2][2]
kink.extend(upper_kink)
        # left kink is created when the kink vector is in negative x direction
# assuming (x, y, z) is right group of vectors
kink = kink[left_kink_mask]
kink.cell[2][0] -= cent_x
kink.cell[2][2] -= 1.0 * b / 3.0
elif kind == "right":
# we have to adjust the cell to make the kink vector periodic
        # here we remove two atomic rows; it is nicely explained in the paper
right_kink_mask = z < large_bulk.cell[2][2] - 2.0 * b / 3.0 - 0.01
large_bulk.cell[2][0] += cent_x
large_bulk.cell[2][2] -= 2.0 * b / 3.0
large_bulk = large_bulk[right_kink_mask]
kink = ini_disloc_quadrupole * [1, 1, kink_length // 2]
upper_kink = fin_disloc_quadrupole * [1, 1, kink_length // 2]
upper_kink.positions += np.array((0.0, 0.0, kink.cell[2][2]))
kink.cell[2][2] += upper_kink.cell[2][2]
kink.extend(upper_kink)
kink = kink[right_kink_mask]
# right kink is created when the kink vector is in positive x direction
# assuming (x, y, z) is right group of vectors
kink.cell[2][0] += cent_x
kink.cell[2][2] -= 2.0 * b / 3.0
elif kind == "double":
# for the double kink it is kink length per kink
kink = ini_disloc_quadrupole * [1, 1, kink_length // 2]
middle_kink = fin_disloc_quadrupole * [1, 1, kink_length]
middle_kink.positions += np.array((0.0, 0.0, kink.get_cell()[2][2]))
kink.extend(middle_kink)
kink.cell[2][2] += middle_kink.cell[2][2]
upper_kink = ini_disloc_quadrupole * [1, 1, kink_length // 2]
upper_kink.positions += np.array((0.0, 0.0, kink.get_cell()[2][2]))
kink.extend(upper_kink)
kink.cell[2][2] += upper_kink.cell[2][2]
# double kink is double length
large_bulk = W_bulk * [1, 1, 2 * kink_length]
else:
raise ValueError('Kind must be "right", "left" or "double"')
return kink, reference_straight_disloc, large_bulk
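# Usage sketch (hedged; the lattice constant is an assumed value and
# kink_length must be even):
#
# >>> kink, straight_ref, big_bulk = make_screw_quadrupole_kink(
# ...     alat=3.165, kind="double", kink_length=20,
# ...     symbol="W")  # doctest: +SKIP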
def make_edge_cyl_001_100(a0, C11, C12, C44,
cylinder_r,
cutoff=5.5,
tol=1e-6,
symbol="W"):
"""Function to produce consistent edge dislocation configuration.
Parameters
----------
    a0 : float
        Lattice constant of the material.
C11 : float
C11 elastic constant of the material.
C12 : float
C12 elastic constant of the material.
C44 : float
C44 elastic constant of the material.
cylinder_r : float
Radius of cylinder of unconstrained atoms around the
dislocation in angstrom.
    cutoff : float
        Potential cutoff used to determine the size of the
        fixed atoms region (2*cutoff)
tol : float
Tolerance for generation of self consistent solution.
    symbol : string
        Symbol of the element to pass to ase.lattice.cubic.SimpleCubicFactory.
        Default is "W" for tungsten.
Returns
-------
bulk : ase.Atoms object
Bulk configuration.
disloc : ase.Atoms object
Dislocation configuration.
disp : np.array
Corresponding displacement.
"""
from atomman import ElasticConstants
from atomman.defect import Stroh
# Create a Stroh object with junk data
stroh = Stroh(ElasticConstants(C11=141, C12=110, C44=98),
np.array([0, 0, 1]))
axes = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
c = ElasticConstants(C11=C11, C12=C12, C44=C44)
burgers = a0 * np.array([1., 0., 0.])
# Solving a new problem with Stroh.solve
stroh.solve(c, burgers, axes=axes)
unit_cell = BodyCenteredCubic(directions=axes.tolist(),
size=(1, 1, 1), symbol=symbol,
pbc=(False, False, True),
latticeconstant=a0)
bulk = unit_cell.copy()
    # shift to put the origin of the cell between the atomic planes
    # and under the midplane on the Y axis
X_midplane_shift = -0.25*a0
Y_midplane_shift = -0.25*a0
bulk_shift = [X_midplane_shift,
Y_midplane_shift,
0.0]
bulk.positions += bulk_shift
tot_r = cylinder_r + 2*cutoff + 0.01
Lx = int(round(tot_r/a0))
Ly = int(round(tot_r/a0))
# factor 2 to make sure odd number of images is translated
# it is important for the correct centering of the dislocation core
bulk = bulk * (2*Lx, 2*Ly, 1)
center_shift = [Lx * a0, Ly * a0, 0.0]
bulk.positions -= center_shift
# bulk.write("before.xyz")
disp1 = stroh.displacement(bulk.positions)
disloc = bulk.copy()
res = np.inf
i = 0
while res > tol:
disloc.positions = bulk.positions + disp1
disp2 = stroh.displacement(disloc.positions)
res = np.abs(disp1 - disp2).max()
disp1 = disp2
print('disloc SCF', i, '|d1-d2|_inf =', res)
i += 1
if i > 10:
raise RuntimeError('Self-consistency did ' +
'not converge in 10 cycles')
disp = disp2
x, y, z = disloc.positions.T
radius_x_y_zero = np.sqrt(x**2 + y**2)
mask = radius_x_y_zero < tot_r
disloc = disloc[mask]
bulk = bulk[mask]
# disloc.write("after_disp.xyz")
x, y, z = disloc.positions.T
radius_x_y_zero = np.sqrt(x**2 + y**2)
mask_zero = radius_x_y_zero > cylinder_r
fix_atoms = FixAtoms(mask=mask_zero)
disloc.set_constraint(fix_atoms)
return bulk, disloc, disp
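# Usage sketch (requires `atomman`; the elastic constants below are assumed,
# illustrative numbers in ASE units, not values recommended by this module):
#
# >>> bulk, disloc, disp = make_edge_cyl_001_100(
# ...     a0=3.165, C11=3.26, C12=1.26, C44=1.00,
# ...     cylinder_r=20.0)  # doctest: +SKIP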
def read_dislo_QMMM(filename=None, image=None):
"""
Reads extended xyz file with QMMM configuration
Uses "region" for mapping of QM, MM and fixed atoms
Sets ase.constraints.FixAtoms constraint on fixed atoms
Parameters
----------
filename : path to xyz file
image : image with "region" array to set up constraint and extract qm_mask
Returns
-------
dislo_QMMM : Output ase.Atoms object
Includes "region" array and FixAtoms constraint
qm_mask : array mask for QM atoms mapping
"""
if filename is not None:
dislo_QMMM = read(filename)
elif image is not None:
dislo_QMMM = image
else:
raise RuntimeError("Please provide either path or image")
region = dislo_QMMM.get_array("region")
Nat = len(dislo_QMMM)
print("Total number of atoms in read configuration: {0:7}".format(Nat))
for region_type in np.unique(region):
print("{0:52d} {1}".format(np.count_nonzero(region == region_type),
region_type))
if len(dislo_QMMM.constraints) == 0:
print("Adding fixed atoms constraint")
fix_mask = region == "fixed"
fix_atoms = FixAtoms(mask=fix_mask)
dislo_QMMM.set_constraint(fix_atoms)
else:
print("Constraints list is not zero")
qm_mask = region == "QM"
qm_atoms = dislo_QMMM[qm_mask]
qm_atoms_types = np.array(qm_atoms.get_chemical_symbols())
print("QM region atoms: {0:3d}".format(np.count_nonzero(qm_mask)))
for qm_atom_type in np.unique(qm_atoms_types):
print("{0:20d} {1}".format(np.count_nonzero(qm_atoms_types == qm_atom_type),
qm_atom_type))
return dislo_QMMM, qm_mask
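# Usage sketch (hypothetical file name; the extended xyz file must carry a
# per-atom "region" array with values such as "QM", "MM" and "fixed"):
#
# >>> dislo, qm_mask = read_dislo_QMMM("dislo_qmmm.xyz")  # doctest: +SKIP
# >>> qm_atoms = dislo[qm_mask]  # doctest: +SKIP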
def plot_bulk(atoms, n_planes=3, ax=None, ms=200):
"""
Plots x, y coordinates of atoms colored according
to non-equivalent planes in z plane
"""
import matplotlib.pyplot as plt
if ax is None:
fig, ax = plt.subplots()
x, y, z = atoms.positions.T
zlim = atoms.cell[2, 2] / n_planes
bins = np.linspace(zlim, atoms.cell[2, 2], num=n_planes)
bins -= atoms.cell[2, 2] / (2.0 * n_planes)
plane_ids = np.digitize(z, bins=bins)
for plane_id in np.unique(plane_ids):
mask = plane_id == plane_ids
ax.scatter(x[mask], y[mask], s=ms, edgecolor="k")
def ovito_dxa_straight_dislo_info(disloc, structure="BCC", replicate_z=3):
"""
A function to extract information from ovito dxa analysis.
Current version works for 1b thick configurations
containing straight dislocations.
Parameters
----------
    disloc: ase.Atoms
        Atoms object containing the atomic configuration to analyse
    structure: str
        Crystal structure passed to the DXA modifier:
        "BCC", "FCC" or "Diamond". Default is "BCC".
replicate_z: int
Specifies number of times to replicate the configuration
along the dislocation line.
Ovito dxa analysis needs at least 3b thick cell to work.
Returns
-------
Results: np.array(position, b, line, angle)
"""
from ovito.io.ase import ase_to_ovito
from ovito.modifiers import ReplicateModifier, DislocationAnalysisModifier
from ovito.pipeline import StaticSource, Pipeline
dxa_disloc = disloc.copy()
if 'fix_mask' in dxa_disloc.arrays:
del dxa_disloc.arrays['fix_mask']
input_crystal_structures = {"BCC": DislocationAnalysisModifier.Lattice.BCC,
"FCC": DislocationAnalysisModifier.Lattice.FCC,
"Diamond": DislocationAnalysisModifier.Lattice.CubicDiamond}
data = ase_to_ovito(dxa_disloc)
pipeline = Pipeline(source=StaticSource(data=data))
pipeline.modifiers.append(ReplicateModifier(num_z=replicate_z))
dxa = DislocationAnalysisModifier(
input_crystal_structure=input_crystal_structures[structure])
pipeline.modifiers.append(dxa)
data = pipeline.compute()
results = []
for segment in data.dislocations.segments:
        # ensure that this is a straight dislocation in a 1b thick cell
length = segment.length / replicate_z
try:
np.testing.assert_almost_equal(length, dxa_disloc.cell[2, 2],
decimal=2)
except AssertionError as error:
print("Dislocation might not be straight:")
print(error)
b = segment.true_burgers_vector
b_hat = np.array(segment.spatial_burgers_vector)
b_hat /= np.linalg.norm(b_hat)
lines = np.diff(segment.points, axis=0)
angles = []
positions = []
for point in segment.points:
positions.append(point[:2])
for line in lines:
t_hat = line / np.linalg.norm(line)
dot = np.abs(np.dot(t_hat, b_hat))
angle = np.degrees(np.arccos(dot))
angles.append(angle)
position = np.array(positions).mean(axis=0)
line = np.array(lines).mean(axis=0)
angle = np.array(angles).mean()
results.append([position, b, line, angle])
return results
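# Usage sketch (requires the `ovito` package; `disloc` is any 1b-thick
# straight-dislocation configuration such as the ones built above):
#
# >>> results = ovito_dxa_straight_dislo_info(disloc,
# ...                                         structure="BCC")  # doctest: +SKIP
# >>> position, b, line, angle = results[0]  # doctest: +SKIP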
def get_centering_mask(atoms, radius,
core_position=[0., 0., 0.],
extension=[0., 0., 0.],):
center = np.diag(atoms.cell) / 2
r = np.sqrt(((atoms.positions[:, [0, 1]]
- center[[0, 1]]) ** 2).sum(axis=1))
mask = r < radius
core_position = np.array(core_position)
shifted_center = center + core_position
shifted_r = np.sqrt(((atoms.positions[:, [0, 1]] -
shifted_center[[0, 1]]) ** 2).sum(axis=1))
shifted_mask = shifted_r < radius
extension = np.array(extension)
extended_center = center + extension
extended_r = np.sqrt(((atoms.positions[:, [0, 1]] -
extended_center[[0, 1]]) ** 2).sum(axis=1))
extended_mask = extended_r < radius
final_mask = mask | shifted_mask | extended_mask
return final_mask
def check_duplicates(atoms, distance=0.1):
"""
Returns a mask of atoms that have at least
one other atom closer than distance
"""
mask = atoms.get_all_distances() < distance
    # atom i is flagged when any later atom lies closer than `distance`
    duplicates = np.zeros(len(atoms), dtype=bool)
    for i, row in enumerate(mask):
        if any(row[i+1:]):
            duplicates[i] = True
    # print(f"found {duplicates.sum()} duplicates")
    return duplicates
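# Illustrative, self-contained sketch: atom i is flagged when a later atom
# sits closer than `distance`, so the first of two coincident atoms is True.
#
# >>> from ase import Atoms
# >>> pair = Atoms("H2", positions=[[0, 0, 0], [0, 0, 0.05]])
# >>> check_duplicates(pair, distance=0.1)
# array([ True, False])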
class CubicCrystalDislocation:
def __init__(self, unit_cell, alat, C11, C12, C44, axes, burgers,
unit_cell_core_position=None,
parity=None, glide_distance=None, n_planes=None,
self_consistent=None):
"""
This class represents a dislocation in a cubic crystal
The dislocation is defined by the crystal unit cell,
elastic constants C11, C12 and C44, crystal axes,
burgers vector and optional shift and parity vectors.
Parameters
----------
unit_cell : unit cell to build the dislocation configuration
alat : lattice constant
C11 : elastic constants
C12
C44
axes : cell axes (b is normally along z direction)
burgers : burgers vector of the dislocation
unit_cell_core_position : dislocation core position in the unit cell
used to shift atomic positions to
make the dislocation core the center
of the cell
parity
glide_distance : distance to the next equivalent
core position in the glide direction
n_planes : int
number of non equivalent planes in z direction
self_consistent : float
default value for the displacement calculation
"""
self.unit_cell = unit_cell.copy()
self.alat = alat
self.C11 = C11
self.C12 = C12
self.C44 = C44
self.axes = axes
self.burgers = burgers
if unit_cell_core_position is None:
            unit_cell_core_position = np.zeros(3)
self.unit_cell_core_position = unit_cell_core_position
if parity is None:
parity = np.zeros(2, dtype=int)
self.parity = parity
if glide_distance is None:
glide_distance = 0.0
self.glide_distance = glide_distance
if n_planes is None:
n_planes = 3
self.n_planes = n_planes
if self_consistent is None:
self_consistent = True
self.self_consistent = self_consistent
self.stroh = None
def init_stroh(self):
from atomman import ElasticConstants
from atomman.defect import Stroh
c = ElasticConstants(C11=self.C11, C12=self.C12, C44=self.C44)
self.stroh = Stroh(c, burgers=self.burgers, axes=self.axes)
def set_burgers(self, burgers):
self.burgers = burgers
if self.stroh is None:
self.init_stroh()
def plot_unit_cell(self, ms=250, ax=None):
import matplotlib.pyplot as plt
if ax is None:
fig, ax = plt.subplots()
plot_bulk(self.unit_cell, self.n_planes, ax=ax, ms=ms)
x_core, y_core, _ = self.unit_cell_core_position
ax.scatter(x_core, y_core, marker="x", s=ms, c="red")
ax.scatter(x_core + self.glide_distance, y_core, marker="x", s=ms,
c="blue")
ax.set_aspect('equal')
x0, y0, _ = np.diag(self.unit_cell.cell)
ax.plot([0.0, 0.0, x0, x0, 0.0],
[0.0, y0, y0, 0.0, 0.0], color="black", zorder=0)
bulk_atoms = ax.scatter([], [], color="w", edgecolor="k",
label="lattice atoms")
core1 = ax.scatter([], [], marker="x", label="initial core position",
c="r")
core2 = ax.scatter([], [], marker="x", label="glide core position",
c="b")
ax.legend(handles=[bulk_atoms, core1, core2], fontsize=12)
ax.set_xlabel(r"$\AA$")
ax.set_ylabel(r"$\AA$")
def displacements(self, bulk_positions, center, self_consistent=True,
tol=1e-6, max_iter=100, verbose=True):
if self.stroh is None:
self.init_stroh()
disp1 = np.real(self.stroh.displacement(bulk_positions - center))
if not self_consistent:
return disp1
res = np.inf
i = 0
while res > tol:
disloc_positions = bulk_positions + disp1
disp2 = np.real(self.stroh.displacement(disloc_positions - center))
res = np.abs(disp1 - disp2).max()
disp1 = disp2
if verbose:
print('disloc SCF', i, '|d1-d2|_inf =', res)
i += 1
if i > max_iter:
                raise RuntimeError('Self-consistency ' +
                                   f'did not converge in {max_iter} cycles')
return disp2
def build_cylinder(self, radius,
core_position=np.array([0., 0., 0.]),
extension=np.array([0., 0., 0.]),
fix_width=10.0, self_consistent=None):
if self_consistent is None:
self_consistent = self.self_consistent
extent = np.array([2 * (radius + fix_width),
2 * (radius + fix_width), 1.])
repeat = np.ceil(extent / np.diag(self.unit_cell.cell)).astype(int)
        # if the extension and core position are
        # within the unit cell, do not add extra unit cells
repeat_extension = np.floor(2.0 * extension /
np.diag(self.unit_cell.cell)).astype(int)
repeat_core_position = np.floor(2.0 * core_position /
np.diag(self.unit_cell.cell)).astype(int)
extra_repeat = np.stack((repeat_core_position,
repeat_extension)).max(axis=0)
repeat += extra_repeat
repeat[2] = 1 # exactly one cell in the periodic direction
# ensure correct parity in x and y directions
if repeat[0] % 2 != self.parity[0]:
repeat[0] += 1
if repeat[1] % 2 != self.parity[1]:
repeat[1] += 1
bulk = self.unit_cell * repeat
# in order to get center from an atom to the desired position
# we have to move the atoms in the opposite direction
bulk.positions -= self.unit_cell_core_position
center = np.diag(bulk.cell) / 2
shifted_center = center + core_position
cylinder_mask = get_centering_mask(bulk, radius,
core_position, extension)
# add square borders for the case of large extension or core position
x, y, _ = bulk.positions.T
x_mask = x - center[0] < extension[0] + core_position[0]
x_mask = x_mask * (x - center[0] > 0)
y_mask = np.abs(y - center[1]) < radius
square_mask = y_mask & x_mask
final_mask = square_mask | cylinder_mask
bulk = bulk[final_mask]
# disloc is a copy of bulk with displacements applied
disloc = bulk.copy()
disloc.positions += self.displacements(bulk.positions, shifted_center,
self_consistent=self_consistent)
r = np.sqrt(((bulk.positions[:, [0, 1]]
- center[[0, 1]])**2).sum(axis=1))
fix_mask = r > radius - fix_width
shifted_r = np.sqrt(((bulk.positions[:, [0, 1]] -
shifted_center[[0, 1]]) ** 2).sum(axis=1))
shifted_fix_max = shifted_r > radius - fix_width
extension = np.array(extension)
extended_center = center + extension
extended_r = np.sqrt(((bulk.positions[:, [0, 1]] -
extended_center[[0, 1]]) ** 2).sum(axis=1))
extended_fix_max = extended_r > radius - fix_width
final_fix_mask = fix_mask & shifted_fix_max & extended_fix_max
x, y, _ = bulk.positions.T
x_mask = x - center[0] < extension[0] + core_position[0]
x_mask = x_mask * (x - center[0] > 0)
y_mask = np.abs(y - center[1]) > radius - fix_width
# change mask only between the centers of the cylinders
final_fix_mask[x_mask] = y_mask[x_mask]
disloc.set_array('fix_mask', final_fix_mask)
disloc.set_constraint(FixAtoms(mask=final_fix_mask))
# adding vacuum and centering breaks consistency
# of displacement = dislo.positions - bulk.positions
# which is essential for plot_vitek and other tools
# I could not find a way to add vacuum to both disloc and bulk
# without spoiling the displacement
# disloc.center(vacuum=2 * fix_width, axis=(0, 1))
return bulk, disloc
def build_glide_configurations(self, radius,
average_positions=False, **kwargs):
final_core_position = np.array([self.glide_distance, 0.0, 0.0])
bulk_ini, disloc_ini = self.build_cylinder(radius,
extension=final_core_position,
**kwargs)
_, disloc_fin = self.build_cylinder(radius,
core_position=final_core_position,
**kwargs)
if average_positions:
            # get the fixed atoms constraint
FixAtoms = disloc_ini.constraints[0]
# get the indices of fixed atoms
fixed_atoms_indices = FixAtoms.get_indices()
# make the average position of fixed atoms
# between initial and the last position
ini_fix_pos = disloc_ini.get_positions()[fixed_atoms_indices]
fin_fix_pos = disloc_fin.get_positions()[fixed_atoms_indices]
new_av_pos = (ini_fix_pos + fin_fix_pos) / 2.0
positions = disloc_ini.get_positions()
positions[fixed_atoms_indices] = new_av_pos
disloc_ini.set_positions(positions, apply_constraint=False)
positions = disloc_fin.get_positions()
positions[fixed_atoms_indices] = new_av_pos
disloc_fin.set_positions(positions, apply_constraint=False)
averaged_cell = (disloc_ini.cell + disloc_fin.cell) / 2.0
disloc_ini.set_cell(averaged_cell)
disloc_fin.set_cell(averaged_cell)
return bulk_ini, disloc_ini, disloc_fin
def build_impurity_cylinder(self, disloc, impurity, radius,
imp_symbol="H",
core_position=np.array([0., 0., 0.]),
extension=np.array([0., 0., 0.]),
self_consistent=False,
extra_bulk_at_core=False,
core_radius=0.5,
shift=np.array([0.0, 0.0, 0.0])):
extent = np.array([2 * radius + np.linalg.norm(self.burgers),
2 * radius + np.linalg.norm(self.burgers), 1.])
repeat = np.ceil(extent / np.diag(self.unit_cell.cell)).astype(int)
        # if the extension and core position are
        # within the unit cell, do not add extra unit cells
repeat_extension = np.floor(extension /
np.diag(self.unit_cell.cell)).astype(int)
repeat_core_position = np.floor(core_position /
np.diag(self.unit_cell.cell)).astype(int)
extra_repeat = np.stack((repeat_core_position,
repeat_extension)).max(axis=0)
repeat += extra_repeat
repeat[2] = 1 # exactly one cell in the periodic direction
# ensure correct parity in x and y directions
if repeat[0] % 2 != self.parity[0]:
repeat[0] += 1
if repeat[1] % 2 != self.parity[1]:
repeat[1] += 1
impurities_unit_cell = impurity(directions=self.axes.tolist(),
size=(1, 1, 1),
symbol=imp_symbol,
pbc=(False, False, True),
latticeconstant=self.alat)
impurities_unit_cell.cell = self.unit_cell.cell
impurities_unit_cell.wrap(pbc=True)
duplicates = check_duplicates(impurities_unit_cell)
impurities_unit_cell = impurities_unit_cell[np.logical_not(duplicates)]
impurities_bulk = impurities_unit_cell * repeat
# in order to get center from an atom to the desired position
# we have to move the atoms in the opposite direction
impurities_bulk.positions -= self.unit_cell_core_position
# build a bulk impurities cylinder
mask = get_centering_mask(impurities_bulk,
radius + np.linalg.norm(self.burgers),
core_position, extension)
impurities_bulk = impurities_bulk[mask]
center = np.diag(impurities_bulk.cell) / 2
shifted_center = center + core_position
# use stroh displacement for impurities
# disloc is a copy of bulk with displacements applied
impurities_disloc = impurities_bulk.copy()
core_mask = get_centering_mask(impurities_bulk,
core_radius,
core_position, extension)
print(f"Ignoring {core_mask.sum()} core impurities")
non_core_mask = np.logical_not(core_mask)
        displacements = self.displacements(impurities_bulk.positions[non_core_mask],
                                           shifted_center,
                                           self_consistent=self_consistent)
        impurities_disloc.positions[non_core_mask] += displacements
if extra_bulk_at_core: # add extra bulk positions at dislocation core
bulk_mask = get_centering_mask(impurities_bulk,
1.1 * self.unit_cell_core_position[1],
core_position + shift, extension)
print(f"Adding {bulk_mask.sum()} extra atoms")
impurities_disloc.extend(impurities_bulk[bulk_mask])
mask = get_centering_mask(impurities_disloc,
radius,
core_position,
extension)
impurities_disloc = impurities_disloc[mask]
disloc_center = np.diag(disloc.cell) / 2.
delta = disloc_center - center
delta[2] = 0.0
impurities_disloc.positions += delta
impurities_disloc.cell = disloc.cell
return impurities_disloc
class BCCScrew111Dislocation(CubicCrystalDislocation):
def __init__(self, alat, C11, C12, C44, symbol='W'):
axes = np.array([[1, 1, -2],
[-1, 1, 0],
[1, 1, 1]])
burgers = alat * np.array([1, 1, 1]) / 2.0
unit_cell_core_position = alat * np.array([np.sqrt(6.)/6.0,
np.sqrt(2.)/6.0, 0])
parity = [0, 0]
unit_cell = BodyCenteredCubic(directions=axes.tolist(),
size=(1, 1, 1), symbol=symbol,
pbc=True,
latticeconstant=alat)
glide_distance = alat * np.linalg.norm(axes[0]) / 3.0
super().__init__(unit_cell, alat, C11, C12, C44,
axes, burgers, unit_cell_core_position, parity,
glide_distance)
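# Usage sketch (assumed tungsten inputs in ASE units, eV/A^3 for the elastic
# constants; these are illustrative numbers, not values shipped with the class):
#
# >>> screw = BCCScrew111Dislocation(alat=3.165, C11=3.26, C12=1.26,
# ...                                C44=1.00, symbol="W")  # doctest: +SKIP
# >>> W_bulk, W_screw = screw.build_cylinder(radius=20.0)  # doctest: +SKIP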
class BCCEdge111Dislocation(CubicCrystalDislocation):
def __init__(self, alat, C11, C12, C44, symbol='W'):
axes = np.array([[1, 1, 1],
[1, -1, 0],
[1, 1, -2]])
burgers = alat * np.array([1, 1, 1]) / 2.0
unit_cell_core_position = alat * np.array([(1.0/3.0) * np.sqrt(3.0)/2.0,
0.25 * np.sqrt(2.0), 0])
parity = [0, 0]
unit_cell = BodyCenteredCubic(directions=axes.tolist(),
size=(1, 1, 1), symbol=symbol,
pbc=True,
latticeconstant=alat)
glide_distance = np.linalg.norm(burgers) / 3.0
n_planes = 6
super().__init__(unit_cell, alat, C11, C12, C44,
axes, burgers, unit_cell_core_position, parity,
glide_distance, n_planes=n_planes)
class BCCMixed111Dislocation(CubicCrystalDislocation):
def __init__(self, alat, C11, C12, C44, symbol='W'):
axes = np.array([[1, -1, -2],
[1, 1, 0],
[1, -1, 1]])
burgers = alat * np.array([1, -1, -1]) / 2.0
parity = [0, 0]
unit_cell = BodyCenteredCubic(directions=axes.tolist(),
size=(1, 1, 1), symbol=symbol,
pbc=True,
latticeconstant=alat)
# middle of the right edge of the first upward triangle
core_position = (unit_cell.positions[1] +
unit_cell.positions[2]) / 2.0
unit_cell_core_position = np.array([core_position[0],
core_position[1], 0])
glide_distance = alat * np.linalg.norm(axes[0]) / 3.0
super().__init__(unit_cell, alat, C11, C12, C44,
axes, burgers, unit_cell_core_position, parity,
glide_distance)
class BCCEdge100Dislocation(CubicCrystalDislocation):
def __init__(self, alat, C11, C12, C44, symbol='W'):
axes = np.array([[1, 0, 0],
[0, 0, -1],
[0, 1, 0]])
burgers = alat * np.array([1, 0, 0])
unit_cell_core_position = alat * np.array([0.25,
0.25, 0])
parity = [0, 0]
unit_cell = BodyCenteredCubic(directions=axes.tolist(),
size=(1, 1, 1), symbol=symbol,
pbc=True,
latticeconstant=alat)
glide_distance = alat
n_planes = 2
super().__init__(unit_cell, alat, C11, C12, C44,
axes, burgers, unit_cell_core_position, parity,
glide_distance, n_planes=n_planes)
class BCCEdge100110Dislocation(CubicCrystalDislocation):
def __init__(self, alat, C11, C12, C44, symbol='W'):
axes = np.array([[1, 0, 0],
[0, 1, 1],
[0, -1, 1]])
burgers = alat * np.array([1, 0, 0])
unit_cell_core_position = alat * np.array([0.5,
np.sqrt(2.) / 4.0, 0])
parity = [0, 0]
unit_cell = BodyCenteredCubic(directions=axes.tolist(),
size=(1, 1, 1), symbol=symbol,
pbc=True,
latticeconstant=alat)
glide_distance = 0.5 * alat
n_planes = 2
super().__init__(unit_cell, alat, C11, C12, C44,
axes, burgers, unit_cell_core_position, parity,
glide_distance, n_planes=n_planes)
class DiamondGlide30degreePartial(CubicCrystalDislocation):
def __init__(self, alat, C11, C12, C44, symbol='C'):
axes = np.array([[1, 1, -2],
[1, 1, 1],
[1, -1, 0]])
burgers = alat * np.array([1, -2, 1.]) / 6.
disloCenterX = 0.5 * (alat * np.linalg.norm(axes[0])) / 6.0
# 1/4 + 1/2 * (1/3 - 1/4) - to be in the middle of the glide set
disloCenterY = 7.0 * (alat * np.linalg.norm(axes[1])) / 24.0
unit_cell_core_position = np.array([disloCenterX,
disloCenterY, 0])
parity = [0, 0]
unit_cell = Diamond(symbol, directions=axes.tolist(),
pbc=(False, False, True),
latticeconstant=alat)
glide_distance = alat * np.linalg.norm(axes[0]) / 4.0
n_planes = 2
# There is very small distance between
# atomic planes in glide configuration.
# Due to significant anisotropy application of the self consistent
# displacement field leads to deformation of the atomic planes.
# This leads to the cut plane crossing one of the atomic planes and
# thus breaking the stacking fault.
self_consistent = False
super().__init__(unit_cell, alat, C11, C12, C44,
axes, burgers, unit_cell_core_position, parity,
glide_distance, n_planes=n_planes,
self_consistent=self_consistent)
class DiamondGlide90degreePartial(CubicCrystalDislocation):
def __init__(self, alat, C11, C12, C44, symbol='C'):
axes = np.array([[1, 1, -2],
[1, 1, 1],
[1, -1, 0]])
burgers = alat * np.array([1., 1., -2.]) / 6.
disloCenterX = 0.5 * (alat * np.linalg.norm(axes[0])) / 6.0
# 1/4 + 1/2 * (1/3 - 1/4) - to be in the middle of the glide set
disloCenterY = 7.0 * (alat * np.linalg.norm(axes[1])) / 24.0
unit_cell_core_position = np.array([disloCenterX,
disloCenterY, 0])
parity = [0, 0]
unit_cell = Diamond(symbol, directions=axes.tolist(),
pbc=(False, False, True),
latticeconstant=alat)
glide_distance = alat * np.linalg.norm(axes[0]) / 4.0
n_planes = 2
# There is very small distance between
# atomic planes in glide configuration.
# Due to significant anisotropy application of the self consistent
# displacement field leads to deformation of the atomic planes.
# This leads to the cut plane crossing one of the atomic planes and
# thus breaking the stacking fault.
self_consistent = False
super().__init__(unit_cell, alat, C11, C12, C44,
axes, burgers, unit_cell_core_position, parity,
glide_distance, n_planes=n_planes,
self_consistent=self_consistent)
class CubicCrystalDissociatedDislocation(CubicCrystalDislocation):
def __init__(self, left_dislocation, right_dislocation, burgers):
"""This class represents a dissociated dislocation in a cubic crystal
with burgers vercor b = b_left + b_right.
Args:
left_dislocation (CubicCrystalDislocation): dislocation with b_left
right_dislocation (CubicCrystalDislocation): dislocation with b_right
burgers (ndarray of 3 floats): resulting burgers vector
Raises:
ValueError: If resulting burgers vector
burgers is not a sum of burgers vectors of
left and right dislocations.
            ValueError: If one of the properties of the
                left and right dislocations is not the same.
"""
try:
np.testing.assert_almost_equal(left_dislocation.burgers +
right_dislocation.burgers,
burgers)
except AssertionError as error:
print(error)
raise ValueError("Burgers vectors of left and right disloctions" +
"do not add up to the desired vector")
# checking that parameters of
# left and right dislocations are the same
try:
assert left_dislocation.alat == right_dislocation.alat
assert left_dislocation.C11 == right_dislocation.C11
assert left_dislocation.C12 == right_dislocation.C12
assert left_dislocation.C44 == right_dislocation.C44
np.testing.assert_equal(left_dislocation.unit_cell.get_chemical_symbols(),
right_dislocation.unit_cell.get_chemical_symbols())
np.testing.assert_equal(left_dislocation.unit_cell.cell.cellpar(),
right_dislocation.unit_cell.cell.cellpar())
np.testing.assert_equal(left_dislocation.unit_cell.positions,
right_dislocation.unit_cell.positions)
np.testing.assert_equal(left_dislocation.axes,
right_dislocation.axes)
np.testing.assert_equal(left_dislocation.unit_cell_core_position,
right_dislocation.unit_cell_core_position)
np.testing.assert_equal(left_dislocation.parity,
right_dislocation.parity)
np.testing.assert_equal(left_dislocation.glide_distance,
right_dislocation.glide_distance)
assert left_dislocation.n_planes == right_dislocation.n_planes
assert left_dislocation.self_consistent == right_dislocation.self_consistent
except AssertionError as error:
print("Parameters of left and right partials are not the same!")
print(error)
raise ValueError("Parameters of left and right" +
"partials must be the same")
self.left_dislocation = left_dislocation
self.right_dislocation = right_dislocation
super().__init__(left_dislocation.unit_cell,
left_dislocation.alat,
left_dislocation.C11,
left_dislocation.C12,
left_dislocation.C44,
left_dislocation.axes,
burgers,
unit_cell_core_position=left_dislocation.unit_cell_core_position,
parity=left_dislocation.parity,
glide_distance=right_dislocation.glide_distance,
n_planes=left_dislocation.n_planes,
self_consistent=left_dislocation.self_consistent)
def build_cylinder(self, radius, partial_distance=0,
core_position=np.array([0., 0., 0.]),
extension=np.array([0., 0., 0.]),
fix_width=10.0, self_consistent=None):
"""
Overloaded function to make dissociated dislocations.
Partial distance is provided as an integer to define number
of glide distances between two partials.
Parameters
----------
radius: float
radius of the cell
partial_distance: int
distance between partials (SF length) in number of glide distances.
Default is 0 -> non dissociated dislocation
with b = b_left + b_right is produced
"""
if self_consistent is None:
self_consistent = self.self_consistent
partial_distance_Angstrom = np.array(
[self.glide_distance * partial_distance, 0.0, 0.0])
bulk, disloc = self.left_dislocation.build_cylinder(radius,
extension=extension + partial_distance_Angstrom,
core_position=core_position,
fix_width=fix_width,
self_consistent=self_consistent)
_, disloc_right = self.right_dislocation.build_cylinder(radius,
core_position=core_position + partial_distance_Angstrom,
extension=extension,
fix_width=fix_width,
self_consistent=self_consistent)
u_right = disloc_right.positions - bulk.positions
disloc.positions += u_right
return bulk, disloc
def displacements(self, bulk_positions, center,
partial_distance=0, **kwargs):
"""Overloaded function to provide correct displacements
for the dissociated dislocation.
Partial distance is provided as an integer to define number
of glide distances between two partials.
"""
partial_distance_Angstrom = np.array(
[self.glide_distance * partial_distance, 0.0, 0.0])
left_u = self.left_dislocation.displacements(bulk_positions, center,
**kwargs)
right_u = self.right_dislocation.displacements(bulk_positions,
center + partial_distance_Angstrom,
**kwargs)
return left_u + right_u
class DiamondGlideScrew(CubicCrystalDissociatedDislocation):
def __init__(self, alat, C11, C12, C44, symbol='C'):
# aiming for the resulting burgers vector
burgers = alat * np.array([1, -1, 0]) / 2.
# 30 degree
burgers_left = alat * np.array([2., -1., -1.]) / 6.
left30 = DiamondGlide30degreePartial(alat, C11, C12, C44,
symbol=symbol)
left30.set_burgers(burgers_left)
# another 30 degree
# burgers_right = alat * np.array([1, -2, 1.]) / 6. - default value
right30 = DiamondGlide30degreePartial(alat, C11, C12, C44,
symbol=symbol)
super().__init__(left30, right30, burgers)
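# Usage sketch (assumed diamond-carbon inputs; `partial_distance` counts glide
# distances between the two 30 degree partials, 0 gives the perfect screw):
#
# >>> screw = DiamondGlideScrew(alat=3.57, C11=6.6, C12=0.8, C44=3.6,
# ...                           symbol="C")  # doctest: +SKIP
# >>> C_bulk, C_screw = screw.build_cylinder(
# ...     radius=20.0, partial_distance=5)  # doctest: +SKIP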
class DiamondGlide60Degree(CubicCrystalDissociatedDislocation):
def __init__(self, alat, C11, C12, C44, symbol='C'):
# aiming for the resulting burgers vector
burgers = alat * np.array([1, 0, -1]) / 2.
# 30 degree
burgers_left = alat * np.array([2., -1., -1.]) / 6.
left30 = DiamondGlide30degreePartial(alat, C11, C12, C44,
symbol=symbol)
left30.set_burgers(burgers_left)
# 90 degree
# burgers_right = alat * np.array([1, 1, -2.]) / 6. - default value
right90 = DiamondGlide90degreePartial(alat, C11, C12, C44,
symbol=symbol)
super().__init__(left30, right90, burgers)
class FCCScrewShockleyPartial(CubicCrystalDislocation):
def __init__(self, alat, C11, C12, C44, symbol='Fe'):
axes = np.array([[1, 1, -2],
[1, 1, 1],
[1, -1, 0]])
burgers = alat * np.array([1, -2, 1.]) / 6.
parity = [0, 0]
unit_cell = FaceCenteredCubic(symbol, directions=axes.tolist(),
pbc=(False, False, True),
latticeconstant=alat)
        # put the dislocation at the centroid of the first triangle in xy plane
        # sort the atoms by their distance from the origin in the xy plane
        sorted_indices = np.argsort(np.linalg.norm(unit_cell.positions[:, :2],
                                                   axis=1))
        # centroid coordinates are simply
        # the mean of the x, y coordinates of the triangle
(disloCenterX,
disloCenterY,
_) = unit_cell.positions[sorted_indices[:3]].mean(axis=0)
unit_cell_core_position = np.array([disloCenterX,
disloCenterY, 0])
glide_distance = alat * np.linalg.norm(axes[0]) / 4.0
n_planes = 2
self_consistent = True
super().__init__(unit_cell, alat, C11, C12, C44,
axes, burgers, unit_cell_core_position, parity,
glide_distance, n_planes=n_planes,
self_consistent=self_consistent)
class FCCScrew110Dislocation(CubicCrystalDissociatedDislocation):
def __init__(self, alat, C11, C12, C44, symbol='Fe'):
# aiming for the resulting burgers vector
burgers = alat * np.array([1, -1, 0]) / 2.
# Shockley partial
burgers_left = alat * np.array([2., -1., -1.]) / 6.
left_shockley = FCCScrewShockleyPartial(alat, C11, C12, C44,
symbol=symbol)
left_shockley.set_burgers(burgers_left)
# another Shockley partial
# burgers_right = alat * np.array([1, -2, 1.]) / 6. - default value
right_shockley = FCCScrewShockleyPartial(alat, C11, C12, C44,
symbol=symbol)
super().__init__(left_shockley, right_shockley, burgers)
class FCCEdgeShockleyPartial(CubicCrystalDislocation):
def __init__(self, alat, C11, C12, C44, symbol='Fe'):
axes = np.array([[1, -1, 0],
[1, 1, 1],
[-1, -1, 2]])
burgers = alat * np.array([1, -2, 1.]) / 6.
parity = [0, 0]
unit_cell = FaceCenteredCubic(symbol, directions=axes.tolist(),
pbc=(False, False, True),
latticeconstant=alat)
disloCenterX = 0.0
# middle between two (111) planes
disloCenterY = unit_cell.cell[1][1] / 6.0
unit_cell_core_position = np.array([disloCenterX,
disloCenterY, 0])
glide_distance = alat * np.linalg.norm(axes[0]) / 4.0
n_planes = 6
self_consistent = True
super().__init__(unit_cell, alat, C11, C12, C44,
axes, burgers, unit_cell_core_position, parity,
glide_distance, n_planes=n_planes,
self_consistent=self_consistent)
class FCCEdge110Dislocation(CubicCrystalDissociatedDislocation):
def __init__(self, alat, C11, C12, C44, symbol='Fe'):
# aiming for the resulting burgers vector
burgers = alat * np.array([1, -1, 0]) / 2.
# Shockley partial
burgers_left = alat * np.array([2., -1., -1.]) / 6.
left_shockley = FCCEdgeShockleyPartial(alat, C11, C12, C44,
symbol=symbol)
left_shockley.set_burgers(burgers_left)
# another Shockley partial
# burgers_right = alat * np.array([1, -2, 1.]) / 6. - default value
right_shockley = FCCEdgeShockleyPartial(alat, C11, C12, C44,
symbol=symbol)
super().__init__(left_shockley, right_shockley, burgers)
class FixedLineAtoms:
"""Constrain atoms to move along a given direction only."""
def __init__(self, a, direction):
self.a = a
self.dir = direction / np.sqrt(np.dot(direction, direction))
def adjust_positions(self, atoms, newpositions):
steps = newpositions[self.a] - atoms.positions[self.a]
newpositions[self.a] = (atoms.positions[self.a] +
np.einsum("ij,j,k", steps, self.dir, self.dir))
def adjust_forces(self, atoms, forces):
forces[self.a] = np.einsum("ij,j,k", forces[self.a], self.dir, self.dir)
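# Illustrative sketch of the projection used above: the einsum contracts each
# step with the unit direction and rescales by it, i.e. s -> (s . d) d, so
# motion is confined to the line along `direction`.
#
# >>> import numpy as np
# >>> d = np.array([0., 0., 1.])
# >>> steps = np.array([[1., 2., 3.]])
# >>> np.einsum("ij,j,k", steps, d, d)
# array([[0., 0., 3.]])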
def gamma_line(unit_cell, calc=None, shift_dir=0, surface=2,
size=[2, 2, 2], factor=15, n_dots=11,
relax=True, fmax=1.0e-2, return_images=False):
"""
    This function calculates cross-sections along `shift_dir`
    of the generalized stacking fault (GSF) gamma
    surface with `surface` orientation.
*A gamma surface is defined as the energy variation when the
crystal is cut along a particular plane and then one of the
resulting parts is displaced along a particular direction. This
quantity is related to the energy landscape of dislocations and
provides data out of equilibrium, preserving the crystal state.*
    For examples for the case of W and more details, see section 4.2
and figure 2 in [J. Phys.: Condens. Matter 25 (2013) 395502 (15pp)]\
(http://iopscience.iop.org/0953-8984/25/39/395502)
Parameters
----------
unit_cell: ase.Atoms
Unit cell to construct gamma surface from.
Should have a ase.calculator attached as calc
in order to perform relaxation.
calc: ase.calculator
if unit_cell.calc is None set unit_cell.calc to calc
shift_dir: int
index of unit_cell axes to shift atoms
surface: int
index of unit_cell axes to be the surface normal direction
size: list of ints
start size of the cell
factor: int
factor to increase the size of the cell along
the surface normal direction
n_dots: int
number of images along the gamma line
relax: bool
flag to perform relaxation
fmax: float
maximum force value for relaxation
return_images: bool
flag to control if the atomic configurations are returned
together with the results
Returns
-------
deltas: np.array
shift distance of every image in Angstroms
totens: np.array
gamma surface energy in eV / Angstroms^2
images: list of ase.Atoms
images along the gamma surface. Returned if return_images is True
"""
from ase.optimize import LBFGSLineSearch
if unit_cell.calc is None:
if calc is None:
raise RuntimeError("Please set atoms calculator or provide calc")
else:
unit_cell.calc = calc
size = np.array(size)
directions = np.array([0, 1, 2])
period = unit_cell.cell[shift_dir, shift_dir]
size[surface] *= factor
slab = unit_cell * size.tolist()
top_mask = slab.positions.T[surface] > slab.cell[surface, surface] / 2.0
surface_direction = directions == surface
slab.pbc = (~surface_direction).tolist()
slab.center(axis=surface, vacuum=10)
images = []
totens = []
deltas = []
for delta in np.linspace(0.0, period, num=n_dots):
image = slab.copy()
image.positions[:, shift_dir][top_mask] += delta
        select_all = np.ones(len(image), dtype=bool)
image.set_constraint(
FixedLineAtoms(select_all, surface_direction.astype(int)))
image.calc = unit_cell.calc
if image.get_forces().max() < fmax:
            raise RuntimeError(
                "Initial max force is smaller than fmax! "
                "Check surface direction")
if relax:
opt = LBFGSLineSearch(image)
opt.run(fmax=fmax)
images.append(image)
deltas.append(delta)
totens.append(image.get_potential_energy())
totens = np.array(totens)
totens -= totens[0]
surface_area_dirs = directions[~(directions == surface)]
surface_area = (slab.cell.lengths()[surface_area_dirs[0]] *
slab.cell.lengths()[surface_area_dirs[1]])
totens /= surface_area # results in eV/A^2
if return_images:
return np.array(deltas), totens, images
else:
return np.array(deltas), totens
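# Usage sketch (hedged; `my_calc` is a placeholder for any ASE calculator):
#
# >>> from ase.build import bulk as build_bulk  # doctest: +SKIP
# >>> cell = build_bulk("W", "bcc", a=3.165, cubic=True)  # doctest: +SKIP
# >>> deltas, energies = gamma_line(cell, calc=my_calc, shift_dir=0,
# ...                               surface=2)  # doctest: +SKIP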
| 110,569 | 34.644745 | 120 | py |
matscipy | matscipy-master/matscipy/rings.py | #
# Copyright 2014-2015 Lars Pastewka (U. Freiburg)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import itertools
import numpy as np
from matscipy.neighbours import neighbour_list
from .ffi import distances_on_graph, find_sp_rings
###
def ring_statistics(a, cutoff, maxlength=-1):
"""
Compute number of shortest path rings in sample.
See: D.S. Franzblau, Phys. Rev. B 44, 4925 (1991)
Parameters
----------
a : ase.Atoms
Atomic configuration.
cutoff : float
Cutoff for neighbor counting.
maxlength : float, optional
Maximum ring length. Search for rings will stop at this length. This
is useful to speed up calculations for large systems.
Returns
-------
ringstat : array
Array with number of shortest path rings.
"""
i, j, r = neighbour_list('ijD', a, cutoff)
d = distances_on_graph(i, j)
if maxlength > 0:
ringstat = np.zeros(maxlength)
rs = find_sp_rings(i, j, r, d, maxlength)
ringstat[:len(rs)] += rs
else:
ringstat = find_sp_rings(i, j, r, d, maxlength)
    return ringstat
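# Usage sketch (hedged; `a` is any ase.Atoms configuration and the cutoff is
# an assumed, material-specific choice, typically just beyond the
# first-neighbour distance):
#
# >>> ringstat = ring_statistics(a, cutoff=2.85)  # doctest: +SKIP
# >>> ringstat[6]  # number of six-membered shortest-path rings  # doctest: +SKIP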
| 1,893 | 29.548387 | 76 | py |
matscipy | matscipy-master/matscipy/logger.py | #
# Copyright 2015-2017, 2021 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Log status to screen.
"""
import os
import sys
import inspect
from functools import reduce
from math import isnan, isinf
from numbers import Real
from ase.parallel import world
###
def hdr_str(s, x):
""" Return header description strings
"""
if isinstance(x, str):
return s
r = [ ]
try:
for i, v in enumerate(x):
r += [ s+'('+chr(ord('x')+i)+')' ]
except:
r = s
return r
def hdrfmt_str(x, i):
""" Return header format string for datatype x
"""
if isinstance(x, str):
return '{'+str(i)+':>20}'
elif isinstance(x, int):
return '{'+str(i)+':>8}'
elif isinstance(x, Real):
return '{'+str(i)+':>20}'
else:
# Is this something we need to iterate over?
try:
return [ hdrfmt_str(k,i+j) for j, k in enumerate(x) ]
except:
return '{'+str(i)+':>20}'
def numfmt_str(x, i):
""" Return numeric format string for datatype x
"""
if isinstance(x, str):
return '{'+str(i)+':>20}'
elif isinstance(x, int):
return '{'+str(i)+':>8}'
elif isinstance(x, Real):
return '{'+str(i)+':>20.12e}'
else:
# Is this something we need to iterate over?
try:
return [ numfmt_str(k,i+j) for j, k in enumerate(x) ]
except:
return '{'+str(i)+':>20}'
def flatten(x):
if isinstance(x, str):
return [ x ]
else:
# Is this something we can iterate over?
try:
return reduce(lambda a,b: a+b, [ flatten(i) for i in x ])
except:
return [ x ]
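# Illustrative sketch: `flatten` expands nested iterables recursively but
# keeps strings atomic, which the column formatting below relies on.
#
# >>> flatten(['it', (1.0, 2.0), 'T'])
# ['it', 1.0, 2.0, 'T']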
###
class Logger(object):
# Debug option, redirect all output to screen
__all_output_to_stdout = False
def __init__(self, logfile=sys.stdout, outevery=1, sepevery=10):
self.sepevery = sepevery
self.set_outevery(outevery)
self.it = 1
self.logfn = None
self.logfile = None
self.buffer = [ ]
self.set_logfile(logfile)
def __open_logfile(self):
if world.rank != 0:
return
if self.logfile is None and self.logfn is not None and \
not self.__all_output_to_stdout:
self.outcounter = self.outevery
self.sepcounter = 0
fn = self.logfn.format(self.it)
if os.path.exists(fn):
# Save old log file as .bak
i = 0
while os.path.exists('{0}.{1}.bak'.format(fn, i)):
i += 1
os.rename(fn, '{0}.{1}.bak'.format(fn, i))
self.logfile = open(fn, 'w')
def _print(self, s, logfile=None):
if world.rank != 0:
return
if logfile and self.logfile != logfile:
print(s, file=logfile)
if self.logfile:
print(s, file=self.logfile)
else:
self.buffer += [ s ]
def flush(self):
if self.logfile:
self.logfile.flush()
def set_logfile(self, logfile):
if world.rank != 0:
return
if self.__all_output_to_stdout:
self.logfile = sys.stdout
elif isinstance(logfile, str):
if logfile.find('{0}') != -1 or logfile.find('{}') != -1:
self.logfn = logfile
self.__open_logfile()
else:
self.logfile = open(logfile, 'w')
else:
self.logfile = logfile
if self.logfile is not None:
for s in self.buffer:
self._print(s)
self.buffer = [ ]
def pr(self, s, caller=None, logfile=None):
self.__open_logfile()
if caller is None:
caller = inspect.stack()[1]
self._print('# {{{0}}}: {1}'.format(caller[3], s), logfile=logfile)
self.flush()
def warn(self, s, caller=None):
self.pr('Warning: '+s, caller=caller, logfile=sys.stdout)
def st(self, hdr, vals, force_print=False):
assert len(hdr) == len(vals)
self.__open_logfile()
do_print = force_print
if self.outevery == 1:
do_print = True
else:
self.outcounter -= 1
if self.outcounter <= 0:
do_print = True
self.outcounter = self.outevery
if do_print:
self.sepcounter -= 1
if self.sepcounter <= 0:
# For vectors we need a column for each component
hdr = flatten([ hdr_str(a, b) for a,b in zip(hdr, vals) ])
fmt_str = '#'+reduce(
lambda a,b: '{0} {1}'.format(a, b),
flatten([ hdrfmt_str(i,j)
for j,i in enumerate(flatten(vals)) ])
)
self._print(fmt_str.format(*[ '{0}:{1}'.format(str(i+1), s)
for i, s in enumerate(hdr) ]))
self.sepcounter = self.sepevery
fmt_str = ' '+reduce(
lambda a,b: '{0} {1}'.format(a, b),
flatten([ numfmt_str(i,j)
for j,i in enumerate(flatten(vals)) ])
)
self._print(fmt_str.format(*flatten(vals)))
self.flush()
def iteration_finished(self):
self.it += 1
self.outcounter = self.outevery
self.sepcounter = 0
if self.logfn is not None:
self.logfile = None
def get_logfile(self):
return self.logfile
def has_logfile(self):
if world.rank != 0:
raise RuntimeError('`has_logfile` only works on the root rank.')
return self.logfile is not None
def set_outevery(self, outevery):
self.outevery = outevery
self.outcounter = outevery
self.sepcounter = 0
###
quiet = Logger(None)
screen = Logger()
| 6,687 | 25.967742 | 76 | py |
matscipy | matscipy-master/matscipy/pressurecoupling.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2020, 2022, 2023 Thomas Reichenbach (Fraunhofer IWM)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Classes to be used with ASE in order to perform pressure relaxation
and/or sliding simulations under pressure.
A usage example can be found in docs/applications/tribology.ipynb.
Some parts are based on
L. Pastewka, S. Moser, and M. Moseler, Tribol. Lett. 39, 49 (2010)
as indicated again below.
"""
import logging
import numpy as np
from ase.units import kB, fs, GPa
logger = logging.getLogger(__name__)
class AutoDamping(object):
"""Automatic damping.
Following L. Pastewka, S. Moser, and M. Moseler,
Tribol. Lett. 39, 49 (2010).
Parameters
----------
C11 : float
Elastic material constant.
p_c : float
Empirical cut-off parameter.
"""
def __init__(self, C11, p_c=0.01):
self.C11 = float(C11)
self.p_c = float(p_c)
def get_M_gamma(self, slider, atoms):
"""Calculate mass M and dissipation constant gamma.
Parameters
----------
slider : matscipy.pressurecoupling.SlideWithNormalPressureCuboidCell
ASE constraint used for sliding with pressure coupling.
atoms : ase.Atoms
Atomic configuration.
Returns
-------
M: float
Mass parameter.
gamma : float
Dissipation constant parameter.
"""
A = slider.get_A(atoms)
l = atoms.cell[slider.vdir, slider.vdir]
t_c = l / slider.v
omega_c = 2. * np.pi / t_c
h1 = atoms.positions[slider.top_mask, slider.Pdir].min()
h2 = atoms.positions[slider.bottom_mask, slider.Pdir].max()
h = h1 - h2
k = self.C11 * A / h
M = k * omega_c ** -2 * np.sqrt(self.p_c ** -2 - 1.)
# gamma = np.sqrt(2. * M * k) # incorrect expression from paper
gamma = 2. * np.sqrt(M * k)
return M, gamma
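# Illustrative numeric sketch (assumed values, not from the paper): with the
# spring constant k = C11 * A / h and circular frequency omega_c, the lid
# mass M follows from the cut-off p_c, and the damping is then critical:
#
# >>> import numpy as np
# >>> k, omega_c, p_c = 10.0, 0.5, 0.01   # assumed values
# >>> M = k * omega_c ** -2 * np.sqrt(p_c ** -2 - 1.0)
# >>> gamma = 2.0 * np.sqrt(M * k)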
class FixedDamping(object):
"""Damping with fixed damping constant and fixed mass.
Parameters
----------
gamma : float
Damping constant.
M_factor : float
Multiplicative factor to increase actual mass of upper rigid atoms.
"""
def __init__(self, gamma, M_factor=1.0):
self.gamma = float(gamma)
self.M_factor = float(M_factor)
def get_M_gamma(self, slider, atoms):
"""Calculate mass M and damping constant gamma.
Parameters
----------
slider : matscipy.pressurecoupling.SlideWithNormalPressureCuboidCell
ASE constraint used for sliding with pressure coupling.
atoms : ase.Atoms
Atomic configuration.
Returns
-------
M: float
Mass parameter.
gamma : float
Damping parameter.
"""
M_top = atoms.get_masses()[slider.top_mask].sum()
return M_top * self.M_factor, self.gamma
class FixedMassCriticalDamping(object):
"""Damping with fixed mass and critical damping constant.
Useful for fast pressure equilibration with small lid mass.
Parameters
----------
C11 : float
Elastic material constant.
M_factor : float
Multiplicative factor to increase actual mass of upper rigid atoms.
"""
def __init__(self, C11, M_factor=1.0):
self.C11 = float(C11)
self.M_factor = float(M_factor)
def get_M_gamma(self, slider, atoms):
"""Calculate mass M and damping constant gamma.
Parameters
----------
slider : matscipy.pressurecoupling.SlideWithNormalPressureCuboidCell
ASE constraint used for sliding with pressure coupling.
atoms : ase.Atoms
Atomic configuration.
Returns
-------
M: float
Mass parameter.
gamma : float
Damping parameter.
"""
M_top = atoms.get_masses()[slider.top_mask].sum()
M = M_top * self.M_factor
A = slider.get_A(atoms)
h1 = atoms.positions[slider.top_mask, slider.Pdir].min()
h2 = atoms.positions[slider.bottom_mask, slider.Pdir].max()
h = h1 - h2
k = self.C11 * A / h
gamma = 2. * np.sqrt(M * k)
return M, gamma
class SlideWithNormalPressureCuboidCell(object):
"""ASE constraint used for sliding with pressure coupling.
Following L. Pastewka, S. Moser, and M. Moseler,
Tribol. Lett. 39, 49 (2010).
Parameters
----------
top_mask : boolean numpy array
        Array a with a[i] == True for each index i of the
        constrained top atoms (the atoms which slide with constant speed).
bottom_mask : boolean numpy array
        Same as top_mask but for the completely fixed bottom atoms.
Pdir : int
Index of cell axis (0, 1, 2) along which normal pressure is applied.
    P : float
Normal pressure in ASE units (e.g. 10.0 * ase.units.GPa).
vdir : int
Index of cell axis (0, 1, 2) along which to slide.
v : float
Constant sliding speed in ASE units
(e.g. 100.0 * ase.units.m / ase.units.s).
    damping : object
Damping object (e.g. matscipy.pressurecoupling.AutoDamping instance).
"""
def __init__(self, top_mask, bottom_mask, Pdir, P, vdir, v, damping):
self.top_mask = top_mask
self.Ntop = top_mask.sum()
self.bottom_mask = bottom_mask
self.Pdir = int(Pdir)
self.P = float(P)
self.vdir = int(vdir)
self.v = float(v)
self.damping = damping
@property
def Tdir(self):
"""Direction used for thermostatting.
        The thermostat direction is normal to both the sliding direction
        and the direction of the applied normal load.
Returns
-------
int
Direction used for thermostatting.
"""
all_dirs = {0, 1, 2}
all_dirs.remove(self.Pdir)
all_dirs.remove(self.vdir)
return all_dirs.pop()
@property
def middle_mask(self):
"""Mask of free atoms.
Returns
-------
numpy boolean array
Array a with a[i] == True for each index i of the atoms
not being part of lower or upper rigid group.
"""
return np.logical_not(np.logical_or(self.top_mask, self.bottom_mask))
def adjust_positions(self, atoms, positions):
"""Do not adjust positions."""
pass
def get_A(self, atoms):
"""Calculate cell area normal to applied load.
Returns
-------
float
Cell area normal to applied load.
Raises
------
NotImplementedError
If atoms.get_cell() is non-orthogonal,
SlideWithNormalPressureCuboidCell only works for orthogonal cells.
"""
if np.abs(atoms.get_cell().sum() - atoms.get_cell().trace()) > 0:
raise NotImplementedError("Can't do non-orthogonal cell!")
A = 1.0
for c in (0, 1, 2):
if c != self.Pdir:
A *= atoms.cell[c, c]
return A
def adjust_forces(self, atoms, forces):
"""Adjust forces of upper and lower rigid atoms.
Raises
------
NotImplementedError
If atoms.get_cell() is non-orthogonal,
SlideWithNormalPressureCuboidCell only works for orthogonal cells.
"""
if np.abs(atoms.get_cell().sum() - atoms.get_cell().trace()) > 0:
raise NotImplementedError("Can't do non-orthogonal cell!")
A = self.get_A(atoms)
M, gamma = self.damping.get_M_gamma(self, atoms)
Ftop = forces[self.top_mask, self.Pdir].sum()
vtop = atoms.get_velocities()[self.top_mask,
self.Pdir].sum() / self.Ntop
F = Ftop - self.P * A - gamma * vtop
a = F / M
        forces[self.bottom_mask, :] = 0.0
        forces[self.top_mask, :] = 0.0
forces[self.top_mask,
self.Pdir] = atoms.get_masses()[self.top_mask] * a
def adjust_momenta(self, atoms, momenta):
"""Adjust momenta of upper and lower rigid atoms.
Raises
------
NotImplementedError
If atoms.get_cell() is non-orthogonal,
SlideWithNormalPressureCuboidCell only works for orthogonal cells.
"""
if np.abs(atoms.get_cell().sum() - atoms.get_cell().trace()) > 0:
raise NotImplementedError("Can't do non-orthogonal cell!")
top_masses = atoms.get_masses()[self.top_mask]
vtop = (momenta[self.top_mask,
self.Pdir] / top_masses).sum() / self.Ntop
momenta[self.bottom_mask, :] = 0.0
momenta[self.top_mask, :] = 0.0
momenta[self.top_mask, self.vdir] = self.v * top_masses
momenta[self.top_mask, self.Pdir] = vtop * top_masses
def adjust_potential_energy(self, atoms):
"""Do not adjust energy."""
return 0.0
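# Minimal end-to-end sketch, hedged: the production recipe (thermostatting
# only the free atoms along slider.Tdir) lives in
# docs/applications/tribology.ipynb. `slab`, `top_mask` and `bottom_mask` are
# hypothetical user inputs; a plain Langevin on all atoms is used for brevity.
def _example_sliding_run(slab, top_mask, bottom_mask, steps=100):
    from ase.md.langevin import Langevin
    from ase.units import m, s
    damping = AutoDamping(C11=500.0 * GPa)
    slider = SlideWithNormalPressureCuboidCell(
        top_mask, bottom_mask, Pdir=2, P=5.0 * GPa,
        vdir=0, v=100.0 * m / s, damping=damping)
    slab.set_constraint(slider)
    integrator = Langevin(slab, 1.0 * fs, temperature_K=300.0,
                          friction=0.01)  # friction in ASE units
    with open('slide.log', 'w', 1) as handle:  # line buffered
        logger = SlideLogger(handle, slab, slider, integrator)
        logger.write_header()
        integrator.attach(logger)
        integrator.run(steps)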
class SlideLogger(object):
"""Logger to be attached to an ASE integrator.
For new files (not restart jobs), the write_header method should
be called once in order to write the header to the file.
Parameters
----------
handle : filehandle
        Filehandle, e.g. pointing to a file opened in 'w' or 'a' mode.
atoms : ase.Atoms
Atomic configuration.
slider : slider object
Instance of SlideWithNormalPressureCuboidCell.
    integrator : ASE integrator object
Instance of ASE integrator e.g. ase.md.langevin.Langevin.
step_offset : int
Last step already written to log file, useful for restarts.
    Examples
    --------
    1. For new runs:
        log_handle = open(logfn, 'w', 1)  # line buffered
        logger = SlideLogger(log_handle, ...)
        logger.write_header()
        integrator.attach(logger)
        integrator.run(steps_integrate)
        log_handle.close()
    2. For restarts:
        with open(logfn, 'r') as log_handle:
            step_offset = SlideLog(log_handle).step[-1]
        log_handle = open(logfn, 'a', 1)  # line buffered append
        logger = SlideLogger(log_handle, ..., step_offset=step_offset)
        integrator.attach(logger)
        integrator.run(steps_integrate)
        log_handle.close()
"""
def __init__(self, handle, atoms, slider, integrator, step_offset=0):
self.handle = handle
self.atoms = atoms
self.slider = slider
self.integrator = integrator
self.step_offset = step_offset
def write_header(self):
"""Write header of log-file."""
        self.handle.write('# step | time / fs | T_thermostat / K | P_top / GPa'
                          ' | P_bottom / GPa | h / Ang | v / (Ang/fs) '
                          '| a / (Ang/fs**2) | tau_top / GPa | '
                          'tau_bottom / GPa\n')
def __call__(self):
"""Write current status (time, T, P, ...) to log-file."""
slider = self.slider
atoms = self.atoms
integrator = self.integrator
p = atoms.get_momenta()[slider.middle_mask, slider.Tdir]
v = atoms.get_velocities()[slider.middle_mask, slider.Tdir]
dof = len(p)
Ekin = 0.5 * (p * v).sum()
T = 2. * Ekin / (dof * kB)
step = integrator.nsteps + self.step_offset
t = step * integrator.dt / fs
A = atoms.cell[slider.vdir, slider.vdir]\
* atoms.cell[slider.Tdir, slider.Tdir]
F = atoms.get_forces(apply_constraint=False)
F_top = F[slider.top_mask, slider.Pdir].sum()
F_bottom = F[slider.bottom_mask, slider.Pdir].sum()
P_top = F_top / A / GPa
P_bottom = F_bottom / A / GPa
h1 = atoms.positions[slider.top_mask, slider.Pdir].min()
h2 = atoms.positions[slider.bottom_mask, slider.Pdir].max()
h = h1 - h2
v = atoms.get_velocities()[slider.top_mask, slider.Pdir][0] * fs
a = atoms.get_forces(md=True)[slider.top_mask, slider.Pdir][0]\
/ atoms.get_masses()[slider.top_mask][0] * fs ** 2
F_v_top = F[slider.top_mask, slider.vdir].sum()
tau_top = F_v_top / A / GPa
F_v_bottom = F[slider.bottom_mask, slider.vdir].sum()
tau_bottom = F_v_bottom / A / GPa
self.handle.write('%d %r %r %r %r %r %r %r %r %r\n' %
(step, float(t), float(T), float(P_top),
float(P_bottom), float(h), float(v), float(a),
float(tau_top), float(tau_bottom)))
class SlideLog(object):
"""Reader for logs written with SlideLogger instance.
Parameters
----------
    handle : filehandle or str
        Handle or filename pointing to the log file.
Attributes
----------
step : ndarray
Step indices 0, 1, 2, ....
time : ndarray
Simulation time in fs at step.
T_thermostat : ndarray
        Instantaneous temperature in K in the thermostat region,
        computed only from degrees of freedom along the thermalized direction.
P_top : ndarray
Normal pressure on lid in GPa.
P_bottom : ndarray
Normal pressure on base in GPa.
h : ndarray
Separation of lid and base in Ang.
v : ndarray
Normal speed of lid in Ang / fs.
a : ndarray
Normal acceleration of lid in Ang / fs ** 2.
tau_top : ndarray
Shear stress on lid in GPa.
tau_bottom : ndarray
Shear stress on base in GPa.
rows : ndarray
All data in a 2d array with axis 0 step and axis 1
the values ordered as above.
"""
def __init__(self, handle):
self.rows = np.loadtxt(handle)
(self.step, self.time, self.T_thermostat, self.P_top,
self.P_bottom, self.h, self.v, self.a, self.tau_top,
self.tau_bottom) = self.rows.T
self.step = self.step.astype(int)
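# Read-back sketch: SlideLog accepts a filename or handle, so a finished run
# can be summarized directly ('slide.log' as written by the sketch above).
def _example_read_log(logfn='slide.log'):
    log = SlideLog(logfn)
    return log.time[-1], log.tau_top.mean(), log.P_top.mean()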
| 14,605 | 32.120181 | 79 | py |
matscipy | matscipy-master/matscipy/surface.py | #
# Copyright 2014, 2020 James Kermode (Warwick U.)
# 2019 James Brixey (Warwick U.)
# 2015 Punit Patel (Warwick U.)
# 2014 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import itertools
import functools
import numpy as np
from numpy.linalg import norm, inv
def gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
while b:
a, b = b, a%b
return a
class MillerIndex(np.ndarray):
"""
    Representation of a three- or four-index Miller direction or plane
A :class:`MillerIndex` can be constructed from vector or parsed from a string::
x = MillerIndex('-211')
y = MillerIndex('111', type='plane')
z = x.cross(y)
        print(x)          # prints "[-211]"
        print(y)          # prints "(111)", note round brackets denoting a plane
        print(z.latex())
assert(angle_between(x,y) == pi/2.)
assert(angle_between(y,z) == pi/2.)
assert(angle_between(x,z) == pi/2.)
"""
__array_priority__ = 101.0
brackets = {'direction': '[]',
'direction_family': '<>',
'plane': '()',
'plane_family': '{}'}
all_brackets = list(itertools.chain(*brackets.values()))
def __new__(cls, v=None, type='direction'):
if isinstance(v, str):
v = MillerIndex.parse(v)
if len(v) == 3 or len(v) == 4:
self = np.ndarray.__new__(cls, len(v))
self[:] = v
else:
raise ValueError('%s input v should be of length 3 or 4' % cls.__name__)
self.type = type
self.simplify()
return self
def __array_finalize__(self, obj):
if obj is None:
return
self.type = getattr(obj, 'type', 'direction')
def __repr__(self):
return ('%s(['+'%d'*len(self)+'])') % ((self.__class__.__name__,) + tuple(self))
def __str__(self):
bopen, bclose = MillerIndex.brackets[self.type]
return (bopen+'%d'*len(self)+bclose) % tuple(self)
def latex(self):
"""
Format this :class:`MillerIndex` as a LaTeX string
"""
s = '$'
bopen, bclose = MillerIndex.brackets[self.type]
s += bopen
for component in self:
if component < 0:
s += r'\bar{%d}' % abs(component)
else:
s += '%d' % component
s += bclose
s += '$'
return s
@classmethod
def parse(cls, s):
r"""
Parse a Miller index string
Negative indices can be denoted by:
1. leading minus sign, e.g. ``[11-2]``
2. trailing ``b`` (for 'bar'), e.g. ``112b``
3. LaTeX ``\bar{}``, e.g. ``[11\bar{2}]`` (which renders as :math:`[11\bar{2}]` in LaTeX)
Leading or trailing brackets of various kinds are ignored.
i.e. ``[001]``, ``{001}``, ``(001)``, ``[001]``, ``<001>``, ``001`` are all equivalent.
        Returns an array of components (h,k,l) or (h,k,i,l)
"""
if not isinstance(s, str):
raise TypeError("Can't parse from %r of type %r" % (s, type(s)))
orig_s = s
for (a, b) in [(r'\bar{','-')] + [(b,'') for b in MillerIndex.all_brackets]:
s = s.replace(a, b)
L = list(s)
components = np.array([1,1,1,1]) # space for up to 4 elements
i = 3 # parse backwards from end of string
while L:
if i < 0:
raise ValueError('Cannot parse Miller index from string "%s", too many components found' % orig_s)
c = L.pop()
if c == '-':
if i == 3:
raise ValueError('Miller index string "%s" cannot end with a minus sign' % orig_s)
components[i+1] *= -1
elif c == 'b':
components[i] *= -1
elif c in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
components[i] *= int(c)
i -= 1
else:
raise ValueError('Unexpected character "%s" in miller index string "%s"' % (c, orig_s))
if i == 0:
return components[1:]
elif i == -1:
return components
        else:
            raise ValueError('Cannot parse Miller index from string %s, too few components found' % orig_s)
def simplify(self):
"""
Simplify by dividing through by greatest common denominator
"""
d = abs(functools.reduce(gcd, self))
self[:] /= d
def simplified(self):
copy = self.copy()
copy.simplify()
return copy
def norm(self):
return np.linalg.norm(self)
def normalised(self):
a = self.as3()
return np.array(a, dtype=float)/a.norm()
hat = normalised
def cross(self, other):
a = self.as3()
b = MillerIndex(other).as3()
return np.cross(a, b).view(MillerIndex).simplified()
def cosine(self, other):
other = MillerIndex(other)
return np.dot(self.normalised(), other.normalised())
def angle(self, other):
return np.arccos(self.cosine(other))
def as4(self):
if len(self) == 4:
return self
else:
h, k, l = self
i = -(h+l)
return MillerIndex((h,k,i,l))
def as3(self):
if len(self) == 3:
return self
else:
h, k, i, l = self
return MillerIndex((h, k, l))
def plane_spacing(self, a):
return a/self.as3().norm()
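# Brief self-check sketch mirroring the class docstring: parsing, cross
# product and angles between directions (angle_between is defined below and
# resolved at call time).
def _example_miller_algebra():
    x = MillerIndex('-211')
    y = MillerIndex('111', type='plane')
    z = x.cross(y)
    assert np.isclose(angle_between(x, y), np.pi / 2.)
    assert np.isclose(angle_between(x, z), np.pi / 2.)
    return x, y, z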
def MillerPlane(v):
"""Special case of :class:`MillerIndex` with ``type="plane"``"""
return MillerIndex(v, 'plane')
def MillerDirection(v):
"""Special case of :class:`MillerIndex` with ``type="direction"`` (the default)"""
return MillerIndex(v, 'direction')
def angle_between(a, b):
"""Angle between crystallographic directions between a=[ijk] and b=[lmn], in radians."""
return MillerIndex(a).angle(b)
def make_unit_slab(unit_cell, axes):
"""
General purpose unit slab creation routine
Only tested with cubic unit cells.
Code translated from quippy.structures.unit_slab()
https://github.com/libAtoms/QUIP/blob/public/src/libAtoms/Structures.f95
Arguments
---------
unit_cell : Atoms
Atoms object containing primitive unit cell
axes: 3x3 array
Miller indices of desired slab, as columns
Returns
-------
slab : Atoms
Output slab, with axes aligned with x, y, z.
"""
a1 = axes[:,0]
a2 = axes[:,1]
a3 = axes[:,2]
rot = np.zeros((3,3))
rot[0,:] = a1/norm(a1)
rot[1,:] = a2/norm(a2)
rot[2,:] = a3/norm(a3)
pos = unit_cell.get_positions().T
lattice = unit_cell.get_cell().T
lattice = np.dot(rot, lattice)
at = unit_cell.copy()
at.set_positions(np.dot(rot, pos).T)
at.set_cell(lattice.T)
sup = at * (5,5,5)
sup.positions[...] -= sup.positions.mean(axis=0)
sup_lattice = np.zeros((3,3))
for i in range(3):
sup_lattice[:,i] = (axes[0,i]*lattice[:,0] +
axes[1,i]*lattice[:,1] +
axes[2,i]*lattice[:,2])
sup.set_cell(sup_lattice.T, scale_atoms=False)
# Form primitive cell by discarding atoms with
# lattice coordinates outside range [-0.5,0.5]
    d = [0.01,0.02,0.03] # Small shift to avoid coincidental alignments
i = 0
g = inv(sup_lattice)
sup_pos = sup.get_positions().T
while True:
t = np.dot(g, sup_pos[:, i] + d)
if (t <= -0.5).any() | (t >= 0.5).any():
del sup[i]
sup_pos = sup.get_positions().T
i -= 1 # Retest since we've removed an atom
if i == len(sup)-1:
break
i += 1
sup.set_scaled_positions(sup.get_scaled_positions())
return sup
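# Minimal usage sketch (assumes ASE is installed): build a Si slab with
# x || [1-10], y || [001], z || [110] from the cubic diamond cell.
def _example_si_110_slab():
    from ase.build import bulk
    si = bulk('Si', 'diamond', a=5.43, cubic=True)
    axes = np.array([[1, 0, 1],
                     [-1, 0, 1],
                     [0, 1, 0]])  # Miller indices as columns
    return make_unit_slab(si, axes)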
| 8,658 | 29.489437 | 114 | py |
matscipy | matscipy-master/matscipy/ffi.py | #
# Copyright 2022 Lucas Frérot (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Foreign function interface module.
Depending on the build-system (particularly pip version), the compiled extension
_matscipy.<cpython>.so may be installed in site-packages/ or in matscipy/, with
the latter being the intended destination. This module abstracts away the
import of symbols from the extension.
Example usage:
--------------
>>> from .ffi import first_neighbours # imports a function from extension
>>> from . import ffi # import as module
"""
try:
from ._matscipy import * # noqa
except ModuleNotFoundError:
from _matscipy import * # noqa
from warnings import warn as _warn
_warn("importing top-level _matscipy")
| 1,444 | 33.404762 | 80 | py |
matscipy | matscipy-master/matscipy/numerical.py | #
# Copyright 2014-2015, 2017, 2021 Lars Pastewka (U. Freiburg)
# 2018, 2020 Jan Griesser (U. Freiburg)
# 2014, 2020 James Kermode (Warwick U.)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Numerical algorithms for force, stress, hessian, etc."""
import numpy as np
import ase
from scipy.sparse import coo_matrix
from ase.calculators.calculator import Calculator
def numerical_forces(atoms: ase.Atoms, d: float = 1e-5):
"""
Compute numerical forces using finite differences.
Parameters
----------
atoms : ase.Atoms
        Atomic configuration in a local or global minimum.
d : float
Displacement increment.
"""
return Calculator().calculate_numerical_forces(atoms, d=d)
def numerical_stress(atoms: ase.Atoms, d: float = 1e-5, voigt: bool = True):
"""
Compute numerical stresses using finite differences.
Parameters
----------
atoms : ase.Atoms
        Atomic configuration in a local or global minimum.
d : float
Displacement increment.
voigt : bool
Return results in Voigt notation.
"""
return Calculator().calculate_numerical_stress(atoms, d=d, voigt=voigt)
def numerical_hessian(atoms: ase.Atoms, d: float = 1e-5, indices=None) -> coo_matrix:
"""
    Compute the Hessian matrix from the Jacobian of forces using central differences.
Parameters
----------
atoms: ase.Atoms
        Atomic configuration in a local or global minimum.
d: float
Displacement increment
indices:
        Compute the Hessian only for these atom IDs
"""
nat = len(atoms)
if indices is None:
indices = range(nat)
row = []
col = []
H = []
for i, AtomId1 in enumerate(indices):
for direction in range(3):
atoms.positions[AtomId1, direction] += d
fp_nc = atoms.get_forces().ravel()
atoms.positions[AtomId1, direction] -= 2 * d
fn_nc = atoms.get_forces().ravel()
atoms.positions[AtomId1, direction] += d
dH_nc = (fn_nc - fp_nc) / (2 * d)
for j, AtomId2 in enumerate(range(nat)):
for k in range(3):
H.append(dH_nc[3 * j + k])
row.append(3 * i + direction)
col.append(3 * AtomId2 + k)
return coo_matrix(
(H, (row, col)), shape=(3 * len(indices), 3 * len(atoms))
)
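# Consistency sketch (assumes ASE's Lennard-Jones calculator): the numerical
# Hessian of an equilibrium dimer should be symmetric to numerical accuracy.
def _example_hessian_symmetry():
    from ase import Atoms
    from ase.calculators.lj import LennardJones
    # Dimer at the LJ minimum separation 2**(1/6) (sigma = 1)
    dimer = Atoms('Ar2', positions=[[0, 0, 0], [0, 0, 2.0 ** (1.0 / 6.0)]])
    dimer.calc = LennardJones()
    H = numerical_hessian(dimer, d=1e-5).todense()
    return np.allclose(H, H.T, atol=1e-4)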
def numerical_nonaffine_forces(atoms: ase.Atoms, d: float = 1e-5):
"""
    Calculate numerical non-affine forces using central differences.
    This is done by deforming the box, rescaling the atoms, and measuring the forces.
Parameters
----------
atoms : ase.Atoms
        Atomic configuration in a local or global minimum.
d : float
Finite difference step size.
"""
nat = len(atoms)
cell = atoms.cell.copy()
fna_ncc = np.zeros((nat, 3, 3, 3))
for i in range(3):
# Diagonal
x = np.eye(3)
x[i, i] += d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
fplus = atoms.get_forces()
x[i, i] -= 2 * d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
fminus = atoms.get_forces()
fna_ncc[..., i, i] = (fplus - fminus) / (2 * d)
# Off diagonal
x = np.eye(3)
        j = i - 2  # off-diagonal pairs: (0, 1), (1, 2), (2, 0)
x[i, j] = x[j, i] = d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
fplus = atoms.get_forces()
x[i, j] = x[j, i] = -d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
fminus = atoms.get_forces()
fna_ncc[..., i, j] = fna_ncc[..., j, i] = (fplus - fminus) / (4 * d)
return fna_ncc
def numerical_nonaffine_forces_reference(atoms: ase.Atoms, d: float = 1e-5):
"""
Compute nonaffine forces in the reference configuration using finite differences.
"""
fna_ncc = np.zeros([len(atoms)] + 3 * [3])
pos = atoms.positions
for i in range(len(atoms)):
for dim in range(3):
pos[i, dim] += d
fna_ncc[i, dim] = atoms.get_stress(voigt=False)
pos[i, dim] -= 2 * d
fna_ncc[i, dim] -= atoms.get_stress(voigt=False)
pos[i, dim] += d # reset position
fna_ncc *= -atoms.get_volume() / (2 * d)
return fna_ncc
def get_derivative_volume(atoms: ase.Atoms, d: float = 1e-5):
"""
Calculate the derivative of the volume with respect to strain using central differences.
Parameters
----------
atoms : ase.Atoms
        Atomic configuration in a local or global minimum.
d : float
Finite difference step size.
"""
cell = atoms.cell.copy()
dvol = np.zeros((3, 3))
for i in range(3):
# Diagonal
x = np.eye(3)
x[i, i] += d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
Vplus = atoms.get_volume()
x[i, i] -= 2 * d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
Vminus = atoms.get_volume()
derivative_volume = (Vplus - Vminus) / (2 * d)
dvol[i, i] = derivative_volume
# Off diagonal
j = i - 2
x[i, j] = d
x[j, i] = d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
Vplus = atoms.get_volume()
x[i, j] = -d
x[j, i] = -d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
Vminus = atoms.get_volume()
derivative_volume = (Vplus - Vminus) / (4 * d)
dvol[i, j] = derivative_volume
dvol[j, i] = derivative_volume
return dvol
def get_derivative_wave_vector(atoms: ase.Atoms, d: float = 1e-5):
"""
Calculate the derivative of a wave vector with respect to strain using central differences.
Parameters
----------
atoms : ase.Atoms
        Atomic configuration in a local or global minimum.
d : float
Finite difference step size.
"""
cell = atoms.cell.copy()
e = np.ones(3)
initial_k = 2 * np.pi * np.dot(np.linalg.inv(cell), e)
dk = np.zeros((3, 3, 3))
for i in range(3):
# Diagonal
x = np.eye(3)
x[i, i] += d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
k_pos = 2 * np.pi * np.dot(np.linalg.inv(atoms.get_cell()), e)
x[i, i] -= 2 * d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
k_minus = 2 * np.pi * np.dot(np.linalg.inv(atoms.get_cell()), e)
derivative_k = (k_pos - k_minus) / (2 * d)
dk[:, i, i] = derivative_k
# Off diagonal --> xy, xz, yz
j = i - 2
x[i, j] = d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
k_pos = 2 * np.pi * np.dot(np.linalg.inv(atoms.get_cell()), e)
x[i, j] = -d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
k_minus = 2 * np.pi * np.dot(np.linalg.inv(atoms.get_cell()), e)
derivative_k = (k_pos - k_minus) / (2 * d)
dk[:, i, j] = derivative_k
        # Off diagonal --> yx, zx, zy
x[j, i] = d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
k_pos = 2 * np.pi * np.dot(np.linalg.inv(atoms.get_cell()), e)
x[j, i] = -d
atoms.set_cell(np.dot(cell, x), scale_atoms=True)
k_minus = 2 * np.pi * np.dot(np.linalg.inv(atoms.get_cell()), e)
derivative_k = (k_pos - k_minus) / (2 * d)
dk[:, j, i] = derivative_k
return dk
| 8,113 | 28.292419 | 95 | py |
matscipy | matscipy-master/matscipy/angle_distribution.py | #
# Copyright 2014-2015 Lars Pastewka (U. Freiburg)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from . import ffi
###
def angle_distribution(i, j, dr, nbins, *args):
"""
Compute a bond angle distribution from a neighbour list.
Parameters
----------
i, j, dr : array_like
Neighbour list, including list of distance vectors.
nbins : int
Number of bins for bond angle histogram.
cutoff : float, optional
Bond length cutoff, i.e. consider only bonds shorter than this length.
"""
return ffi.angle_distribution(np.asarray(i), np.asarray(j),
np.asarray(dr), nbins, *args)
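# Minimal usage sketch (assumes ASE): the bond-angle histogram of bulk Si
# should be dominated by the tetrahedral angle (~109.47 degrees).
def _example_si_angle_distribution(nbins=90, cutoff=2.6):
    from ase.build import bulk
    from matscipy.neighbours import neighbour_list
    si = bulk('Si', 'diamond', a=5.43, cubic=True) * (3, 3, 3)
    i, j, dr = neighbour_list('ijD', si, cutoff)
    return angle_distribution(i, j, dr, nbins, cutoff)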
| 1,426 | 32.186047 | 78 | py |
matscipy | matscipy-master/matscipy/numpy_tricks.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import itertools
import numpy as np
def mabincount(x, weights, minlength, axis=0):
"""
    Multi-axis bin count. Executes a bin count along a specific axis.
(`numpy.bincount` only works on flattened arrays.)
Parameters
----------
x : array_like
Array containing bin indices.
weights : array_like
Weights to be binned, dimension `axis` must have same size as x.
minlength : int
A minimum number of bins for the output array.
axis : int, optional
Axis along which the bin counting is performed. (Default: 0)
Returns
-------
out : np.ndarray
        Array containing the counted data. Array has same dimensions as
        weights, with the exception of dimension `axis`, which has at least
        `minlength` entries.
"""
# Construct shapes of result array and iterator
result_shape = list(weights.shape)
result_shape[axis] = minlength
iter_shape = list(weights.shape)
del iter_shape[axis]
# Initialize result array to zero
result = np.zeros(result_shape, dtype=weights.dtype)
# Loop over all trailing dimensions and perform bin count
for c in itertools.product(*(range(s) for s in iter_shape)):
axis_slice = list(c)
axis_slice.insert(axis, slice(None))
axis_slice = tuple(axis_slice)
result[axis_slice] = np.bincount(x, weights=weights[axis_slice], minlength=minlength)
# Return results
return result
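# Tiny usage sketch: binning rows of 3-vectors in one call, equivalent to one
# np.bincount per column.
def _example_mabincount():
    x = np.array([0, 1, 1, 2])
    weights = np.arange(12, dtype=float).reshape(4, 3)
    summed = mabincount(x, weights, minlength=4, axis=0)
    assert summed.shape == (4, 3)
    assert np.allclose(summed[1], weights[1] + weights[2])
    return summed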
| 2,266 | 32.338235 | 93 | py |
matscipy | matscipy-master/matscipy/__init__.py | #
# Copyright 2015, 2017 Lars Pastewka (U. Freiburg)
# 2015 Till Junge (EPFL)
# 2014-2015 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Generic stuff may go here.
from matscipy.logger import screen
from .distributed_computation import BaseWorker, BaseResultManager
def has_parameter(name):
"""
Test if a parameter has been provided in params.py.
Parameters
----------
name : str
Name of the parameter.
Returns
-------
value : bool
Returns True if parameter exists.
"""
import sys
for x in ['.', '..']:
if x not in sys.path:
sys.path += [x]
import params
return name in params.__dict__
def parameter(name, default=None, logger=screen):
"""
Read parameter from params.py control file.
Parameters
----------
name : str
Name of the parameter.
default : optional
Default value. Will be returned if parameter is not present.
Returns
-------
value
Value of the parameter.
"""
import sys
for x in ['.', '..']:
if x not in sys.path:
sys.path += [x]
import params
try:
value = params.__dict__[name]
logger.pr('(user value) {0} = {1}'.format(name, value))
except KeyError:
if default is not None:
value = default
logger.pr('(default value) {0} = {1}'.format(name, value))
else:
raise
return value
from ._version import __version__
| 2,247 | 25.761905 | 72 | py |
matscipy | matscipy-master/matscipy/hydrogenate.py | #
# Copyright 2015 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import itertools
import numpy as np
import ase
from matscipy.neighbours import first_neighbours, neighbour_list
###
def hydrogenate(a, cutoff, bond_length, b=None, mask=[True, True, True],
exclude=None, vacuum=None):
"""
    Hydrogenate a slab of material at its periodic boundaries.
    The boundary conditions along hydrogenated directions are turned
    nonperiodic.
Parameters
----------
a : ase.Atoms
Atomic configuration.
cutoff : float
Cutoff for neighbor counting.
bond_length : float
X-H bond length for hydrogenation.
b : ase.Atoms, optional
If present, this is the configuration to hydrogenate. Number of atoms
must be identical to a object. All bonds present in a but not present
in b will be hydrogenated in b.
mask : list of bool
Cartesian directions which to hydrogenate, only if b argument is not
given.
exclude : array_like
Boolean array masking atoms to be excluded from hydrogenation.
vacuum : float, optional
Add this much vacuum after hydrogenation.
Returns
-------
a : ase.Atoms
Atomic configuration of the hydrogenated slab.
"""
if b is None:
b = a.copy()
b.set_pbc(np.logical_not(mask))
if exclude is None:
exclude = np.zeros(len(a), dtype=bool)
i_a, j_a, D_a, d_a = neighbour_list('ijDd', a, cutoff)
i_b, j_b = neighbour_list('ij', b, cutoff)
firstneigh_a = first_neighbours(len(a), i_a)
firstneigh_b = first_neighbours(len(b), i_b)
coord_a = np.bincount(i_a, minlength=len(a))
coord_b = np.bincount(i_b, minlength=len(b))
hydrogens = []
# Surface atoms have coord_a != coord_b. Those need hydrogenation
for k in np.arange(len(a))[np.logical_and(coord_a!=coord_b,
np.logical_not(exclude))]:
l1_a = firstneigh_a[k]
l2_a = firstneigh_a[k+1]
l1_b = firstneigh_b[k]
l2_b = firstneigh_b[k+1]
n_H = 0
for l_a in range(l1_a, l2_a):
assert i_a[l_a] == k
bond_exists = False
for l_b in range(l1_b, l2_b):
assert i_b[l_b] == k
if j_a[l_a] == j_b[l_b]:
bond_exists = True
if not bond_exists:
# Bond existed before cut
hydrogens += [b[k].position+bond_length*D_a[l_a]/d_a[l_a]]
n_H += 1
assert n_H == coord_a[k]-coord_b[k]
if hydrogens == []:
raise RuntimeError('No Hydrogen created.')
b += ase.Atoms(['H']*len(hydrogens), hydrogens)
if vacuum is not None:
axis=[]
for i in range(3):
if mask[i]:
axis += [i]
b.center(vacuum, axis=axis)
return b
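# Minimal usage sketch (assumes ASE): passivate the two surfaces of a diamond
# slab normal to z, keeping x and y periodic. Cutoff and bond length are
# typical C-C and C-H values.
def _example_hydrogenate_diamond():
    from ase.build import bulk
    slab = bulk('C', 'diamond', a=3.57, cubic=True) * (2, 2, 2)
    return hydrogenate(slab, cutoff=1.85, bond_length=1.09,
                       mask=[False, False, True], vacuum=6.0)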
| 3,594 | 30.814159 | 77 | py |
matscipy | matscipy-master/matscipy/distributed_computation.py | #
# Copyright 2015 James Kermode (Warwick U.)
# 2015 Till Junge (EPFL)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import multiprocessing
import multiprocessing.managers
try:
import argparse
except ImportError:
from matscipy.logger import screen
    screen.pr('argparse module not available, some functionality disabled')
import abc
import datetime
import sys
class BaseResultManager(object):
"""
Baseclass for job distribution servers. User needs to implement the method process
"""
__metaclass__ = abc.ABCMeta
def __init__(self, port, key):
"""
Keyword Arguments:
port -- listening port
key -- auth_key
"""
self.port = port
self.key = key
self.job_queue = None
self.result_queue = None
self.todo_counter = None
self.work_done_flag = None
self.manager = None
self.done = False
self.create_manager()
def create_manager(self):
"""
creates a multiprocessing.SyncManager
"""
self.job_queue = multiprocessing.JoinableQueue()
self.result_queue = multiprocessing.JoinableQueue()
        # the -1 stands for 'uninitialized'
self.todo_counter = multiprocessing.Manager().Value('i', -1)
self.work_done_flag = multiprocessing.Manager().Event()
self.work_done_flag.clear()
# This is based on the examples in the official docs of multiprocessing.
# get_{job|result}_q return synchronized proxies for the actual Queue
# objects.
class JobQueueManager(multiprocessing.managers.SyncManager):
pass
JobQueueManager.register('get_job_queue',
callable=lambda: self.job_queue)
JobQueueManager.register('get_result_queue',
callable=lambda: self.result_queue)
JobQueueManager.register('get_todo_counter',
callable=lambda: self.todo_counter,
proxytype= multiprocessing.managers.ValueProxy)
JobQueueManager.register('get_work_done_event',
callable=lambda: self.work_done_flag,
proxytype= multiprocessing.managers.EventProxy)
self.manager = JobQueueManager(address=('', self.port), authkey=self.key)
self.manager.start()
def set_todo_counter(self, counter):
self.todo_counter.set(counter)
self.done = (counter == 0)
def get_todo_counter(self):
return self.todo_counter.get()
def decrement_todo_counter(self):
        new_counter = self.todo_counter.get() - 1
        self.done = (new_counter == 0)
        self.todo_counter.set(new_counter)
@classmethod
def get_arg_parser(cls, parser=None):
"""
        create or extend an argument parser to read command line arguments
        required by the server.
Keyword Arguments:
parser -- optional: if provided, parser is extended to include port and
authentication key
"""
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument('--port', type=int, default=9995,
help='server listening port')
parser.add_argument('--auth-token', type=str, default='auth_token',
help=('shared information used to authenticate the '
'client to the server'))
return parser
def run(self):
"""
        this is the actual serving method. It fills the job queue and
        processes incoming results.
"""
print("Start serving jobs and processing results")
while not self.done:
self.schedule_available_jobs()
self.receive_results()
print()
print("Signalling end of work to worker processes")
self.work_done_flag.set()
print("Waiting for stragglers to hand in results")
self.result_queue.join()
print("Wrapping this up")
self.manager.shutdown()
@abc.abstractmethod
def schedule_available_jobs(self):
"""
to be implemented by inheriting classes. should push available jobs
into the job queue
"""
raise NotImplementedError()
def receive_results(self):
"""
proposed standard result receiver, can be overloaded by inheriting
classes
"""
try:
result = self.result_queue.get()
if result:
value, job_id = result
self.process(value, job_id)
finally:
self.result_queue.task_done()
@abc.abstractmethod
def process(self, value, job_id):
"""
        to be implemented by inheriting classes. Should process the result
        `value` of the job with the given `job_id`.
"""
raise NotImplementedError()
class BaseWorker(multiprocessing.Process):
"""
Baseclass for distributed calculation worker threads
"""
__metaclass__ = abc.ABCMeta
def __init__(self, server_address, port, key, verbose=False, walltime=None):
"""
Keyword Arguments:
server_address -- ip or fully qualified hostname
port -- listening port
key -- auth_key
verbose -- (default False) if set, outputs debugging messages
walltime -- (default None) if set, worker commits suicide after
walltime hours
"""
super(BaseWorker, self).__init__()
self.server_address = server_address
self.port = port
self.key = key
self.job_queue = None
self.result_queue = None
self.todo_counter = None
self.work_done_flag = None
self.manager = None
self.create_manager()
self.verbose = verbose
self.commit_suicide = walltime is not None
self.time_of_death = None
if self.commit_suicide:
self.time_of_death = (datetime.datetime.now() +
datetime.timedelta(hours = walltime))
def create_manager(self):
"""
creates a multiprocessing.SyncManager
"""
self.job_queue = multiprocessing.JoinableQueue()
self.result_queue = multiprocessing.JoinableQueue()
        # the -1 stands for 'uninitialized'
self.todo_counter = multiprocessing.Manager().Value('i', -1)
self.work_done_flag = multiprocessing.Manager().Event()
self.work_done_flag.clear()
# This is based on the examples in the official docs of multiprocessing.
# get_{job|result}_q return synchronized proxies for the actual Queue
# objects.
class ServerQueueManager(multiprocessing.managers.SyncManager):
pass
ServerQueueManager.register('get_job_queue')
ServerQueueManager.register('get_result_queue')
ServerQueueManager.register('get_todo_counter')
ServerQueueManager.register('get_work_done_event')
self.manager = ServerQueueManager(
address=(self.server_address, self.port),
authkey=self.key)
self.manager.connect()
self.job_queue = self.manager.get_job_queue()
self.result_queue = self.manager.get_result_queue()
self.todo_counter = self.manager.get_todo_counter()
self.work_done_flag = self.manager.get_work_done_event()
return self.manager
@classmethod
def get_arg_parser(cls, parser=None):
"""
        create or extend an argument parser to read command line arguments
        required by the client.
Keyword Arguments:
parser -- optional: if provided, parser is extended to include port and
authentication key
"""
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument('--server_address', metavar='INET_ADDR', type=str,
default='',
help=('job server ip address or fully qualified '
'hostname'))
parser.add_argument('--port', type=int, default=9995,
help='server listening port')
parser.add_argument('--auth-token', type=str, default='auth_token',
help=('shared information used to authenticate the '
'client to the server'))
return parser
def run(self):
"""
standard method that any multiprocessing.Process must implement
"""
if self.verbose:
print("Starting to run")
if self.commit_suicide:
def gotta_commit_suicide():
do_I = datetime.datetime.now() > self.time_of_death
if do_I:
print("Reached walltime, stopping accepting new jobs (zombie)")
return do_I
else:
def gotta_commit_suicide():
return False
while not self.work_done_flag.is_set() and not gotta_commit_suicide():
try:
if self.verbose:
print("trying to get a job")
job_description, job_id = self.job_queue.get()
if self.verbose:
print("got job {}".format(job_id))
try:
self.process(job_description, job_id)
except Exception as err:
print("ERROR:::: {}".format(err))
raise
finally:
try:
self.job_queue.task_done()
except EOFError:
pass
@abc.abstractmethod
def process(self, job_description, job_id):
raise NotImplementedError()
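# Minimal subclass sketch (hedged, illustrative names only): a manager that
# schedules integer payloads and a worker that squares them. Real applications
# override the same two hooks with their own job descriptions.
class _SquareResultManager(BaseResultManager):
    def __init__(self, port, key, njobs=10):
        super(_SquareResultManager, self).__init__(port, key)
        self.set_todo_counter(njobs)
        self.results = {}
        self._njobs = njobs
        self._scheduled = False
    def schedule_available_jobs(self):
        if not self._scheduled:
            for job_id in range(self._njobs):
                # payload is the integer itself; (description, job_id) tuples
                self.job_queue.put((job_id, job_id))
            self._scheduled = True
    def process(self, value, job_id):
        self.results[job_id] = value
        self.decrement_todo_counter()
class _SquareWorker(BaseWorker):
    def process(self, job_description, job_id):
        self.result_queue.put((job_description ** 2, job_id))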
| 10,625 | 34.657718 | 86 | py |
matscipy | matscipy-master/matscipy/neighbours.py | #
# Copyright 2014-2015, 2017-2019, 2021 Lars Pastewka (U. Freiburg)
# 2020 Jonas Oldenstaedt (U. Freiburg)
# 2020 Wolfram G. Nöhring (U. Freiburg)
# 2019 Jan Griesser (U. Freiburg)
# 2015 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tools for computing and working with local topology of atomic structures."""
import itertools as it
import typing as ts
from abc import ABC, abstractmethod
from collections import defaultdict
from copy import deepcopy
import numpy as np
import ase
from ase.data import atomic_numbers
from ase.geometry import find_mic
from . import ffi
from .ffi import first_neighbours, get_jump_indicies
from .molecules import Molecules
class Neighbourhood(ABC):
"""Abstract class defining a neighbourhood of atoms (pairs, triplets)."""
def __init__(self, atom_types=None):
"""Initialize with atoms and optional atom types."""
self.atom_type = atom_types \
if atom_types is not None else lambda i: np.asanyarray(i)
@abstractmethod
def get_pairs(self, atoms: ase.Atoms, quantities: str, cutoff=None):
"""Return requested data on pairs."""
@abstractmethod
def get_triplets(self,
atoms: ase.Atoms,
quantities: str,
neighbours=None,
cutoff=None,
full_connectivity=False):
"""Return requested data on triplets."""
@staticmethod
def mask(pair_distances, cutoff):
if not isinstance(cutoff, dict):
return pair_distances > cutoff
raise NotImplementedError("heterogeneous cutoffs not implemented")
@staticmethod
def make_result(quantities, connectivity, D, d, S,
accepted_quantities) -> ts.List:
"""Construct result list."""
if not set(quantities) <= set(accepted_quantities):
unknowns = set(quantities) - set(accepted_quantities)
raise ValueError(f"Unknown requested quantities {unknowns}")
e_size = connectivity.shape[1]
quantities_map = {
idx: connectivity[:, i]
for i, idx in enumerate("ijk"[:e_size])
}
quantities_map.update({'d': d, 'D': D})
res = [quantities_map[data] for data in quantities]
if len(res) == 1:
return res[0]
return res
@staticmethod
def compute_distances(
atoms: ase.Atoms,
connectivity: np.ndarray,
indices: ts.List[int],
) -> ts.Tuple[np.ndarray, np.ndarray]:
"""Return distances and vectors for connectivity."""
n_nuplets = connectivity.shape[0]
dim = atoms.positions.shape[1]
positions = [atoms.positions[col] for col in connectivity.T]
D = np.zeros((n_nuplets, len(indices), dim))
d = np.zeros((n_nuplets, len(indices)))
if positions:
for i, idx in enumerate(indices):
D[:, i, :], d[:, i] = \
find_mic(positions[idx[1]] - positions[idx[0]],
atoms.cell, atoms.pbc)
# if connectivity.shape[1] == 3:
# for i, idx in enumerate(indices):
# D[:, i, :] = \
# (positions[idx[1]] - positions[idx[0]])
# d[:, i] = np.linalg.norm(D[:, i], axis=-1)
return D.squeeze(), d.squeeze()
def connected_triplets(self, atoms: ase.Atoms, pair_list, triplet_list,
nb_pairs):
i_p, j_p = pair_list
ij_t, ik_t, jk_t = triplet_list
first_p = first_neighbours(nb_pairs, ij_t)
all_ij_pairs = []
all_ijm_types = []
all_ijn_types = []
for pair_im, pair_in in zip(ij_t, ik_t):
pairs_ij = ik_t[first_p[pair_im]:first_p[pair_im + 1]]
all_ij_pairs.append(pairs_ij[(pairs_ij != pair_im)
& (pairs_ij != pair_in)])
all_ijm_types.append(
self.find_triplet_types(atoms, i_p[pair_im], j_p[pairs_ij],
j_p[pair_im]))
all_ijn_types.append(
self.find_triplet_types(atoms, i_p[pair_in], j_p[pairs_ij],
j_p[pair_in]))
return all_ij_pairs, all_ijm_types, all_ijn_types
    def triplet_to_numbers(self, atoms: ase.Atoms, i, j, k):
        """Return atomic numbers of the i, j, k triplet components."""
        # np.atleast_1d keeps the per-component arrays indexable; the original
        # map() iterator was consumed by max() before being reused.
        ids = [np.atleast_1d(np.asarray(x)) for x in (i, j, k)]
        max_size = max(map(len, ids))
        full_ids = np.empty((3, max_size), dtype=ids[0].dtype)
        for idx, atom_ids in enumerate(ids):
            full_ids[idx, :] = atom_ids  # length-1 entries broadcast
        return atoms.numbers[full_ids]  # rows: numbers of i, j, k
def find_triplet_types(self, atoms: ase.Atoms, i, j, k):
"""Return triplet types from atom ids."""
return self.triplet_type(*self.triplet_to_numbers(atoms, i, j, k))
@staticmethod
def lexsort(connectivity: np.ndarray):
return np.lexsort(np.flipud(connectivity.T))
@abstractmethod
def double_neighbourhood(self):
"""Return neighbourhood with double cutoff/connectivity."""
@abstractmethod
def reverse_pair_indices(self,
i_p: np.ndarray,
j_p: np.ndarray,
r_p: np.ndarray):
"""Return indices of reverse pairs."""
class CutoffNeighbourhood(Neighbourhood):
"""Class defining neighbourhood based on proximity."""
def __init__(self,
atom_types=None,
pair_types=None,
triplet_types=None,
cutoff: ts.Union[float, dict] = None):
"""Initialize with atoms, atom types, pair types and cutoff.
Parameters
----------
atom_types : ArrayLike
atom types array
pair_types : function of 2 atom type arrays
maps 2 atom types array to an array of pair types
cutoff : float or dict
Cutoff for neighbor search. It can be
- A single float: This is a global cutoff for all elements.
- A dictionary: This specifies cutoff values for element
pairs. Specification accepts element numbers of symbols.
Example: {(1, 6): 1.1, (1, 1): 1.0, ('C', 'C'): 1.85}
- A list/array with a per atom value: This specifies the radius
of an atomic sphere for each atoms. If spheres overlap, atoms
are within each others neighborhood.
"""
super().__init__(atom_types)
self.pair_type = (pair_types if pair_types is not None else
lambda i, j: np.ones_like(i))
self.triplet_type = (triplet_types if triplet_types is not None else
lambda i, j, k: np.ones_like(i))
self.cutoff = cutoff
def get_pairs(self, atoms: ase.Atoms, quantities: str, cutoff=None):
"""Return pairs and quantities from conventional neighbour list."""
if cutoff is None:
cutoff = self.cutoff
return neighbour_list(quantities, atoms, cutoff)
def get_triplets(self,
atoms: ase.Atoms,
quantities: str,
neighbours=None,
cutoff=None):
"""Return triplets and quantities from conventional neighbour list."""
if cutoff is None:
cutoff = self.cutoff
full_connectivity = 'k' in quantities
if neighbours is None:
i_p, j_p, d_p, D_p = neighbour_list("ijdD", atoms, cutoff)
else:
i_p, j_p, d_p, D_p = neighbours
first_n = first_neighbours(len(atoms), i_p)
# Getting all references in pair list
ij_t, ik_t, jk_t = triplet_list(first_n, d_p, cutoff, i_p, j_p)
connectivity = np.array([ij_t, ik_t, jk_t]).T
if full_connectivity and np.any(jk_t == -1):
raise ValueError("Cutoff is too small for complete "
"triplet connectivity")
D, d = None, None
# If any distance is requested, compute distances vectors and norms
# Distances are computed from neighbour list
if "d" in quantities or "D" in quantities:
D = np.zeros((len(ij_t), 3, 3))
D[:, 0] = D_p[ij_t] # i->j
D[:, 1] = D_p[ik_t] # i->k
D[:, 2] = D[:, 1] - D[:, 0] # j->k
d = np.linalg.norm(D, axis=-1) # distances
return self.make_result(
quantities, connectivity, D, d, None, accepted_quantities="ijkdD")
def double_neighbourhood(self):
double_cutoff = deepcopy(self.cutoff)
if isinstance(self.cutoff, defaultdict):
double_cutoff.default_factory = \
lambda: 2 * self.cutoff.default_factory()
if isinstance(double_cutoff, dict):
for k in double_cutoff:
double_cutoff[k] *= 2
else:
double_cutoff *= 2
return double_cutoff, self.cutoff, self
def reverse_pair_indices(self,
i_p: np.ndarray,
j_p: np.ndarray,
r_p: np.ndarray):
return find_indices_of_reversed_pairs(i_p, j_p, r_p)
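# Minimal usage sketch (assumes ASE): pair and triplet data for bulk Si within
# a 2.6 Angstrom cutoff.
def _example_cutoff_neighbourhood():
    from ase.build import bulk
    si = bulk('Si', 'diamond', a=5.43, cubic=True)
    neighbourhood = CutoffNeighbourhood(cutoff=2.6)
    i_p, j_p, d_p = neighbourhood.get_pairs(si, 'ijd')
    ij_t, ik_t, d_t = neighbourhood.get_triplets(si, 'ijd')
    return i_p, j_p, ij_t, ik_t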
class MolecularNeighbourhood(Neighbourhood):
"""Class defining neighbourhood based on molecular connectivity."""
def __init__(self,
molecules: Molecules,
atom_types=None,
double_cutoff=False):
"""Initialze with atoms and molecules."""
super().__init__(atom_types)
self.double_cutoff = double_cutoff
self.cutoff = np.inf
self.molecules = molecules
def double_neighbourhood(self):
if not self.double_cutoff:
return np.inf, np.inf, MolecularNeighbourhood(self.molecules,
self.atom_type,
True)
return np.inf, np.inf, self
@property
def molecules(self):
"""Molecules instance that defines neighbourhood."""
return self._molecules
@molecules.setter
def molecules(self, molecules):
"""Create full connectivity when assigning new molecules."""
self._molecules = molecules
# Get ij + ji pairs and ijk + kji angles to mimic the cutoff behavior
self.connectivity = {
"bonds": self.double_connectivity(molecules.bonds),
}
self.connectivity["angles"] = \
self.double_connectivity(molecules.angles) if self.double_cutoff \
else molecules.angles
# Add pairs from the angle connectivity with negative types
# This way they should be ignored for the pair potentials
if molecules.angles.size > 0:
self.complete_connectivity(
typeoffset=-(np.max(molecules.angles["type"]) + 1))
# Double angles connectivity after completing bonds
if not self.double_cutoff:
self.connectivity["angles"] = \
self.double_connectivity(molecules.angles)
# not doing anything to triplet list
else:
self.triplet_list = np.zeros([0, 3], dtype=np.int32)
@property
def pair_type(self):
"""Map atom types to pair types."""
return lambda ti_p, tj_p: self.connectivity["bonds"]["type"]
@property
def triplet_type(self):
"""Map atom types to triplet types."""
def tp(ti_p, tj_p, tk_p):
types = self.connectivity["angles"]["type"]
if self.double_cutoff:
return np.concatenate([types] * 2)
return types
return tp
@staticmethod
def double_connectivity(connectivity: np.ndarray) -> np.ndarray:
"""Sort and stack connectivity + reverse connectivity."""
c = np.zeros(2 * len(connectivity), dtype=connectivity.dtype)
c["type"].reshape(2, -1)[:] = connectivity["type"]
c_fwd, c_bwd = np.split(c["atoms"], 2)
c_fwd[:] = connectivity["atoms"]
if connectivity["atoms"].shape[1] != 3:
c_bwd[:] = connectivity["atoms"][:, ::-1]
else:
c_bwd[:] = connectivity["atoms"][:, (0, 2, 1)]
return c
def complete_connectivity(self, typeoffset: int = 0):
"""Add angles to pair connectivity."""
bonds, angles = self.connectivity["bonds"], self.connectivity["angles"]
permutations = list(
it.combinations(range(angles["atoms"].shape[1]), 2))
# permutations = [(1, 2)]
e = len(permutations)
n, nn = len(bonds), e * len(angles)
new_bonds = np.zeros(n + nn, dtype=bonds.dtype)
# Copying bonds connectivity and types
new_bonds[:n] = bonds
new_bonds["type"][n:].reshape(e, -1)[:] = angles["type"]
new_bonds["type"][n:] += typeoffset
for arr, permut in zip(
np.split(new_bonds["atoms"][n:], e), permutations):
arr[:] = angles["atoms"][:, permut]
# Construct unique bond list and triplet_list
self.connectivity["bonds"], indices_r = \
np.unique(new_bonds, return_inverse=True)
# Need to sort after all the shenanigans
# Below sorts lexicographically the pairs (first col, then second col)
idx = Neighbourhood.lexsort(self.connectivity["bonds"]["atoms"])
self.connectivity["bonds"][:] = self.connectivity["bonds"][idx]
# To construct triplet references (aka ij_t, ik_t and jk_t):
# - revert sort operation
# - apply reverse unique operatation
# - take only appended values
# - reshape
# - re-sort so that ij_t is sorted
r_idx = np.zeros_like(idx, dtype=np.int32)
r_idx[idx] = np.arange(len(idx)) # revert sort
self.triplet_list = r_idx[indices_r][n:].reshape(e, -1).T
idx = Neighbourhood.lexsort(self.triplet_list) # sort ij_t
self.triplet_list = self.triplet_list[idx]
def get_pairs(self, atoms: ase.Atoms, quantities: str, cutoff=None):
"""Return pairs and quantities from connectivities."""
D, d = None, None
connectivity = self.connectivity["bonds"]["atoms"].astype(np.int32)
# If any distance is requested, compute distances vectors and norms
if "d" in quantities or "D" in quantities:
D, d = self.compute_distances(atoms, connectivity, [(0, 1)])
return self.make_result(
quantities, connectivity, D, d, None, accepted_quantities="ijdD")
def get_triplets(self,
atoms: ase.Atoms,
quantities: str,
neighbours=None,
cutoff=None):
"""Return triplets and quantities from connectivities."""
D, d = None, None
# Need to reorder connectivity for distances
bonds = self.connectivity["bonds"]["atoms"]
double_triplets = np.vstack([self.triplet_list,
self.triplet_list[:, (1, 0, 2)]])
# Returning triplet references in bonds list
connectivity = double_triplets.copy()
i_p, j_p = bonds.T
first_neigh = first_neighbours(len(atoms), i_p)
ij_t, ik_t, jk_t = connectivity.T
jk_t[:] = -np.ones(len(ij_t), dtype='int32')
        # This is slow: linear search for the j->k pair of every triplet
for t, (ij, ik) in enumerate(zip(ij_t, ik_t)):
for i in np.arange(first_neigh[j_p[ij]],
first_neigh[j_p[ij] + 1]):
if i_p[i] == j_p[ij] and j_p[i] == j_p[ik]:
jk_t[t] = i
break
connectivity_in_bounds = np.array([
bonds[connectivity[:, i], j]
for i, j in [(0, 0), (0, 1), (1, 1)]
]).T
# If any distance is requested, compute distances vectors and norms
if "d" in quantities or "D" in quantities:
# i j i k j k
indices = [(0, 1), (0, 2), (1, 2)] # defined in Jan's paper
D, d = self.compute_distances(atoms,
connectivity_in_bounds, indices)
return self.make_result(
quantities, connectivity, D, d, None, accepted_quantities="ijkdD")
    def find_triplet_types(self, atoms: ase.Atoms, i, j, k):
        # transpose so that row t holds the three atomic numbers of triplet t
        triplet_numbers = np.transpose(self.triplet_to_numbers(atoms, i, j, k))
connectivity_numbers = \
atoms.numbers[self.connectivity["angles"]["atoms"]]
unique_numbers, indices = np.unique(
connectivity_numbers, return_index=True, axis=0)
unique_types = self.connectivity["angles"]["type"][indices]
all_types = np.zeros(len(triplet_numbers), dtype=np.int32)
for i in range(all_types.shape[0]):
all_types[i] = unique_types[
np.argwhere(np.all(
np.equal(unique_numbers, triplet_numbers[i]), axis=1))
]
return all_types
def reverse_pair_indices(self,
i_p: np.ndarray,
j_p: np.ndarray,
r_p: np.ndarray):
inverse = np.zeros_like(self.connectivity["bonds"]["type"])
idx = np.arange(inverse.size)
for t in np.unique(self.connectivity["bonds"]["type"]):
mask = self.connectivity["bonds"]["type"] == t
inverse[mask] = idx[mask][find_indices_of_reversed_pairs(i_p[mask],
j_p[mask],
r_p[mask])]
return inverse
def mic(dr, cell, pbc=None):
"""
Apply minimum image convention to an array of distance vectors.
Parameters
----------
dr : array_like
Array of distance vectors.
cell : array_like
Simulation cell.
pbc : array_like, optional
Periodic boundary conditions in x-, y- and z-direction. Default is to
assume periodic boundaries in all directions.
Returns
-------
dr : array
Array of distance vectors, wrapped according to the minimum image
convention.
"""
# Check where distance larger than 1/2 cell. Particles have crossed
# periodic boundaries then and need to be unwrapped.
rec = np.linalg.inv(cell)
if pbc is not None:
rec *= np.array(pbc, dtype=int).reshape(3, 1)
dri = np.round(np.dot(dr, rec))
# Unwrap
return dr - np.dot(dri, cell)
def neighbour_list(quantities,
atoms=None,
cutoff=None,
positions=None,
cell=None,
pbc=None,
numbers=None,
cell_origin=None):
"""
Compute a neighbor list for an atomic configuration. Atoms outside periodic
boundaries are mapped into the box. Atoms outside nonperiodic boundaries
are included in the neighbor list but the complexity of neighbor list search
for those can become n^2.
The neighbor list is sorted by first atom index 'i', but not by second
atom index 'j'.
The neighbour list accepts either an ASE Atoms object or positions and cell
vectors individually.
Parameters
----------
quantities : str
Quantities to compute by the neighbor list algorithm. Each character
in this string defines a quantity. They are returned in a tuple of
the same order. Possible quantities are
- ``i`` : first atom index
- ``j`` : second atom index
- ``d`` : absolute distance
- ``D`` : distance vector
- ``S`` : shift vector (number of cell boundaries crossed by the
bond between atom i and j). With the shift vector S, the
distances D between atoms can be computed from:
``D = a.positions[j]-a.positions[i]+S.dot(a.cell)``
atoms : ase.Atoms
Atomic configuration. (Default: None)
cutoff : float or dict
Cutoff for neighbor search. It can be
- A single float: This is a global cutoff for all elements.
- A dictionary: This specifies cutoff values for element
pairs. Specification accepts element numbers of symbols.
Example: {(1, 6): 1.1, (1, 1): 1.0, ('C', 'C'): 1.85}
- A list/array with a per atom value: This specifies the radius of
an atomic sphere for each atoms. If spheres overlap, atoms are
within each others neighborhood.
positions : array_like
Atomic positions. (Default: None)
cell : array_like
Cell vectors as a 3x3 matrix. (Default: Shrink wrapped cell)
pbc : array_like
3-vector containing periodic boundary conditions in all three
directions. (Default: Nonperiodic box)
numbers : array_like
Array containing the atomic numbers.
Returns
-------
i, j, ... : array
Tuple with arrays for each quantity specified above. Indices in `i`
are returned in ascending order 0..len(a), but the order of (i,j)
pairs is not guaranteed.
Examples
--------
Examples assume Atoms object *a* and numpy imported as *np*.
1. Coordination counting::
i = neighbour_list('i', a, 1.85)
coord = np.bincount(i)
2. Coordination counting with different cutoffs for each pair of species::
i = neighbour_list('i', a,
{('H', 'H'): 1.1, ('C', 'H'): 1.3, ('C', 'C'): 1.85})
coord = np.bincount(i)
3. Pair distribution function::
d = neighbour_list('d', a, 10.00)
h, bin_edges = np.histogram(d, bins=100)
pdf = h/(4*np.pi/3*(bin_edges[1:]**3 - bin_edges[:-1]**3)) * a.get_volume()/len(a)
4. Pair potential::
i, j, d, D = neighbour_list('ijdD', a, 5.0)
energy = (-C/d**6).sum()
pair_forces = (6*C/d**5 * D.T/d).T
forces_x = np.bincount(j, weights=pair_forces[:, 0], minlength=len(a)) - \
np.bincount(i, weights=pair_forces[:, 0], minlength=len(a))
forces_y = np.bincount(j, weights=pair_forces[:, 1], minlength=len(a)) - \
np.bincount(i, weights=pair_forces[:, 1], minlength=len(a))
forces_z = np.bincount(j, weights=pair_forces[:, 2], minlength=len(a)) - \
np.bincount(i, weights=pair_forces[:, 2], minlength=len(a))
5. Dynamical matrix for a pair potential stored in a block sparse format::
        from scipy.sparse import bsr_matrix
        i, j, dr, abs_dr = neighbour_list('ijDd', a, 5.0)
        energy = (dr.T / abs_dr).T
        # de and dde denote the first and second derivatives of the pair
        # potential; first_i = first_neighbours(len(a), i)
        dynmat = -(dde * (energy.reshape(-1, 3, 1) * energy.reshape(-1, 1, 3)).T).T \
                 -(de / abs_dr * (np.eye(3, dtype=energy.dtype) - \
                   (energy.reshape(-1, 3, 1) * energy.reshape(-1, 1, 3))).T).T
        dynmat_bsr = bsr_matrix((dynmat, j, first_i), shape=(3*len(a), 3*len(a)))
dynmat_diag = np.empty((len(a), 3, 3))
for x in range(3):
for y in range(3):
dynmat_diag[:, x, y] = -np.bincount(i, weights=dynmat[:, x, y])
dynmat_bsr += bsr_matrix((dynmat_diag, np.arange(len(a)),
np.arange(len(a) + 1)),
shape=(3 * len(a), 3 * len(a)))
"""
if cutoff is None:
raise ValueError('Please provide a value for the cutoff radius.')
if atoms is None:
if positions is None:
raise ValueError('You provided neither an ASE Atoms object nor '
'a positions array.')
if cell is None:
# Shrink wrapped cell
rmin = np.min(positions, axis=0)
rmax = np.max(positions, axis=0)
cell_origin = rmin
cell = np.diag(rmax - rmin)
if cell_origin is None:
cell_origin = np.zeros(3)
if pbc is None:
pbc = np.zeros(3, dtype=bool)
if numbers is None:
numbers = np.ones(len(positions), dtype=np.int32)
else:
if positions is not None:
raise ValueError(
'You cannot provide an ASE Atoms object and '
'individual position atomic positions at the same '
'time.')
positions = atoms.positions
if cell_origin is not None:
raise ValueError('You cannot provide an ASE Atoms object and '
'a cell origin at the same time.')
cell_origin = np.zeros(3)
if cell is not None:
raise ValueError('You cannot provide an ASE Atoms object and '
'cell vectors at the same time.')
cell = atoms.cell
if pbc is not None:
raise ValueError('You cannot provide an ASE Atoms object and '
'separate periodicity information at the same '
'time.')
pbc = atoms.pbc
if numbers is not None:
raise ValueError('You cannot provide an ASE Atoms object and '
'separate atomic numbers at the same time.')
numbers = atoms.numbers.astype(np.int32)
if isinstance(cutoff, defaultdict):
_cutoff = cutoff.default_factory()
elif isinstance(cutoff, dict):
maxel = np.max(numbers)
_cutoff = np.zeros([maxel + 1, maxel + 1], dtype=float)
for (el1, el2), c in cutoff.items():
            try:
                el1 = atomic_numbers[el1]
            except KeyError:
                pass
            try:
                el2 = atomic_numbers[el2]
            except KeyError:
                pass
if el1 < maxel + 1 and el2 < maxel + 1:
_cutoff[el1, el2] = c
_cutoff[el2, el1] = c
else:
_cutoff = cutoff
try:
return ffi.neighbour_list(quantities, cell_origin, cell,
np.linalg.inv(cell.T), pbc, positions,
_cutoff, numbers)
except ValueError as e:
if str(e) == "object of too small depth for desired array":
raise TypeError(f"cutoff of invalid type {type(_cutoff)}")
raise e
def triplet_list(first_neighbours,
abs_dr_p=None,
cutoff=None,
i_p=None,
j_p=None):
"""
    Compute a triplet list for an atomic configuration. The triplet list is a
    mask that can be applied to the corresponding neighbour list to mask
    triplet properties.
    The triplet list accepts a first_neighbours array (generated by
    first_neighbours) as input.
Parameters
----------
    first_neighbours : array
        addresses of the first time an atom occurs in the neighbour list
Returns
-------
    ij_t, ik_t : array
        lists of addresses that form triplets in the pair lists
    jk_t : array (if and only if i_p, j_p, first_i != None)
        list of pairs jk that connect each triplet ij, ik
        between atoms j and k
Example
-------
i_n, j_n, abs_dr_p = neighbour_list('ijd', atoms=atoms, cutoff=cutoff)
first_i = np.array([0, 2, 6, 10], dtype='int32')
a = triplet_list(first_i, [2.2]*9+[3.0], 2.6)
# one may obtain first_ij by using
    first_ij = first_neighbours(len(i_p), ij_t)
# or (slower but less parameters and more general,
# i.e for every ordered list)
first_ij = get_jump_indicies(ij_t)
"""
if not (abs_dr_p is None or cutoff is None):
res = ffi.triplet_list(first_neighbours, abs_dr_p, cutoff)
else:
res = ffi.triplet_list(first_neighbours)
    # TODO: should be wrapped in C and be independent of i_n and j_n,
    # assuming j_n is sorted; related issue #50.
    # Add some tests!
if not (i_p is None or j_p is None or first_neighbours is None):
ij_t, ik_t = res
jk_t = -np.ones(len(ij_t), dtype='int32')
for t, (ij, ik) in enumerate(zip(ij_t, ik_t)):
for i in np.arange(first_neighbours[j_p[ij]],
first_neighbours[j_p[ij] + 1]):
if i_p[i] == j_p[ij] and j_p[i] == j_p[ik]:
jk_t[t] = i
break
return ij_t, ik_t, jk_t
else:
return res
def find_indices_of_reversed_pairs(i_n, j_n, abs_dr_n):
"""Find neighbor list indices where reversed pairs are stored
    Given lists of identifiers of neighbor atoms `i_n` and `j_n`,
    determine the list of indices `reverse` into the neighbor list
    where each pair is reversed, i.e. `i_n[reverse[n]]=j_n[n]` and
    `j_n[reverse[n]]=i_n[n]` for each index `n` in the neighbor list.
In the case of small periodic systems, one needs to be careful, because
the same pair may appear more than one time, with different pair
distances. Therefore, the pair distance must be taken into account.
We assume that there is in fact one reversed pair for every pair.
However, we do not check this assumption in order to avoid overhead.
Parameters
----------
i_n : array_like
array of atom identifiers
j_n : array_like
array of atom identifiers
abs_dr_n : array_like
pair distances
Returns
-------
reverse : numpy.ndarray
array of indices into i_n and j_n
"""
sorted_1 = np.lexsort(keys=(abs_dr_n, i_n, j_n))
sorted_2 = np.lexsort(keys=(abs_dr_n, j_n, i_n))
# np.testing.assert_equal(j_n[sorted_1], i_n[sorted_2])
# np.testing.assert_equal(i_n[sorted_1], j_n[sorted_2])
# np.testing.assert_equal(abs_dr_n[sorted_1], abs_dr_n[sorted_2])
# print(np.c_[i_n[sorted_2], j_n[sorted_2], abs_dr_n[sorted_2],
# i_n[sorted_1], j_n[sorted_1], abs_dr_n[sorted_1]])
tmp2 = np.arange(i_n.size)[sorted_2]
tmp1 = np.arange(i_n.size)[sorted_1]
# np.arange(i_n.size) are indices into the neighbor list, so
# * the nth element in tmp1 is the index where i,j was before reordering with sorted_1
# * the nth element in tmp2 is the index where j,i was before reordering with sorted_2
reverse = np.empty(i_n.size, dtype=i_n.dtype)
reverse[tmp1] = tmp2
return reverse
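# Minimal worked example (assumed values, not from the original test suite):
# with i_n = [0, 0, 1, 2], j_n = [1, 2, 0, 0] and abs_dr_n = [1., 2., 1., 2.],
# find_indices_of_reversed_pairs(i_n, j_n, abs_dr_n) returns [2, 3, 0, 1],
# i.e. pair (0, 1) at index 0 finds its reverse (1, 0) at index 2, and so on.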
def find_common_neighbours(i_n, j_n, nat):
"""Find common neighbors of pairs of atoms
For each pair ``(i1, j1)`` in the neighbor list, find all other pairs
``(i2, j1)`` which share the same ``j1``. This includes ``(i1,j1)``
itself. In this way, create a list with ``n`` blocks of rows, where ``n``
is the length of the neighbor list. All rows in a block have the same
    ``j1``. Each row corresponds to one triplet ``(i1, j1, i2)``. The number
of rows in the block is equal to the total number of neighbors of ``j1``.
Parameters
----------
i_n : array_like
array of atom identifiers
j_n : array_like
array of atom identifiers
nat: int
number of atoms
Returns
-------
cnl_i1_i2: array
atom numbers i1 and i2
cnl_j1: array
shared neighbor of i1 and i2
nl_index_i1_j1: array
index in the neighbor list of pair i1, j1
nl_index_i2_j1: array
index in the neighbor list of pair i2, j1
Examples
--------
Accumulate random numbers for pairs with common neighbors:
>>> import numpy as np
>>> import matscipy
>>> from ase.lattice.cubic import FaceCenteredCubic
>>> from matscipy.neighbours import neighbour_list, find_common_neighbours
>>> cutoff = 6.5
>>> atoms = FaceCenteredCubic('Cu', size=[4, 4, 4])
>>> nat = len(atoms.numbers)
>>> print(nat)
256
>>> i_n, j_n, dr_nc, abs_dr_n = neighbour_list('ijDd', atoms, cutoff)
>>> print(i_n.shape)
(22016,)
>>> cnl_i1_i2, cnl_j1, nl_index_i1_j1, nl_index_i2_j1 = find_common_neighbours(i_n, j_n, nat)
>>> print(cnl_i1_i2.shape)
(1893376, 2)
>>> unique_pairs_i1_i2, bincount_bins = np.unique(cnl_i1_i2, axis=0, return_inverse=True)
>>> print(unique_pairs_i1_i2.shape)
(65536, 2)
>>> tmp = np.random.rand(cnl_i1_i2.shape[0])
>>> my_sum = np.bincount(bincount_bins, weights=tmp, minlength=unique_pairs_i1_i2.shape[0])
>>> print(my_sum.shape)
(65536,)
"""
# Create a copy of the neighbor list which is sorted by j_n, e.g.
# +---------------+ +---------------+
# | sorted by i_n | | sorted by j_n |
# +=======+=======+ +=======+=======+
# | i_n | j_n | | i_n | j_n |
# +-------+-------+ +-------+-------+
# | 1 | 2 | | 2 | 1 |
# +-------+-------+ +-------+-------+
# | 1 | 95 | | 4 | 1 |
# +-------+-------+ +-------+-------+
# | 2 | 51 | | 81 | 2 |
# +-------+-------+ +-------+-------+
# | 2 | 99 | | 12 | 2 |
# +-------+-------+ +-------+-------+
# | 2 | 1 | | 6 | 2 |
# +-------+-------+ +-------+-------+
# | 3 | 78 | | 143 | 3 |
# +-------+-------+ +-------+-------+
# | ... | ... | | ... | ... |
# +-------+-------+ +-------+-------+
j_order = np.argsort(j_n)
i_n_2 = i_n[j_order]
j_n_2 = j_n[j_order]
# Find indices in the copy where contiguous blocks with same j_n_2 start
first_j = first_neighbours(nat, j_n_2)
num_rows_per_j = first_j[j_n + 1] - first_j[j_n]
num_rows_cnl = np.sum(num_rows_per_j)
# The common neighbor information could be stored as
# a 2D array. However, multiple 1D arrays are likely
# better for performance (fewer cache misses later).
nl_index_i1_j1 = np.empty(num_rows_cnl, dtype=i_n.dtype)
cnl_j1 = np.empty(num_rows_cnl, dtype=i_n.dtype)
nl_index_i2_j1 = np.empty(num_rows_cnl, dtype=i_n.dtype)
cnl_i1_i2 = np.empty((num_rows_cnl, 2), dtype=i_n.dtype)
block_start = np.r_[0, np.cumsum(num_rows_per_j)]
slice_for_j1 = {
j1: slice(first_j[j1], first_j[j1 + 1])
for j1 in np.arange(nat)
}
for block_number, (i1, j1) in enumerate(zip(i_n, j_n)):
slice1 = slice(block_start[block_number],
block_start[block_number + 1])
slice2 = slice_for_j1[j1]
nl_index_i1_j1[slice1] = block_number
cnl_j1[slice1] = j1
nl_index_i2_j1[slice1] = j_order[slice2]
cnl_i1_i2[slice1, 0] = i1
cnl_i1_i2[slice1, 1] = i_n_2[slice2]
return cnl_i1_i2, cnl_j1, nl_index_i1_j1, nl_index_i2_j1
| 35,932 | 37.104984 | 117 | py |
matscipy | matscipy-master/matscipy/elasticity.py | #
# Copyright 2014-2015, 2020-2021 Lars Pastewka (U. Freiburg)
# 2021 Jan Griesser (U. Freiburg)
# 2021 Petr Grigorev (Warwick U.)
# 2014-2015, 2020 James Kermode (Warwick U.)
# 2015 Manuel Aldegunde
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Computing elastic moduli of atomistic systems under small deformations.
"""
import itertools
import warnings
import numpy as np
import scipy.stats as scipy_stats
from numpy.linalg import inv, norm
from scipy.linalg import sqrtm
from scipy.sparse.linalg import cg, spilu, LinearOperator
import ase.units as units
from ase.atoms import Atoms
###
# The indices of the full stiffness matrix corresponding to Voigt notation
Voigt_notation = [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)]
def full_3x3_to_Voigt_6_index(i, j):
if i == j:
return i
return 6-i-j
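# Mapping sketch for reference: (0, 0) -> 0 (xx), (1, 1) -> 1 (yy),
# (2, 2) -> 2 (zz), (1, 2) -> 3 (yz), (0, 2) -> 4 (xz), (0, 1) -> 5 (xy).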
###
def Voigt_6_to_full_3x3_strain(strain_vector):
"""
Form a 3x3 strain matrix from a 6 component vector in Voigt notation
"""
e1, e2, e3, e4, e5, e6 = np.transpose(strain_vector)
return np.transpose([[e1, 0.5*e6, 0.5*e5],
[0.5*e6, e2, 0.5*e4],
[0.5*e5, 0.5*e4, e3]])
def Voigt_6_to_full_3x3_stress(stress_vector):
"""
Form a 3x3 stress matrix from a 6 component vector in Voigt notation
"""
s1, s2, s3, s4, s5, s6 = np.transpose(stress_vector)
return np.transpose([[s1, s6, s5],
[s6, s2, s4],
[s5, s4, s3]])
def full_3x3_to_Voigt_6_strain(strain_matrix):
"""
Form a 6 component strain vector in Voigt notation from a 3x3 matrix
"""
strain_matrix = np.asarray(strain_matrix)
return np.transpose([strain_matrix[...,0,0],
strain_matrix[...,1,1],
strain_matrix[...,2,2],
strain_matrix[...,1,2]+strain_matrix[...,2,1],
strain_matrix[...,0,2]+strain_matrix[...,2,0],
strain_matrix[...,0,1]+strain_matrix[...,1,0]])
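# Round-trip sketch (values are arbitrary): converting a Voigt strain vector
# to a 3x3 matrix and back reproduces the input, since the factor-of-two
# convention for the shear components is applied consistently.
#     e = np.array([0.01, 0.0, 0.0, 0.002, 0.0, 0.0])
#     np.allclose(full_3x3_to_Voigt_6_strain(Voigt_6_to_full_3x3_strain(e)), e)
#     # -> True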
def full_3x3_to_Voigt_6_stress(stress_matrix):
"""
Form a 6 component stress vector in Voigt notation from a 3x3 matrix
"""
stress_matrix = np.asarray(stress_matrix)
return np.transpose([stress_matrix[...,0,0],
stress_matrix[...,1,1],
stress_matrix[...,2,2],
                         (stress_matrix[...,1,2]+stress_matrix[...,2,1])/2,
                         (stress_matrix[...,0,2]+stress_matrix[...,2,0])/2,
                         (stress_matrix[...,0,1]+stress_matrix[...,1,0])/2])
def Voigt_6x6_to_full_3x3x3x3(C):
"""
Convert from the Voigt representation of the stiffness matrix to the full
3x3x3x3 representation.
Parameters
----------
C : array_like
6x6 stiffness matrix (Voigt notation).
Returns
-------
C : array_like
3x3x3x3 stiffness matrix.
"""
C = np.asarray(C)
C_out = np.zeros((3,3,3,3), dtype=float)
for i, j, k, l in itertools.product(range(3), range(3), range(3), range(3)):
Voigt_i = full_3x3_to_Voigt_6_index(i, j)
Voigt_j = full_3x3_to_Voigt_6_index(k, l)
C_out[i, j, k, l] = C[Voigt_i, Voigt_j]
return C_out
def full_3x3x3x3_to_Voigt_6x6(C, tol=1e-3, check_symmetry=True):
"""
Convert from the full 3x3x3x3 representation of the stiffness matrix
to the representation in Voigt notation. Checks symmetry in that process.
"""
C = np.asarray(C)
Voigt = np.zeros((6,6))
for i in range(6):
for j in range(6):
k, l = Voigt_notation[i]
m, n = Voigt_notation[j]
Voigt[i,j] = C[k,l,m,n]
"""
print('---')
print("k,l,m,n", C[k,l,m,n])
print("m,n,k,l", C[m,n,k,l])
print("l,k,m,n", C[l,k,m,n])
print("k,l,n,m", C[k,l,n,m])
print("m,n,l,k", C[m,n,l,k])
print("n,m,k,l", C[n,m,k,l])
print("l,k,n,m", C[l,k,n,m])
print("n,m,l,k", C[n,m,l,k])
print('---')
"""
if check_symmetry:
assert abs(Voigt[i,j]-C[m,n,k,l]) < tol, \
'1 Voigt[{},{}] = {}, C[{},{},{},{}] = {}' \
.format(i, j, Voigt[i,j], m, n, k, l, C[m,n,k,l])
assert abs(Voigt[i,j]-C[l,k,m,n]) < tol, \
'2 Voigt[{},{}] = {}, C[{},{},{},{}] = {}' \
.format(i, j, Voigt[i,j], l, k, m, n, C[l,k,m,n])
assert abs(Voigt[i,j]-C[k,l,n,m]) < tol, \
'3 Voigt[{},{}] = {}, C[{},{},{},{}] = {}' \
.format(i, j, Voigt[i,j], k, l, n, m, C[k,l,n,m])
assert abs(Voigt[i,j]-C[m,n,l,k]) < tol, \
'4 Voigt[{},{}] = {}, C[{},{},{},{}] = {}' \
.format(i, j, Voigt[i,j], m, n, l, k, C[m,n,l,k])
assert abs(Voigt[i,j]-C[n,m,k,l]) < tol, \
'5 Voigt[{},{}] = {}, C[{},{},{},{}] = {}' \
.format(i, j, Voigt[i,j], n, m, k, l, C[n,m,k,l])
assert abs(Voigt[i,j]-C[l,k,n,m]) < tol, \
'6 Voigt[{},{}] = {}, C[{},{},{},{}] = {}' \
.format(i, j, Voigt[i,j], l, k, n, m, C[l,k,n,m])
assert abs(Voigt[i,j]-C[n,m,l,k]) < tol, \
'7 Voigt[{},{}] = {}, C[{},{},{},{}] = {}' \
.format(i, j, Voigt[i,j], n, m, l, k, C[n,m,l,k])
return Voigt
def Voigt_6x6_to_cubic(C):
"""
Convert the Voigt 6x6 representation into the cubic elastic constants
C11, C12 and C44.
"""
tol = 1e-6
C_check = np.zeros_like(C)
C_check[np.diag_indices_from(C_check)] = C[np.diag_indices_from(C)]
C_check[0:3,0:3] = C[0:3,0:3]
if np.any(np.abs(C-C_check) > tol):
raise ValueError('"C" does not have cubic symmetry.')
C11s = np.array([C[0,0], C[1,1], C[2,2]])
C12s = np.array([C[1,2], C[0,2], C[0,1]])
C44s = np.array([C[3,3], C[4,4], C[5,5]])
C11 = np.mean(C11s)
C12 = np.mean(C12s)
C44 = np.mean(C44s)
if np.any(np.abs(C11-C11s) > tol) or np.any(np.abs(C12-C12s) > tol) or \
np.any(np.abs(C44-C44s) > tol):
raise ValueError('"C" does not have cubic symmetry.')
return np.array([C11, C12, C44])
def cubic_to_Voigt_6x6(C11, C12, C44):
return np.array([[C11,C12,C12, 0, 0, 0],
[C12,C11,C12, 0, 0, 0],
[C12,C12,C11, 0, 0, 0],
[ 0, 0, 0,C44, 0, 0],
[ 0, 0, 0, 0,C44, 0],
[ 0, 0, 0, 0, 0,C44]])
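# Example round trip (constants roughly resemble silicon in GPa, for
# illustration only):
#     C = cubic_to_Voigt_6x6(166.0, 64.0, 80.0)
#     Voigt_6x6_to_cubic(C)  # -> array([166., 64., 80.])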
def _invariants(s, syy=None, szz=None, syz=None, sxz=None, sxy=None,
full_3x3_to_Voigt_6=full_3x3_to_Voigt_6_stress):
"""
Receives a list of stress tensors and returns the three tensor invariants.
"""
if syy is None:
s = np.asarray(s)
if s.shape == (6,):
s = s.reshape(1,-1)
elif s.shape == (3,3):
s = full_3x3_to_Voigt_6(s)
elif s.shape[-1] == 3 and s.shape[-2] == 3:
s = full_3x3_to_Voigt_6(s)
else:
s = np.transpose([np.transpose(s),
np.transpose(syy),
np.transpose(szz),
np.transpose(syz),
np.transpose(sxz),
np.transpose(sxy)])
I1 = s[...,0]+s[...,1]+s[...,2]
I2 = s[...,0]*s[...,1]+s[...,1]*s[...,2]+s[...,2]*s[...,0]-s[...,3]**2- \
s[...,4]**2-s[...,5]**2
I3 = s[...,0]*s[...,1]*s[...,2]+2*s[...,3]*s[...,4]*s[...,5]- \
s[...,3]**2*s[...,2]-s[...,4]**2*s[...,0]-s[...,5]**2*s[...,1]
return I1, I2, I3
def invariants(s, syy=None, szz=None, syz=None, sxz=None, sxy=None,
full_3x3_to_Voigt_6=full_3x3_to_Voigt_6_stress):
I1, I2, I3 = _invariants(s, syy=syy, szz=szz, syz=syz, sxz=sxz, sxy=sxy,
full_3x3_to_Voigt_6=full_3x3_to_Voigt_6)
J2 = I1**2/3-I2
J3 = 2*I1**3/27-I1*I2/3+I3
# Return hydrostatic pressure, octahedral shear stress and J3
return -I1/3, np.sqrt(2*J2/3), J3
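# Sanity sketch: for pure hydrostatic compression the octahedral shear
# stress and J3 vanish and the returned pressure is positive.
#     p, tau_oct, J3 = invariants(-np.eye(3))
#     # p = 1.0, tau_oct = 0.0, J3 = 0.0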
###
def rotate_cubic_elastic_constants(C11, C12, C44, A, tol=1e-6):
"""
Return rotated elastic moduli for a cubic crystal given the elastic
constant in standard C11, C12, C44 notation.
Parameters
----------
C11, C12, C44 : float
Cubic elastic moduli.
A : array_like
3x3 rotation matrix.
Returns
-------
C : array
6x6 matrix of rotated elastic constants (Voigt notation).
"""
A = np.asarray(A)
# Is this a rotation matrix?
    if np.any(np.abs(np.dot(np.array(A), np.transpose(np.array(A))) -
              np.eye(3, dtype=float)) > tol):
raise RuntimeError('Matrix *A* does not describe a rotation.')
# Invariant elastic constants
la = C12
mu = C44
al = C11 - la - 2*mu
# Construct rotated C in Voigt notation
C = [ ]
for i, j in Voigt_notation:
for k, l in Voigt_notation:
h = 0.0
if i == j and k == l:
h += la
if i == k and j == l:
h += mu
if i == l and j == k:
h += mu
h += al*np.sum(A[i,:]*A[j,:]*A[k,:]*A[l,:])
C += [ h ]
C = np.array(C)
C.shape = (6, 6)
return C
###
def rotate_elastic_constants(C, A, tol=1e-6):
"""
Return rotated elastic moduli for a general crystal given the elastic
constant in Voigt notation.
Parameters
----------
C : array_like
6x6 matrix of elastic constants (Voigt notation).
A : array_like
3x3 rotation matrix.
Returns
-------
C : array
6x6 matrix of rotated elastic constants (Voigt notation).
"""
A = np.asarray(A)
# Is this a rotation matrix?
    if np.any(np.abs(np.dot(np.array(A), np.transpose(np.array(A))) -
              np.eye(3, dtype=float)) > tol):
raise RuntimeError('Matrix *A* does not describe a rotation.')
# Rotate
return full_3x3x3x3_to_Voigt_6x6(np.einsum('ia,jb,kc,ld,abcd->ijkl',
A, A, A, A,
Voigt_6x6_to_full_3x3x3x3(C)))
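# Consistency sketch: a 90 degree rotation about z is a symmetry operation of
# a cubic crystal, so the rotated constants equal the input (values arbitrary).
#     C = cubic_to_Voigt_6x6(166.0, 64.0, 80.0)
#     Rz = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
#     np.allclose(rotate_elastic_constants(C, Rz), C)  # -> True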
###
class CubicElasticModuli:
tol = 1e-6
def __init__(self, C11, C12, C44):
"""
Initialize a cubic system with elastic constants C11, C12, C44
"""
warnings.warn('CubicElasticModuli is deprecated. Use '
'rotate_elastic_constants function instead.')
# la, mu, al are the three invariant elastic constants
self.la = C12
self.mu = C44
self.al = C11 - self.la - 2*self.mu
A = np.eye(3, dtype=float)
# Compute initial stiffness matrix
self.rotate(A)
def rotate(self, A):
"""
Compute the rotated stiffness matrix
"""
A = np.asarray(A)
# Is this a rotation matrix?
        if np.any(np.abs(np.dot(np.array(A), np.transpose(np.array(A))) -
                  np.eye(3, dtype=float)) > self.tol):
raise RuntimeError('Matrix *A* does not describe a rotation.')
C = [ ]
for i, j in Voigt_notation:
for k, l in Voigt_notation:
h = 0.0
if i == j and k == l:
h += self.la
if i == k and j == l:
h += self.mu
if i == l and j == k:
h += self.mu
h += self.al*np.sum(A[i,:]*A[j,:]*A[k,:]*A[l,:])
C += [ h ]
self.C = np.asarray(C)
self.C.shape = (6, 6)
return self.C
def _rotate_explicit(self, A):
"""
Compute the rotated stiffness matrix by applying the rotation to the
full stiffness matrix. This function is for debugging purposes only.
"""
A = np.asarray(A)
# Is this a rotation matrix?
        if np.any(np.abs(np.dot(np.array(A), np.transpose(np.array(A))) -
                  np.eye(3, dtype=float)) > self.tol):
raise RuntimeError('Matrix *A* does not describe a rotation.')
C = np.zeros((3, 3, 3, 3), dtype=float)
# Construct unrotated stiffness matrix
for i in range(3):
for j in range(3):
for k in range(3):
for m in range(3):
h = 0.0
if i == j and k == m:
h += self.la
if i == k and j == m:
h += self.mu
if i == m and j == k:
h += self.mu
if i == j and j == k and k == m:
h += self.al
C[i, j, k, m] = h
# Rotate
C = np.einsum('ia,jb,kc,ld,abcd->ijkl', A, A, A, A, C)
self.C = full_3x3x3x3_to_Voigt_6x6(C)
return self.C
def stiffness(self):
"""
Return the elastic constants
"""
return self.C
def compliance(self):
"""
Return the compliance coefficients
"""
return inv(self.C)
###
def measure_triclinic_elastic_constants(a, delta=0.001, optimizer=None,
logfile=None, **kwargs):
"""
Brute-force measurement of elastic constants for a triclinic (general)
unit cell.
Parameters
----------
a : ase.Atoms
Atomic configuration.
optimizer : ase.optimizer.*
        Optimizer to use for atomic positions. Does not optimize atomic
        positions if set to None.
delta : float
Strain increment for analytical derivatives of stresses.
Returns
-------
C : array_like
6x6 matrix of the elastic constants in Voigt notation.
"""
if optimizer is not None:
optimizer(a, logfile=logfile).run(**kwargs)
r0 = a.positions.copy()
cell = a.cell.copy()
volume = a.get_volume()
C = np.zeros((3,3,3,3), dtype=float)
for i in range(3):
for j in range(3):
a.set_cell(cell, scale_atoms=True)
a.set_positions(r0)
e = np.zeros((3, 3))
e[i, j] += 0.5*delta
e[j, i] += 0.5*delta
F = np.eye(3) + e
a.set_cell(np.dot(F, cell.T).T, scale_atoms=True)
if optimizer is not None:
optimizer(a, logfile=logfile).run(**kwargs)
sp = Voigt_6_to_full_3x3_stress(a.get_stress())
e = np.zeros((3, 3))
e[i, j] -= 0.5*delta
e[j, i] -= 0.5*delta
F = np.eye(3) + e
#F = sqrtm(np.eye(3) + 2*e)
a.set_cell(np.dot(F, cell.T).T, scale_atoms=True)
if optimizer is not None:
optimizer(a, logfile=logfile).run(**kwargs)
sm = Voigt_6_to_full_3x3_stress(a.get_stress())
C[:,:,i,j] = (sp-sm)/(2*delta)
a.set_cell(cell, scale_atoms=True)
a.set_positions(r0)
return C
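# Usage sketch (hedged): `my_calc` stands for any attached ASE calculator and
# is not part of this module.
#     from ase.build import bulk
#     cu = bulk('Cu', cubic=True)
#     cu.calc = my_calc
#     C_full = measure_triclinic_elastic_constants(cu, delta=1e-3)
#     C_voigt = full_3x3x3x3_to_Voigt_6x6(C_full)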
Cij_symmetry = {
'cubic': np.array([[1, 7, 7, 0, 0, 0],
[7, 1, 7, 0, 0, 0],
[7, 7, 1, 0, 0, 0],
[0, 0, 0, 4, 0, 0],
[0, 0, 0, 0, 4, 0],
[0, 0, 0, 0, 0, 4]]),
'trigonal_high': np.array([[1, 7, 8, 9, 10, 0],
[7, 1, 8, 0,-9, 0],
[8, 8, 3, 0, 0, 0],
[9, -9, 0, 4, 0, 0],
[10, 0, 0, 0, 4, 0],
[0, 0, 0, 0, 0, 6]]),
'trigonal_low': np.array([[1, 7, 8, 9, 10, 0 ],
[7, 1, 8, -9, -10, 0 ],
[8, 8, 3, 0, 0, 0 ],
[9, -9, 0, 4, 0, -10],
[10,-10, 0, 0, 4, 9 ],
[0, 0, 0, -10 , 9, 6 ]]),
'tetragonal_high': np.array([[1, 7, 8, 0, 0, 0],
[7, 1, 8, 0, 0, 0],
[8, 8, 3, 0, 0, 0],
[0, 0, 0, 4, 0, 0],
[0, 0, 0, 0, 4, 0],
[0, 0, 0, 0, 0, 6]]),
'tetragonal_low': np.array([[1, 7, 8, 0, 0, 11],
[7, 1, 8, 0, 0, -11],
[8, 8, 3, 0, 0, 0],
[0, 0, 0, 4, 0, 0],
[0, 0, 0, 0, 4, 0],
[11, -11, 0, 0, 0, 6]]),
'orthorhombic': np.array([[ 1, 7, 8, 0, 0, 0],
[ 7, 2, 12, 0, 0, 0],
[ 8, 12, 3, 0, 0, 0],
[ 0, 0, 0, 4, 0, 0],
[ 0, 0, 0, 0, 5, 0],
[ 0, 0, 0, 0, 0, 6]]),
'monoclinic': np.array([[ 1, 7, 8, 0, 10, 0],
[ 7, 2, 12, 0, 14, 0],
[ 8, 12, 3, 0, 17, 0],
[ 0, 0, 0, 4, 0, 20],
[10, 14, 17, 0, 5, 0],
[ 0, 0, 0, 20, 0, 6]]),
'triclinic': np.array([[ 1, 7, 8, 9, 10, 11],
[ 7, 2, 12, 13, 14, 15],
[ 8, 12, 3, 16, 17, 18],
[ 9, 13, 16, 4, 19, 20],
[10, 14, 17, 19, 5, 21],
[11, 15, 18, 20, 21, 6 ]]),
}
def _dec(pattern):
# Decrease C_ij indices patterns by one.
# Used to make strain_patterns definitions below more readable.
return [(i-1, j-1) for (i,j) in pattern]
strain_patterns = {
'cubic': [
# strain pattern e1+e4, yields C11, C21, C31 and C44, then C12 is average of C21 and C31
[ np.array([1,0,0,1,0,0]), _dec([(1,1), (2,1), (3,1), (4,4)])]
],
'trigonal_high': [
# strain pattern e3 yield C13, C23 and C33
[ np.array([0,0,1,0,0,0]), _dec([(1,3), (2,3), (3,3)])],
# strain pattern e1+e4 yields C11 C21 C31 and C44
[ np.array([1,0,0,1,0,0]), _dec([(1,1), (2,1), (3,1), (4,4)])],
# strain pattern e1 yields C11 C21 C31 C41 C51
[ np.array([1,0,0,0,0,0]), _dec([(1,1), (2,1), (3,1), (4,1), (5,1)])],
# strain pattern e3+e4
[ np.array([0,0,1,1,0,0]), _dec([(3,3), (4,4)])]
],
'trigonal_low': [
# strain pattern e1, yields C11, C21, C31, C41, C51
[ np.array([1,0,0,0,0,0]), _dec([(1,1), (2,1), (3,1), (4,1), (5,1)])],
# strain pattern e3 + e4, yields C33, C44
[ np.array([0,0,1,1,0,0]), _dec([(3,3), (4,4)]) ],
[ np.array([0,0,0,0,0,1]), _dec([(6,6)]) ]
],
'tetragonal': [
# strain pattern e1+e4
[ np.array([1,0,0,1,0,0]), _dec([(1,1), (2,1), (3,1), (6,1), (4,4)]) ],
# strain pattern e3+e6
[ np.array([0,0,1,0,0,1]), _dec([(3,3), (6,6)]) ]
],
'orthorhombic': [
# strain pattern e1+e4
[ np.array([1,0,0,1,0,0]), _dec([(1,1), (2,1), (3,1), (4,4)]) ],
# strain pattern e2+e5
[ np.array([0,1,0,0,1,0]), _dec([(1,2), (2,2), (3,2), (5,5)]) ],
# strain pattern e3+e6
[ np.array([0,0,1,0,0,1]), _dec([(1,3), (2,3), (3,3), (6,6)]) ]
],
'monoclinic': [
# strain pattern e1+e4
[ np.array([1,0,0,1,0,0]), _dec([(1,1), (2,1), (3,1), (4,4), (5,1), (6,4)]) ],
# strain pattern e3+e6
[ np.array([0,0,1,0,0,1]), _dec([(1,3), (2,3), (3,3), (5,3), (4,6), (6,6)]) ],
# strain pattern e2
[ np.array([0,1,0,0,0,0]), _dec([(1,2), (2,2), (3,2), (5,2)]) ],
# strain pattern e5
[ np.array([0,0,0,0,1,0]), _dec([(1,5), (2,5), (3,5), (5,5)]) ]
],
'triclinic': [
[ np.array([1,0,0,0,0,0]), _dec([(1,1), (2,1), (3,1), (4,1), (5,1), (6,1)])],
[ np.array([0,1,0,0,0,0]), _dec([(1,2), (2,2), (3,2), (4,2), (5,2), (6,2)])],
[ np.array([0,0,1,0,0,0]), _dec([(1,3), (2,3), (3,3), (4,3), (5,3), (6,3)])],
[ np.array([0,0,0,1,0,0]), _dec([(1,4), (2,4), (3,4), (4,4), (5,4), (6,4)])],
[ np.array([0,0,0,0,1,0]), _dec([(1,5), (2,5), (3,5), (4,5), (5,5), (6,5)])],
[ np.array([0,0,0,0,0,1]), _dec([(1,6), (2,6), (3,6), (4,6), (5,6), (6,6)])],
]
}
Cij_symmetry['hexagonal'] = Cij_symmetry['trigonal_high']
Cij_symmetry[None] = Cij_symmetry['triclinic']
strain_patterns['hexagonal'] = strain_patterns['trigonal_high']
strain_patterns['tetragonal_high'] = strain_patterns['tetragonal_low'] = strain_patterns['tetragonal']
strain_patterns[None] = strain_patterns['triclinic']
def generate_strained_configs(at0, symmetry='triclinic', N_steps=5, delta=1e-2):
"""
Generate a sequence of strained configurations
Parameters
----------
a : ase.Atoms
Bulk crystal configuration - both unit cell and atomic positions
should be relaxed before calling this routine.
symmetry : string
Symmetry to use to determine which strain patterns are necessary.
        Default is 'triclinic', i.e. no symmetry.
N_steps : int
Number of atomic configurations to generate for each strain pattern.
Default is 5. Absolute strain values range from -delta*N_steps/2
to +delta*N_steps/2.
delta : float
Strain increment for analytical derivatives of stresses. Default 1e-2
Returns
-------
    Generator which yields a sequence of ase.Atoms instances corresponding
    to the minimal set of strained configurations required to evaluate the
    full 6x6 C_ij matrix under the assumed symmetry.
"""
    if symmetry not in strain_patterns:
raise ValueError('Unknown symmetry %s. Valid options are %s' % (symmetry, strain_patterns.keys()))
for pindex, (pattern, fit_pairs) in enumerate(strain_patterns[symmetry]):
for step in range(N_steps):
strain = np.where(pattern == 1, delta*(step+1-(N_steps+1)/2.0), 0.0)
at = at0.copy()
if at0.calc is not None:
at.calc = at0.calc
T = np.eye(3) + Voigt_6_to_full_3x3_strain(strain)
at.set_cell(np.dot(T, at.cell.T).T, scale_atoms=False)
at.positions[:] = np.dot(T, at.positions.T).T
at.info['strain'] = T - np.eye(3)
yield at
# Elastic constant calculation.
# Code adapted from elastics.py script, available from
# http://github.com/djw/elastic-constants
#
# Copyright (c) 2008, Dan Wilson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY DAN WILSON ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL DAN WILSON BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def fit_elastic_constants(a, symmetry='triclinic', N_steps=5, delta=1e-2, optimizer=None,
verbose=True, graphics=False, logfile=None, **kwargs):
"""
Compute elastic constants by linear regression of stress vs. strain
Parameters
----------
a : ase.Atoms or list of ase.Atoms
Either a single atomic configuration or a list of strained
        configurations. If a single configuration is given, it is
        passed to :func:`generate_strained_configs` along with `symmetry`,
        `N_steps`, and `delta` to generate the set of strained configurations.
symmetry : string
Symmetry to use to determine which strain patterns are necessary.
        Default is 'triclinic', i.e. no symmetry.
N_steps : int
Number of atomic configurations to generate for each strain pattern.
Default is 5. Absolute strain values range from -delta*N_steps/2
to +delta*N_steps/2.
delta : float
Strain increment for analytical derivatives of stresses.
Default is 1e-2.
optimizer : ase.optimizer.*
Optimizer to use for atomic positions (internal degrees of freedom)
for each applied strain. Initial config `a` is not optimised, and should
already have relaxed cell and atomic positions. Does not optimize atomic
positions if set to None.
verbose : bool
        If True, print additional information about the quality of fit
        and summarise results of C_ij and estimated errors. Default True.
    graphics : bool
        If True, use :mod:`matplotlib.pyplot` to plot the stress vs. strain
        curve for each C_ij component fitted. Default False.
    logfile : file
Log file to write optimizer output to. Default None (i.e. suppress
output).
**kwargs : dict
Additional arguments to pass to `optimizer.run()` method e.g. `fmax`.
Returns
-------
C : array_like
6x6 matrix of the elastic constants in Voigt notation.
C_err : array_like
If scipy.stats module is available then error estimates for each C_ij
component are obtained from the accuracy of the linear regression.
Otherwise an array of np.zeros((6,6)) is returned.
Notes
-----
Code originally adapted from elastics.py script, available from
http://github.com/djw/elastic-constants
"""
if isinstance(a, Atoms):
if a.constraints:
raise ValueError('Atoms passed to fit_elastic_constants() '
'has constraints attached')
# we've been passed a single Atoms object: use it to generate
# set of strained configurations according to symmetry
strained_configs = generate_strained_configs(a, symmetry, N_steps, delta)
else:
# assume we've been passed a list of strained configs
strained_configs = a
if graphics:
import matplotlib.pyplot as plt
def do_fit(index1, index2, stress, strain, patt):
if verbose:
print('Fitting C_%d%d' % (index1+1, index2+1))
print('Strain %r' % strain[:,index2])
print('Stress %r GPa' % (stress[:,index1]/units.GPa))
if scipy_stats is not None:
cijFitted, intercept,r,tt,stderr = scipy_stats.linregress(strain[:,index2],stress[:,index1])
else:
cijFitted, intercept = np.polyfit(strain[:,index2],stress[:,index1], 1)
r, tt, stderr = 0.0, None, 0.0
if verbose:
# print info about the fit
print('Cij (gradient) / GPa : ',cijFitted/units.GPa)
if scipy_stats is not None:
print('Error in Cij / GPa : ', stderr/units.GPa)
if abs(r) > 0.9:
print('Correlation coefficient : ',r)
else:
print('Correlation coefficient : ',r, ' <----- WARNING')
else:
print('scipy.stats not available, no error estimation performed')
if graphics:
# position this plot in a 6x6 grid
sp = plt.subplot(6,6,6*index1+(index2+1))
sp.set_axis_on()
# change the labels on the axes
xlabels = sp.get_xticklabels()
plt.setp(xlabels,'rotation',90,fontsize=7)
ylabels = sp.get_yticklabels()
plt.setp(ylabels,fontsize=7)
# colour the plot depending on the strain pattern
colourDict = {0: '#BAD0EF', 1:'#FFCECE', 2:'#BDF4CB', 3:'#EEF093',4:'#FFA4FF',5:'#75ECFD'}
sp.set_facecolor(colourDict[patt])
# plot the data
plt.plot([strain[0,index2],strain[-1,index2]],
[(cijFitted*strain[0,index2]+intercept)/units.GPa,
(cijFitted*strain[-1,index2]+intercept)/units.GPa])
plt.plot(strain[:,index2],stress[:,index1]/units.GPa,'ro')
plt.xticks(strain[:,index2])
return cijFitted, stderr
    if symmetry not in strain_patterns:
raise ValueError('Unknown symmetry %s. Valid options are %s' % (symmetry, strain_patterns.keys()))
# There are 21 independent elastic constants
Cijs = {}
Cij_err = {}
# Construct mapping from (i,j) to index into Cijs in range 1..21
# (upper triangle only to start with)
Cij_map = {}
Cij_map_sym = {}
for i in range(6):
for j in range(i,6):
Cij_map[(i,j)] = Cij_symmetry[None][i,j]
Cij_map_sym[(i,j)] = Cij_symmetry[symmetry][i,j]
# Reverse mapping, index 1..21 -> tuple (i,j) with i, j in range 0..5
Cij_rev_map = dict(zip(Cij_map.values(), Cij_map.keys()))
# Add the lower triangle to Cij_map, e.g. C21 = C12
for (i1,i2) in Cij_map.copy().keys():
Cij_map[(i2,i1)] = Cij_map[(i1,i2)]
Cij_map_sym[(i2,i1)] = Cij_map_sym[(i1,i2)]
N_pattern = len(strain_patterns[symmetry])
configs = iter(strained_configs)
strain = np.zeros((N_pattern, N_steps, 6))
stress = np.zeros((N_pattern, N_steps, 6))
if graphics:
fig = plt.figure(num=1, figsize=(9.5,8),facecolor='white')
fig.clear()
fig.subplots_adjust(left=0.07,right=0.97,top=0.97,bottom=0.07,wspace=0.5,hspace=0.5)
for index1 in range(6):
for index2 in range(6):
# position this plot in a 6x6 grid
sp = plt.subplot(6,6,6*(index1)+index2+1)
sp.set_axis_off()
plt.text(0.4,0.4, "n/a")
# Fill in strain and stress arrays from config Atoms list
for pattern_index, (pattern, fit_pairs) in enumerate(strain_patterns[symmetry]):
for step in range(N_steps):
at = next(configs)
if optimizer is not None:
optimizer(at, logfile=logfile).run(**kwargs)
strain[pattern_index, step, :] = full_3x3_to_Voigt_6_strain(at.info['strain'])
stress[pattern_index, step, :] = at.get_stress()
# Do the linear regression
for pattern_index, (pattern, fit_pairs) in enumerate(strain_patterns[symmetry]):
for (index1, index2) in fit_pairs:
fitted, err = do_fit(index1, index2,
stress[pattern_index,:,:],
strain[pattern_index,:,:],
pattern_index)
index = abs(Cij_map_sym[(index1, index2)])
if not index in Cijs:
if verbose:
print('Setting C%d%d (%d) to %f +/- %f' %
(index1+1, index2+1, index, fitted, err))
Cijs[index] = [fitted]
Cij_err[index] = [err]
else:
if verbose:
print('Updating C%d%d (%d) with value %f +/- %f' %
(index1+1, index2+1, index, fitted, err))
Cijs[index].append(fitted)
Cij_err[index].append(err)
if verbose:
print('\n')
C = np.zeros((6,6))
C_err = np.zeros((6,6))
C_labels = np.zeros((6,6),dtype='S4')
C_labels[:] = ' '
# Convert lists to mean
for k in Cijs:
Cijs[k] = np.mean(Cijs[k])
# Combine statistical errors
for k, v in Cij_err.items():
Cij_err[k] = np.sqrt(np.sum(np.array(v)**2))/np.sqrt(len(v))
if symmetry.startswith('trigonal'):
# Special case for trigonal lattice: C66 = (C11 - C12)/2
Cijs[Cij_map[(5,5)]] = 0.5*(Cijs[Cij_map[(0,0)]]-Cijs[Cij_map[(0,1)]])
Cij_err[Cij_map[(5,5)]] = np.sqrt(Cij_err[Cij_map[(0,0)]]**2 + Cij_err[Cij_map[(0,1)]]**2)
# Generate the 6x6 matrix of elastic constants
# - negative values signify a symmetry relation
for i in range(6):
for j in range(6):
index = Cij_symmetry[symmetry][i,j]
if index > 0:
C[i,j] = Cijs[index]
C_err[i,j] = Cij_err[index]
ii, jj = Cij_rev_map[index]
C_labels[i,j] = ' C%d%d' % (ii+1,jj+1)
elif index < 0:
C[i,j] = -Cijs[-index]
C_err[i,j] = Cij_err[-index]
ii, jj = Cij_rev_map[-index]
C_labels[i,j] = '-C%d%d' % (ii+1, jj+1)
if verbose:
print(np.array2string(C_labels).replace("'",""))
print('\n = \n')
print(np.array2string(C/units.GPa,
suppress_small=True,
precision=2))
        print()
# Summarise the independent components of C_ij matrix
printed = {}
for i in range(6):
for j in range(6):
index = Cij_symmetry[symmetry][i,j]
if index <= 0 or index in printed: continue
print('C_%d%d = %-4.2f +/- %-4.2f GPa' %
(i+1, j+1, C[i,j]/units.GPa, C_err[i,j]/units.GPa))
printed[index] = 1
return C, C_err
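# Typical call pattern (a sketch; the calculator attached to `a` and the fmax
# value are assumptions, not prescribed by this module):
#     from ase.optimize import FIRE
#     C, C_err = fit_elastic_constants(a, symmetry='cubic',
#                                      optimizer=FIRE, fmax=1e-4)
#     C11, C12, C44 = Voigt_6x6_to_cubic(C)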
def youngs_modulus(C, l):
"""
Calculate approximate Youngs modulus E_l from 6x6 elastic constants matrix C_ij
This is the modulus for loading in the l direction. For the exact answer, taking
into account elastic anisotropuy, rotate the C_ij matrix to the correct frame,
compute the compliance matrix::
C = ... # 6x6 C_ij matrix in crystal frame
A = ... # rotation matrix
Cr = rotate_elastic_constants(C, A)
S = np.inv(Cr)
E_x = 1/S[0, 0] # Young's modulus for a pull in x direction
E_y = 1/S[1, 1] # Young's modulus for a pull in y direction
E_z = 1/S[0, 0] # Young's modulus for a pull in z direction
Notes
-----
Formula is from W. Brantley, Calculated elastic constants for stress problems associated
with semiconductor devices. J. Appl. Phys., 44, 534 (1973).
"""
S = inv(C) # Compliance matrix
lhat = l/norm(l) # Normalise directions
# Youngs modulus in direction l, ratio of stress sigma_l
# to strain response epsilon_l
E = 1.0/(S[0,0] - 2.0*(S[0,0]-S[0,1]-0.5*S[3,3])*(lhat[0]*lhat[0]*lhat[1]*lhat[1] +
lhat[1]*lhat[1]*lhat[2]*lhat[2] +
lhat[0]*lhat[0]*lhat[2]*lhat[2]))
return E
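# Quick check (illustrative numbers): with C44 == (C11 - C12)/2 the material
# is elastically isotropic and E is independent of the pull direction.
#     C = cubic_to_Voigt_6x6(2.0, 1.0, 0.5)
#     np.isclose(youngs_modulus(C, [1, 0, 0]), youngs_modulus(C, [1, 1, 1]))
#     # -> True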
def poisson_ratio(C, l, m):
"""
Calculate approximate Poisson ratio \nu_{lm} from 6x6 elastic constant matrix C_{ij}
This is the response in `m` direction to pulling in `l` direction. Result is dimensionless.
Notes
-----
Formula is from W. Brantley, Calculated elastic constants for stress problems associated
with semiconductor devices. J. Appl. Phys., 44, 534 (1973).
"""
S = inv(C) # Compliance matrix
lhat = l/norm(l) # Normalise directions
mhat = m/norm(m)
# Poisson ratio v_lm: response in m direction to strain in
# l direction, v_lm = - epsilon_m/epsilon_l
v = -((S[0,1] + (S[0,0]-S[0,1]-0.5*S[3,3])*(lhat[0]*lhat[0]*mhat[0]*mhat[0] +
lhat[1]*lhat[1]*mhat[1]*mhat[1] +
lhat[2]*lhat[2]*mhat[2]*mhat[2])) /
(S[0,0] - 2.0*(S[0,0]-S[0,1]-0.5*S[3,3])*(lhat[0]*lhat[0]*lhat[1]*lhat[1] +
lhat[1]*lhat[1]*lhat[2]*lhat[2] +
lhat[0]*lhat[0]*lhat[2]*lhat[2])))
return v
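# For the isotropic example above, the axial Poisson ratio reduces to
# -S12/S11 = C12/(C11 + C12):
#     poisson_ratio(C, [1, 0, 0], [0, 1, 0])  # -> 1/3 for C11=2.0, C12=1.0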
def elastic_moduli(C, l=np.array([1, 0, 0]), R=None, tol=1e-6):
"""
Calculate elastic moduli from 6x6 elastic constant matrix C_{ij}.
    The elastic moduli calculated are: Young's moduli, Poisson's ratios,
    shear moduli, bulk modulus and bulk modulus tensor.
If a direction l is specified, the system is rotated to have it as its
x direction (see Notes for details). If R is specified the system is
rotated according to it.
Parameters
----------
C : array_like
6x6 stiffness matrix (Voigt notation).
l : array_like, optional
3D direction vector for pull (the default is the x direction
of the original system)
R : array_like, optional
3x3 rotation matrix.
Returns
-------
E : array_like
Young's modulus for a stress in each of the three directions
of the rotated system.
nu : array_like
3x3 matrix with Poisson's ratios.
Gm : array_like
3x3 matrix with shear moduli.
B : float
Bulk modulus.
K : array_like
3x3 matrix with bulk modulus tensor.
Other Parameters
----------------
tol : float, optional
tolerance for checking validity of rotation and comparison
of vectors.
Notes
    -----
It works by rotating the elastic constant tensor to the desired
direction, so it should be valid for arbitrary crystal structures.
If only l is specified there is an infinite number of possible
rotations. The chosen one is a rotation along the axis orthogonal
to the plane defined by the vectors (1, 0, 0) and l.
Bulk modulus tensor as defined in
    O. Rand and V. Rovenski, "Analytical Methods in Anisotropic
    Elasticity", Birkhäuser (2005), pp. 71.
"""
if R is not None:
R = np.asarray(R)
# Is this a rotation matrix?
        if np.any(np.abs(np.dot(np.array(R), np.transpose(np.array(R))) -
                  np.eye(3, dtype=float)) > tol):
raise RuntimeError('Matrix *R* does not describe a rotation.')
else:
u_a = l/norm(l) # Normalise directions
u_b = np.array([1, 0, 0])
R = np.eye(3)
if np.abs(np.dot(u_a, u_b)) < 1.0 - tol:
u_v = np.cross(u_a, u_b)
u_v_mat = np.array([[ 0, -u_v[2], u_v[1]],
[ u_v[2], 0, -u_v[0]],
[-u_v[1], u_v[0], 0]])
R = R + u_v_mat + \
np.dot(u_v_mat, u_v_mat) * (1 - np.dot(u_a, u_b)) / \
np.linalg.norm(u_v)**2
Cr = rotate_elastic_constants(C, R)
S = np.linalg.inv(Cr)
# Young's modulus for a stress in \alpha direction; \alpha = x, y, z
E = np.zeros(3)
E[0] = 1/S[0, 0]
E[1] = 1/S[1, 1]
E[2] = 1/S[2, 2]
# Poisson's ratio ($\alpha$, $\beta$); $\alpha$, $\beta$ = x, y, z
nu = np.array([
[-1, -S[1, 0]/S[0, 0], -S[2, 0]/S[0, 0]],
[-S[0, 1]/S[1, 1], -1, -S[2, 1]/S[1, 1]],
[-S[0, 2]/S[2, 2], -S[1, 2]/S[2, 2], -1]
])
# Shear modulus ($\alpha$, $\beta$); $\alpha$, $\beta$ = x, y, z
G = np.zeros(3)
G[0] = 1/S[3, 3] # Shear modulus yz
G[1] = 1/S[4, 4] # Shear modulus zx
G[2] = 1/S[5, 5] # Shear modulus xy
Gm = np.array([
[E[0]/4, G[2], G[1]],
[G[2], E[1]/4, G[0]],
[G[1], G[0], E[2]/4]
])
# Bulk modulus
B = 1/np.sum(S[0:3, 0:3])
# Bulk modulus tensor
Crt = Voigt_6x6_to_full_3x3x3x3(Cr)
K = np.einsum('ijkk', Crt)
return E, nu, Gm, B, K
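# Usage sketch (arbitrary cubic constants; pull direction rotated to [1, 1, 0]):
#     C = cubic_to_Voigt_6x6(2.0, 1.2, 0.4)
#     E, nu, Gm, B, K = elastic_moduli(C, l=np.array([1, 1, 0]))
#     # E[0] is then Young's modulus along [1, 1, 0]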
def nonaffine_elastic_contribution(atoms,
eigenvalues=None,
eigenvectors=None,
pc_parameters=None,
cg_parameters={
"x0": None,
"tol": 1e-5,
"maxiter": None,
"M": None,
"callback": None,
"atol": 1e-5}):
"""Compute the correction of non-affine displacements to the elasticity tensor.
The computation of the occuring inverse of the Hessian matrix is bypassed by
using a cg solver.
If eigenvalues and and eigenvectors are given the inverse of the Hessian can
be easily computed.
Parameters
----------
atoms: ase.Atoms
Atomic configuration in a local or global minima.
eigenvalues: array
Eigenvalues in ascending order obtained by diagonalization of Hessian matrix.
If given, use eigenvalues and eigenvectors to compute non-affine contribution.
eigenvectors: array
Eigenvectors corresponding to eigenvalues.
cg_parameters: dict
        Dictionary for the conjugate-gradient solver.
x0: {array, matrix}
Starting guess for the solution.
tol/atol: float, optional
Tolerances for convergence, norm(residual) <= max(tol*norm(b), atol).
maxiter: int
Maximum number of iterations. Iteration will stop after maxiter steps even if the specified tolerance has not been achieved.
M: {sparse matrix, dense matrix, LinearOperator}
Preconditioner for A.
callback: function
User-supplied function to call after each iteration.
    pc_parameters: dict
        Dictionary for the incomplete LU decomposition of the Hessian.
A: array_like
Sparse matrix to factorize.
drop_tol: float
Drop tolerance for an incomplete LU decomposition.
fill_factor: float
Specifies the fill ratio upper bound.
drop_rule: str
Comma-separated string of drop rules to use.
permc_spec: str
How to permute the columns of the matrix for sparsity.
diag_pivot_thresh: float
Threshold used for a diagonal entry to be an acceptable pivot.
relax: int
Expert option for customizing the degree of relaxing supernodes.
panel_size: int
Expert option for customizing the panel size.
options: dict
Dictionary containing additional expert options to SuperLU.
"""
def _sym(C_abab):
"""Symmetrize Hooke tensor."""
symmetry_group = [(0, 1, 2, 3), (1, 0, 2, 3), (0, 1, 3, 2), (1, 0, 3, 2)]
return 0.25 * np.add.reduce([C_abab.transpose(s) for s in symmetry_group])
nat = len(atoms)
naforces_icab = atoms.calc.get_property('nonaffine_forces')
# No solve if eigenvalues are provided
if (eigenvalues is not None) and (eigenvectors is not None):
G_incc = (eigenvectors.T).reshape(-1, 3*nat, 1, 1) * naforces_icab.reshape(1, 3*nat, 3, 3)
G_incc = (G_incc.T/np.sqrt(eigenvalues)).T
G_icc = np.sum(G_incc, axis=1)
C_abab = np.sum(G_icc.reshape(-1,3,3,1,1) * G_icc.reshape(-1,1,1,3,3), axis=0)
return -_sym(C_abab) / atoms.get_volume()
H_nn = atoms.calc.get_property('hessian')
if pc_parameters is not None:
# Transform H to csc
H_nn = H_nn.tocsc()
# Compute incomplete LU
approx_Hinv = spilu(H_nn, **pc_parameters)
operator_Hinv = LinearOperator(H_nn.shape, approx_Hinv.solve)
cg_parameters["M"] = operator_Hinv
D_iab = np.zeros((3*nat, 3, 3))
for i in range(3):
for j in range(3):
x, info = cg(H_nn, naforces_icab[:, :, i, j].flatten(), **cg_parameters)
if info != 0:
print("info: ", info)
raise RuntimeError(" info > 0: CG tolerance not achieved, info < 0: Exceeded number of iterations.")
D_iab[:, i, j] = x
C_abab = np.sum(naforces_icab.reshape(3*nat, 3, 3, 1, 1) * D_iab.reshape(3*nat, 1, 1, 3, 3), axis=0)
return -_sym(C_abab) / atoms.get_volume()
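# Call sketch (hedged): requires a calculator that provides the 'hessian' and
# 'nonaffine_forces' properties, e.g. one of the matscipy pair-potential
# calculators; `C_affine` below is a hypothetical name for the affine part.
#     C_na = nonaffine_elastic_contribution(atoms)
#     C_total = C_affine + C_na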
| 45,227 | 34.584579 | 136 | py |
matscipy | matscipy-master/matscipy/opls.py | #
# Copyright 2016-2017, 2023 Andreas Klemenz (Fraunhofer IWM)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import ase
import ase.data
import ase.io
import ase.io.lammpsrun
import matscipy.neighbours
def twochar(name):
"""
Forces the length of the particle names to be two characters.
Parameters
----------
name : str
Particle name.
Returns
-------
str
Particle name with exactly 2 characters.
"""
if len(name) > 1:
return name[:2]
else:
return name + ' '
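# Examples: twochar('C') -> 'C ', twochar('Cu') -> 'Cu', twochar('C12') -> 'C1'.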
class LJQData(dict):
"""
Store Lennard-Jones parameters and charges for each particle type. In
the simplest version, each particle type has one set of Lennard-Jones
parameters, with geometric mixing applied between parameters of
different types. Parameters for individual pairs of particle types can
be specified in the 'lj_pairs' dictionary.
====== Example ======
Set the Lennard-Jones and Coulomb cutoffs to 12 and 8 Angstroms, geometric
mixing of the Lennard-Jones parameters for particles 'C1' and 'C2' and
between 'C2' and 'C3', custom parameters and cutoff for the interaction
between 'C1' and 'C3':
LJQData.lj_cutoff = 12.0
LJQData.c_cutoff = 8.0
LJQData['C1'] = [LJ-epsilon (eV), LJ-sigma (A), charge (e)]
LJQData['C2'] = [LJ-epsilon (eV), LJ-sigma (A), charge (e)]
LJQData['C3'] = [LJ-epsilon (eV), LJ-sigma (A), charge (e)]
LJQData.lj_pairs['C1-C3'] = [epsilon (eV), sigma (A), cutoff (A)]
====== End of example ======
"""
def __init__(self, args):
dict.__init__(self, args)
# default cutoffs
self.lj_cutoff = 10.0
self.c_cutoff = 7.4
self.lj_pairs = {}
class BondData:
"""
Store spring constants and equilibrium distances for harmonic potentials
and ensure correct handling of permutations. See documentation of the
LAMMPS 'bond_style harmonic' command for details.
"""
def __init__(self, name_value_hash=None):
if name_value_hash:
self.nvh = name_value_hash
self.set_names(name_value_hash.keys())
def set_names(self, names):
if not hasattr(self, 'names'):
self.names = set()
for name in names:
aname, bname = name.split('-')
name1 = twochar(aname) + '-' + twochar(bname)
name2 = twochar(bname) + '-' + twochar(aname)
if name1 not in self.names and name2 not in self.names:
self.names.add(name1)
def get_name(self, aname, bname):
name1 = twochar(aname) + '-' + twochar(bname)
name2 = twochar(bname) + '-' + twochar(aname)
if name1 in self.names:
return name1
elif name2 in self.names:
return name2
else:
return None
def name_value(self, aname, bname):
name1 = twochar(aname) + '-' + twochar(bname)
name2 = twochar(bname) + '-' + twochar(aname)
if name1 in self.nvh:
return name1, self.nvh[name1]
if name2 in self.nvh:
return name2, self.nvh[name2]
return None, None
def get_value(self, aname, bname):
return self.name_value(aname, bname)[1]
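# Usage sketch for BondData (the force-field numbers below are made up):
#     bd = BondData({'C -H ': [29.4, 1.09]})  # [K (eV/A^2), r0 (A)]
#     bd.get_value('H', 'C')  # -> [29.4, 1.09]; lookup is order-insensitive
# Note that twochar() pads one-character names with a space, hence the
# 'C -H ' key.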
class CutoffList(BondData):
"""
Store cutoffs for pair interactions and ensure correct handling of
permutations. Cutoffs can be used to automatically find all interacting
atoms of an OPLSStructure object based on a simple distance criterion.
"""
def max(self):
return max(self.nvh.values())
class AnglesData:
"""
Store spring constants and equilibrium angles for harmonic potentials
and ensure correct handling of permutations. See documentation of the
LAMMPS 'angle_style harmonic' command for details.
"""
def __init__(self, name_value_hash=None):
if name_value_hash:
self.nvh = name_value_hash
self.set_names(name_value_hash.keys())
def set_names(self, names):
if not hasattr(self, 'names'):
self.names = set()
for name in names:
aname, bname, cname = name.split('-')
name1 = (twochar(aname) + '-' +
twochar(bname) + '-' +
twochar(cname))
name2 = (twochar(cname) + '-' +
twochar(bname) + '-' +
twochar(aname))
if name1 not in self.names and name2 not in self.names:
self.names.add(name1)
def add_name(self, aname, bname, cname):
if not hasattr(self, 'names'):
self.names = set()
name1 = (twochar(aname) + '-' +
twochar(bname) + '-' +
twochar(cname))
name2 = (twochar(cname) + '-' +
twochar(bname) + '-' +
twochar(aname))
if name1 not in self.names and name2 not in self.names:
self.names.add(name1)
def get_name(self, aname, bname, cname):
if not hasattr(self, 'names'):
return None
name1 = (twochar(aname) + '-' +
twochar(bname) + '-' +
twochar(cname))
name2 = (twochar(cname) + '-' +
twochar(bname) + '-' +
twochar(aname))
if name1 in self.names:
return name1
elif name2 in self.names:
return name2
else:
return None
def name_value(self, aname, bname, cname):
for name in [(twochar(aname) + '-' +
twochar(bname) + '-' +
twochar(cname)),
(twochar(cname) + '-' +
twochar(bname) + '-' +
twochar(aname))
]:
if name in self.nvh:
return name, self.nvh[name]
return None, None
class DihedralsData:
"""
Store energy constants for dihedral potentials and ensure correct handling
of permutations. See documentation of the LAMMPS 'dihedral_style opls'
command for details.
"""
def __init__(self, name_value_hash=None):
if name_value_hash:
self.nvh = name_value_hash
self.set_names(name_value_hash.keys())
def set_names(self, names):
if not hasattr(self, 'names'):
self.names = set()
for name in names:
aname, bname, cname, dname = name.split('-')
name1 = (twochar(aname) + '-' + twochar(bname) + '-' +
twochar(cname) + '-' + twochar(dname))
name2 = (twochar(dname) + '-' + twochar(cname) + '-' +
twochar(bname) + '-' + twochar(aname))
if name1 not in self.names and name2 not in self.names:
self.names.add(name1)
def add_name(self, aname, bname, cname, dname):
if not hasattr(self, 'names'):
self.names = set()
name1 = (twochar(aname) + '-' + twochar(bname) + '-' +
twochar(cname) + '-' + twochar(dname))
name2 = (twochar(dname) + '-' + twochar(cname) + '-' +
twochar(bname) + '-' + twochar(aname))
if name1 not in self.names and name2 not in self.names:
self.names.add(name1)
def get_name(self, aname, bname, cname, dname):
if not hasattr(self, 'names'):
return None
name1 = (twochar(aname) + '-' + twochar(bname) + '-' +
twochar(cname) + '-' + twochar(dname))
name2 = (twochar(dname) + '-' + twochar(cname) + '-' +
twochar(bname) + '-' + twochar(aname))
if name1 in self.names:
return name1
elif name2 in self.names:
return name2
else:
return None
def name_value(self, aname, bname, cname, dname):
for name in [(twochar(aname) + '-' + twochar(bname) + '-' +
twochar(cname) + '-' + twochar(dname)),
(twochar(dname) + '-' + twochar(cname) + '-' +
twochar(bname) + '-' + twochar(aname))
]:
if name in self.nvh:
return name, self.nvh[name]
return None, None
class OPLSStructure(ase.Atoms):
"""
Extension of the ASE Atoms class for non-reactive simulations.
"""
default_map = {
'BR': 'Br',
'Be': 'Be',
'C0': 'Ca',
'Li': 'Li',
'Mg': 'Mg',
'Al': 'Al',
'Ar': 'Ar',
}
def __init__(self, *args, **kwargs):
"""
Set a type for each atom to specify the interaction with its neighbors.
This enables atoms of the same element to have different interaction
potentials. During initialization, the types are initially derived from
the chemical symbols of the atoms.
Notes
-----
self._combinations
Angle lists are generated from neighbor lists. Assume an atom with the
number 2 has the three neighbors [4, 6, 9]. Then the following angles
with 2 in the middle are possible: (4-2-6), (4-2-9), (6-2-9) and the
equivalent angles (6-2-4), (9-2-4) and (9-2-6). self._combinations
contains predefined combinations of indices of the neighbor lists for
the most frequently occurring numbers of nearest neighbors. With these,
the list of occurring angles can be determined much faster than if the
combinations had to be calculated in each step.
"""
ase.Atoms.__init__(self, *args, **kwargs)
if len(self) == 0:
self.types = None
else:
types = np.array(self.get_chemical_symbols())
self.types = np.unique(types)
tags = np.zeros(len(self), dtype=int)
for itype, type_str in enumerate(self.types):
tags[types == type_str] = itype
self.set_tags(tags)
self._combinations = {}
self._combinations[0] = []
self._combinations[1] = []
self._combinations[2] = [(0, 1)]
self._combinations[3] = [(0, 1), (0, 2), (1, 2)]
self._combinations[4] = [(0, 1), (0, 2), (1, 2), (0, 3), (1, 3),
(2, 3)]
self._combinations[5] = [(0, 1), (0, 2), (1, 2), (0, 3), (1, 3),
(2, 3), (0, 4), (1, 4), (2, 4), (3, 4)]
self._combinations[6] = [(0, 1), (0, 2), (1, 2), (0, 3), (1, 3),
(2, 3), (0, 4), (1, 4), (2, 4), (3, 4),
(0, 5), (1, 5), (2, 5), (3, 5), (4, 5)]
self._combinations[7] = [(0, 1), (0, 2), (1, 2), (0, 3), (1, 3),
(2, 3), (0, 4), (1, 4), (2, 4), (3, 4),
(0, 5), (1, 5), (2, 5), (3, 5), (4, 5),
(0, 6), (1, 6), (2, 6), (3, 6), (4, 6),
(5, 6)]
self._combinations[8] = [(0, 1), (0, 2), (1, 2), (0, 3), (1, 3),
(2, 3), (0, 4), (1, 4), (2, 4), (3, 4),
(0, 5), (1, 5), (2, 5), (3, 5), (4, 5),
(0, 6), (1, 6), (2, 6), (3, 6), (4, 6),
(5, 6), (0, 7), (1, 7), (2, 7), (3, 7),
(4, 7), (5, 7), (6, 7)]
def _get_combinations(self, n):
"""
Fallback for a large number of neighbors for which the
possible combinations are not included in self._combinations
Parameters
----------
n : int
Number of next neighbors of an atom.
Returns
-------
list
See documentation of self._combinations for details.
"""
r = range(n)
i = np.tile(r, n)
j = np.repeat(r, n)
return zip(i[i < j], j[i < j])
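    # e.g. list(self._get_combinations(3)) -> [(0, 1), (0, 2), (1, 2)],
    # matching the precomputed entry self._combinations[3].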
def append(self, atom):
"""
Append atom to end.
Parameters
----------
atom : ase.Atoms
"""
self.extend(ase.Atoms([atom]))
def set_types(self, types):
"""
Set a type for each atom to specify the interaction with its
neighbors. This enables atoms of the same element to have different
interaction potentials.
Parameters
----------
types : list
A list of strings that specify the type of each atom.
"""
types = np.array(types)
self.types = np.unique(types)
tags = np.zeros(len(self), dtype=int)
for itype, type_str in enumerate(self.types):
tags[types == type_str] = itype
self.set_tags(tags)
def get_types(self):
"""
Returns a unique list of atom types.
Returns
-------
numpy.ndarray
Each element is a str with two characters.
"""
return self.types
def set_cutoffs(self, cutoffs):
"""
Add a CutoffList object to the structure. This allows the
'get_neighbors' method to find all interacting atoms of the structure
based on a simple distance criterion.
Parameters
----------
cutoffs : opls.CutoffList
"""
self.cutoffs = cutoffs
def get_neighbors(self):
"""
Find all atoms which might interact with each
other based on a simple distance criterion.
"""
atoms = ase.Atoms(self)
types = np.array(self.get_types())
tags = atoms.get_tags()
cut = self.cutoffs.max()
ni, nj, dr = matscipy.neighbours.neighbour_list('ijd', atoms, cut)
tags2cutoffs = np.full([len(types), len(types)], -1.)
for i, itype in enumerate(types):
for j, jtype in enumerate(types):
cutoff = self.cutoffs.get_value(itype, jtype)
if cutoff is not None:
tags2cutoffs[i, j] = self.cutoffs.get_value(itype, jtype)
cutoff_undef = np.where(tags2cutoffs < 0.)
if np.shape(cutoff_undef)[1] > 0:
for i in range(np.shape(cutoff_undef)[1]):
iname = types[cutoff_undef[0][i]]
jname = types[cutoff_undef[1][i]]
raise RuntimeError(f'Cutoff {iname}-{jname} not found')
cut = tags2cutoffs[tags[ni], tags[nj]]
self.ibond = ni[dr <= cut]
self.jbond = nj[dr <= cut]
def set_atom_data(self, atom_data):
"""
Set Lennard-Jones parameters and atomic charges. Notice that each
atom has exactly one set of Lennard-Jones parameters. Parameters
for interactions between different types of atoms are calculated
by geometric mixing. See documentation of the LAMMPS 'pair_modify'
command for details.
Parameters
----------
atom_data : dict
Dictionary containing Lennard-Jones parameters and charges for
each particle type. key: Particle type, one or two characters,
value: [LJ-epsilon, LJ-sigma, charge].
"""
self.atom_data = atom_data
def get_charges(self):
"""
Return an array of atomic charges. Same functionality as the
'get_charges' method of the 'ase.Atoms' class, but atomic charges
are taken from a user definition instead of the result of a
calculation.
Returns
-------
numpy.ndarray
"""
self.charges = np.zeros(len(self), dtype=float)
for i, itype in enumerate(self.types):
self.charges[self.get_tags() == i] = self.atom_data[itype][2]
return self.charges
def get_bonds(self, bonds=None):
"""
Returns an array of all bond types and
an array of all bonds in the system.
Parameters
----------
bonds : opls.BondData, optional
Returns
-------
bond_types : numpy.ndarray
Array of strings characterizing all present bond types.
Example: A system consists of particles with the types 'A1' and
'A2'. If all particles interact with each other, bond_types
will be ['A1-A1', 'A1-A2', 'A2-A2']. If there were no
interactions between the types 'A1' and 'A2', bond_types would
be ['A1-A1', 'A2-A2'].
bond_list : numpy.ndarray
bond_list.shape = (n, 3) where n is the number of bonds found in
the system. Contains arrays of 3 integers for each bond in the
system. First number: interaction type, index of bond_types,
second and third numbers: indices of participating particles.
Example: A system consists of 3 particles of type 'AA', all
particles are interacting. bond_types would be ['AA-AA'] and
bond_list would be [[0, 0, 1], [0, 0, 2], [0, 1, 2]].
Raises
------
RuntimeError
If 'bonds' is present and bonds are found for which no
parameters are defined. In this case a warning and a full list of
all affected bonds will be printed on STDOUT.
"""
types = np.array(self.get_types())
tags = self.get_tags()
if bonds is not None:
self.bonds = bonds
if not hasattr(self, 'ibond'):
self.get_neighbors()
ibond = self.ibond
jbond = self.jbond
# remove duplicates from neighbor list
mask = jbond <= ibond
ibond = ibond[mask]
jbond = jbond[mask]
tags2bond_names = np.full([len(types), len(types)], '', dtype=object)
for i, itype in enumerate(types):
for j, jtype in enumerate(types):
name = self.cutoffs.get_name(itype, jtype)
if name is None:
raise RuntimeError(f'Cutoff {itype}-{jtype} not found')
tags2bond_names[i, j] = name
names = tags2bond_names[tags[ibond], tags[jbond]]
self.bond_types = np.unique(names)
self.bond_list = np.empty([0, 3], dtype=int)
for n, bond_type in enumerate(self.bond_types):
mask = names == bond_type
bond_list_n = np.empty([len(np.where(mask)[0]), 3], dtype=int)
bond_list_n[:, 0] = n
bond_list_n[:, 1] = ibond[np.where(mask)]
bond_list_n[:, 2] = jbond[np.where(mask)]
self.bond_list = np.append(self.bond_list, bond_list_n, axis=0)
if hasattr(self, 'bonds'):
potential_unknown = False
for nb, bond_type in enumerate(self.bond_types):
itype, jtype = bond_type.split('-')
if self.bonds.get_value(itype, jtype) is None:
print('ERROR: Pair potential %s-%s not found' %
(itype, jtype))
print('List of affected bonds:')
mask = self.bond_list.T[0] == nb
print(self.bond_list[mask].T[1:].T)
potential_unknown = True
if potential_unknown:
raise RuntimeError('Undefined pair potentials.')
return self.bond_types, self.bond_list
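# A minimal usage sketch (the 'cutoff_list' and 'bond_data' objects are
# hypothetical instances of opls.CutoffList and opls.BondData, constructed
# elsewhere):
#
#   struct = OPLSStructure('CH4')  # any ase.Atoms-style constructor works
#   struct.set_types(['C3', 'H3', 'H3', 'H3', 'H3'])
#   struct.set_cutoffs(cutoff_list)
#   bond_types, bond_list = struct.get_bonds(bond_data)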
def get_angles(self, angles=None):
"""
Returns an array of all angle types and
an array of all angles in the system.
Parameters
----------
angles : opls.BondData, optional
Returns
-------
ang_types : list
Array of strings characterizing all present angle types.
Example: A system consists of atoms of types 'A1' and 'A2',
all conceivable angle types are present in the system.
ang_types would be ['A1-A1-A1', 'A1-A1-A2', 'A1-A2-A1',
'A1-A2-A2', 'A2-A1-A2', 'A2-A2-A2'].
ang_list : list
len(ang_list) = n where n is the number of angles found in the
system. Each list entry is a list of 4 integers, characterizing
the angles present in the system. Example: A system contains 7
atoms, (0,1,2) of type 'A1' and (3,4,5,6) of type 'A2'. If
there are angles between (0,1,2), (0,3,4) and (0,5,6), ang_types
would be ['A1-A1-A1', 'A2-A1-A2'] and ang_list would be
[[0, 0, 1, 2], [1, 0, 3, 4], [1, 0, 5, 6]].
Raises
------
RuntimeError
If 'angles' is present and angles are found for which no
parameters are defined. In this case a warning and a full list of
all affected angles will be printed on STDOUT.
"""
types = np.array(self.get_types())
tags = self.get_tags()
self.ang_list = []
self.ang_types = []
if not hasattr(self, 'ibond'):
self.get_neighbors()
ibond = self.ibond
jbond = self.jbond
if angles:
self.angles = angles
if not hasattr(self, 'angles'):
self.angles = AnglesData()
for itype in types:
for jtype in types:
for ktype in types:
self.angles.add_name(itype, jtype, ktype)
angles_undef = AnglesData()
angles_undef_lists = {}
for i in range(len(self)):
iname = types[tags[i]]
ineigh = jbond[ibond == i]
n_ineigh = np.shape(ineigh)[0]
if n_ineigh not in self._combinations.keys():
self._combinations[n_ineigh] = self._get_combinations(n_ineigh)
for nj, nk in self._combinations[len(ineigh)]:
j = ineigh[nj]
k = ineigh[nk]
jname = types[tags[j]]
kname = types[tags[k]]
name = self.angles.get_name(jname, iname, kname)
if hasattr(self, 'angles') and name is None:
# Angle found without matching parameter definition
# Add to list anyway to get meaningful error messages
if not angles_undef.get_name(jname, iname, kname):
angles_undef.add_name(jname, iname, kname)
angles_undef_lists[
angles_undef.get_name(jname, iname, kname)
] = [[j, i, k]]
else:
angles_undef_lists[
angles_undef.get_name(jname, iname, kname)
].append([j, i, k])
continue
if name not in self.ang_types:
self.ang_types.append(name)
self.ang_list.append([self.ang_types.index(name), j, i, k])
if len(angles_undef_lists) > 0:
for name in angles_undef_lists:
print('ERROR: Angular potential %s not found' % (name))
print('List of affected angles:')
for angle in angles_undef_lists[name]:
print(angle)
raise RuntimeError('Undefined angular potentials.')
return self.ang_types, self.ang_list
def get_dihedrals(self, dihedrals=None, full_output=False):
"""
Returns an array of all dihedral types and
an array of all dihedrals in the system.
Parameters
----------
dihedrals : opls.BondData, optional
full_output : bool, optional
Returns
-------
dih_types : list
Array of strings characterizing all present dihedral types.
Example: Consider a system consisting of one benzene molecule.
There are three possible types of dihedrals and dih_type would
be ['C-C-C-C', 'C-C-C-H', 'H-C-C-H'].
dih_list : list
len(dih_list) = n where n is the number of dihedrals found in the
system. Each list entry is a list of 5 integers, characterizing
the dihedrals present in the system. Example: Consider a system
consisting of one benzene molecule with the 'C' atoms
(0,1,2,3,4,5) and the 'H' atoms (6,7,8,9,10,11). dih_type would
be ['C-C-C-C', 'C-C-C-H', 'H-C-C-H'] and dih_list would be
[[0, 0, 1, 2, 3], [0, 1, 2, 3, 4], ... , [1, 6, 0, 1, 2],
[1, 7, 1, 2, 3], ... , [2, 6, 0, 1, 7], [2, 7, 1, 2, 8], ...].
Notes
-----
Prints a warning to STDOUT if dihedrals are found for which no
parameters are defined. If full_output is set, a complete list of
all affected dihedrals is printed, if not, an example is printed
for each dihedral type.
"""
types = self.get_types()
tags = self.get_tags()
if dihedrals:
self.dihedrals = dihedrals
if not hasattr(self, 'dihedrals'):
self.dihedrals = DihedralsData()
for itype in types:
for jtype in types:
for ktype in types:
for ltype in types:
self.dihedrals.add_name(itype, jtype, ktype, ltype)
self.dih_list = []
self.dih_types = []
if not hasattr(self, 'ibond'):
self.get_neighbors()
ibond = self.ibond
jbond = self.jbond
dihedrals_undef_lists = {}
for j, k in zip(ibond, jbond):
if j < k:
jname = types[tags[j]]
kname = types[tags[k]]
i_dihed = jbond[ibond == j]
l_dihed = jbond[ibond == k]
i_dihed = i_dihed[i_dihed != k]
l_dihed = l_dihed[l_dihed != j]
for i in i_dihed:
iname = types[tags[i]]
for l in l_dihed:
lname = types[tags[l]]
name = self.dihedrals.get_name(iname, jname,
kname, lname)
if hasattr(self, 'dihedrals') and not name:
# Dihedral found without matching parameter
# definition. Add to list anyway to get
# meaningful error messages.
name = (iname + '-' + jname + '-' +
kname + '-' + lname)
if name not in dihedrals_undef_lists:
dihedrals_undef_lists[name] = [[i, j, k, l]]
else:
dihedrals_undef_lists[name].append([i, j,
k, l])
continue
if name not in self.dih_types:
self.dih_types.append(name)
self.dih_list.append([self.dih_types.index(name),
i, j, k, l])
if len(dihedrals_undef_lists) > 0:
# "dihedrals_undef_lists" might contain duplicates,
# i.e. A-B-C-D and D-C-B-A. This could be avoided by
# using a "DihedralsData" object to store dihedral
# names, similar to the way the "AnglesData" object
# is used in the "get_angles()" method. For performance
# reasons, this is not done here.
for name in dihedrals_undef_lists:
print('WARNING: Dihedral potential %s not found' % (name))
if not full_output:
print('Example for affected atoms: %s' %
(str(dihedrals_undef_lists[name][0])))
else:
print('Full list of affected atoms:')
for dihed in dihedrals_undef_lists[name]:
print(dihed)
return self.dih_types, self.dih_list
| 28,475 | 35.743226 | 79 | py |
matscipy | matscipy-master/matscipy/drift.py | #
# Copyright 2017 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from matscipy.neighbours import mic
###
class RemoveDrift:
"""
Remove drift of the center of mass motion of an atomic system that may
occur in periodic simulations.
"""
def __init__(self, traj):
self.traj = traj
# Stores the global displacement vectors (shift vectors) required to
# move all positions back to original.
self.shifts = [np.zeros(3)]
def _fill_shifts_upto(self, i):
# Iterate up to frame i the full trajectory first and generate a list
# of displacement vectors.
while len(self.shifts) <= i:
j = len(self.shifts)
a0 = self.traj[j-1]
a1 = self.traj[j]
s0 = a0.get_scaled_positions()%1.0
s1 = a1.get_scaled_positions()%1.0
sdisps = mic(s1-s0, np.eye(3), pbc=a0.pbc)
self.shifts += [self.shifts[-1]+sdisps.mean(axis=0)]
def __getitem__(self, i=-1):
if i < 0:
i = len(self) + i
if i < 0 or i >= len(self):
raise IndexError('Trajectory index out of range.')
self._fill_shifts_upto(i)
a = self.traj[i]
a.set_scaled_positions(a.get_scaled_positions()%1.0-self.shifts[i])
return a
def __len__(self):
return len(self.traj)
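# A minimal usage sketch (trajectory file name is hypothetical): wrap any
# indexable trajectory to obtain drift-corrected frames on access.
#
#   import ase.io
#   traj = RemoveDrift(ase.io.Trajectory('md.traj'))
#   last_frame = traj[-1]  # positions shifted by the accumulated drift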
| 2,109 | 30.029412 | 77 | py |
matscipy | matscipy-master/matscipy/cli/__init__.py | # ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2020) Johannes Hörmann, University of Freiburg
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ====================================================================== | 926 | 47.789474 | 72 | py |
matscipy | matscipy-master/matscipy/cli/electrochemistry/c2d.py | #!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
# ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2019) Johannes Hoermann, University of Freiburg
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
"""Generate discrete coordinate sets from continuous distributions.
Export as atom positions in .xyz or LAMMPS data file format.
Copyright 2019 IMTEK Simulation
University of Freiburg
Authors:
Johannes Hoermann <[email protected]>
Lukas Elflein <[email protected]>
"""
import argparse
import logging
import os
import os.path
import sys
import numpy as np
import ase
import ase.io
import scipy.constants as sc
from scipy import interpolate, integrate
from matscipy.electrochemistry import continuous2discrete
logger = logging.getLogger(__name__)
def main():
"""Generate discrete coordinate sets from continuous distributions.
Export as atom positions in .xyz or LAMMPS data file format.
Plot continuous and discrete distributions if wanted.
ATTENTION: LAMMPS data file export (atom style 'full') requires
ase>=3.19.0b1 (> 6th Nov 2019) due to a recently resolved issue"""
# in order to have both:
# * preformatted help text and ...
# * automatic display of defaults
class ArgumentDefaultsAndRawDescriptionHelpFormatter(
argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
class StoreAsNumpyArray(argparse._StoreAction):
def __call__(self, parser, namespace, values, option_string=None):
values = np.array(values, ndmin=1)
return super().__call__(parser, namespace, values, option_string)
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsAndRawDescriptionHelpFormatter)
parser.add_argument('infile', metavar='IN', nargs='?',
help='binary numpy .npz or plain text .txt input file')
parser.add_argument('outfile', metavar='OUT', nargs='?',
help='.xyz format output file')
parser.add_argument('--box', '-b', default=[50.0e-9, 50.0e-9, 100.0e-9], nargs=3,
action=StoreAsNumpyArray,
metavar=('X', 'Y', 'Z'), required=False, type=float,
dest="box", help='Box dimensions')
parser.add_argument('--names', default=['Na', 'Cl'], type=str, nargs='+',
metavar=('NAME'), required=False, dest="names",
help='Atom names')
parser.add_argument('--charges', default=[1, -1], type=float, nargs='+',
action=StoreAsNumpyArray,
metavar=('NAME'), required=False, dest="charges",
help='Atom charges')
parser.add_argument('--mol-id-offset', default=[-1, -1],
type=int, nargs='+',
action=StoreAsNumpyArray,
metavar=('OFFSET'), required=False,
dest="mol_id_offset",
help=('When storing as LAMMPS data, this tool uses'
' atom style "full", assigning a molecule id to'
' each atom. Per default, a unique 1-indexed'
' molecule id corresponding to its atom id is'
' assigned to each atom. Specify a species-wise'
' offset here. Specifying 0 (zero) for each'
' species will result in molecules containing'
' one atom per species.'))
# sampling
parser.add_argument('--ngridpoints', default=np.nan, type=float, nargs='+',
action=StoreAsNumpyArray,
metavar=('N'), required=False, dest="n_gridpoints",
help=('Number of grid points for discrete support. '
'Continuous support for all samples per default. '
'Specify "NaN" explicitly for continuous support '
'in particular species, i.e. '
'"--ngridpoints 100 NaN 50"'))
parser.add_argument('--sample-size', default=np.nan, type=float, nargs='+',
action=StoreAsNumpyArray,
metavar=('N'), required=False, dest="sample_size",
help=('Sample size. Specify '
'multiple values for specific number of atom '
'positions for each species. Per default, infer '
'sample size from distributions, assuming '
'concentrations in SI units (i.e. mM or mol / m^3).'
'Specify "NaN" explicitly for inference in certain '
'species only, i.e. '
'"--sample-size 100 NaN 50"'))
# output
parser.add_argument('--nbins', default=100, type=int,
metavar=('N'), required=False, dest="nbins",
help='Number of bins for histogram plots')
parser.add_argument('--debug', default=False, required=False,
action='store_true', dest="debug", help='debug flag')
parser.add_argument('--verbose', default=False, required=False,
action='store_true', dest="verbose", help='verbose flag')
parser.add_argument('--log', required=False, nargs='?', dest="log",
default=None, const='c2d.log', metavar='LOG',
help='Write log file c2d.log, optionally specify log file name')
try:
import argcomplete
argcomplete.autocomplete(parser)
# This supports bash autocompletion. To enable this, pip install
# argcomplete, activate global completion, or add
# eval "$(register-python-argcomplete lpad)"
# into your .bash_profile or .bashrc
except ImportError:
pass
args = parser.parse_args()
if args.debug:
loglevel = logging.DEBUG
elif args.verbose:
loglevel = logging.INFO
else:
loglevel = logging.WARNING
logformat = "[ %(filename)s:%(lineno)s - %(funcName)s() ]: %(message)s"
logging.basicConfig(level=loglevel,
format=logformat)
# explicitly modify the root logger (necessary?)
logger = logging.getLogger()
logger.setLevel(loglevel)
# remove all handlers
for h in logger.handlers:
logger.removeHandler(h)
# create and append custom handles
ch = logging.StreamHandler()
formatter = logging.Formatter(logformat)
ch.setFormatter(formatter)
ch.setLevel(loglevel)
logger.addHandler(ch)
if args.log:
fh = logging.FileHandler(args.log)
fh.setFormatter(formatter)
fh.setLevel(loglevel)
logger.addHandler(fh)
logger.info('This is `{}` : `{}`.'.format(__file__, __name__))
if not isinstance(args.box, np.ndarray):
args.box = np.array(args.box, ndmin=1)
if not isinstance(args.sample_size, np.ndarray):
args.sample_size = np.array(args.sample_size, ndmin=1)
if not isinstance(args.n_gridpoints, np.ndarray):
args.n_gridpoints = np.array(args.n_gridpoints, ndmin=1)
if not args.infile:
infile = sys.stdin
infile_format = '.txt'
else:
infile = args.infile
_, infile_format = os.path.splitext(infile)
if infile_format == '.npz':
file = np.load(infile)
x = file['x']
u = file['u']
c = file['c']
else: # elif infile_format == 'txt'
data = np.loadtxt(infile, unpack=True)
x = data[0, :]
u = data[1, :]
c = data[2:, :]
if c.ndim > 1:
C = [c[k, :] for k in range(c.shape[0])]
else:
C = [c]
del c
logger.info('Read {:d} concentration distributions.'.format(len(C)))
sample_size = args.sample_size
sample_size = (sample_size.repeat(len(C)) if sample_size.shape == (1,)
else sample_size)
# distribution functions from concentrations;
D = [interpolate.interp1d(x, c) for c in C]
# infer sample size from integral over concentration distribution if
# no explicit sample size given
# TODO: over-estimates sample size when distribution highly nonlinear
for i, s in enumerate(sample_size):
if np.isnan(s):
# average concentration in distribution over interval
cave = integrate.quad(D[i], 0, args.box[-1])[0] / args.box[-1]  # z direction; quad returns (value, abserr)
# [V] = m^3, [c] = mol / m^3, [N_A] = 1 / mol
sample_size[i] = int(np.round(args.box.prod()*cave * sc.Avogadro))
logger.info('Inferred {} samples on interval [{},{}] m'.format(
sample_size[i], 0, args.box[-1]))
logger.info('for average concentration {} mM.'.format(cave))
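# Worked example for the inference above (assuming SI units): a box of
# 50 nm x 50 nm x 100 nm has V = 2.5e-22 m^3; at c = 0.1 mol/m^3 this
# gives N = round(V * c * N_A) = round(2.5e-22 * 0.1 * 6.022e23) ~ 15.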
n_gridpoints = args.n_gridpoints # assume n_gridpoints is np.ndarray
n_gridpoints = n_gridpoints.repeat(len(C)) if n_gridpoints.shape == (1,) else n_gridpoints
logger.info('Generating {} positions on {} support for species {}.'.format(
sample_size, n_gridpoints, args.names))
logger.info('Generating structure from distribution ...')
struc = [continuous2discrete(
distribution=d,
box=args.box, count=sample_size[k],
n_gridpoints=n_gridpoints[k]) for k, d in enumerate(D)]
logger.info('Generated {:d} coordinate sets.'.format(len(struc)))
logger.info('Creating ase.Atom objects ...')
system = ase.Atoms(
cell=args.box/sc.angstrom,
pbc=[1, 1, 0])
# We assume here that 'mol-id' is 0-indexed internally but converted to a
# 1-indexed LAMMPS data molecule id within ASE's io module.
system.new_array('mol-id', [], dtype=int)
for i, s in enumerate(struc):
logger.info('{:d} samples in coordinate set {:d}.'.format(len(s), i))
new_species = ase.Atoms(
symbols=args.names[i]*int(sample_size[i]),
charges=[args.charges[i]]*int(sample_size[i]),
positions=s/sc.angstrom)
# per default, consecutive numbering, otherwise custom offset:
mol_id_offset = len(system) \
if args.mol_id_offset[i] < 0 else args.mol_id_offset[i]
# without offset, LAMMPS molecule ids are 1-indexed
# mol-id 0 is reserved for 'non-bonded' atoms
new_species.new_array(
'mol-id', 1 + mol_id_offset + np.arange(
start=0, stop=len(new_species), step=1, dtype=int), dtype=int)
system += new_species
logger.info('Writing output file ...')
if not args.outfile:
outfile = sys.stdout
outfile_format = '.xyz'
else:
outfile = args.outfile
_, outfile_format = os.path.splitext(outfile)
logger.info('Output format {} to {}.'.format(outfile_format, outfile))
if outfile_format == '.lammps':
ase.io.write(
outfile, system,
format='lammps-data',
units="real",
atom_style='full',
specorder=args.names)
# specorder should make sure ASE assigns types in the same order as
# species names have been specified on command line.
else: # elif outfile_format == '.xyz'
ase.io.write(outfile, system, format='extxyz')
logger.info('Done.')
if __name__ == '__main__':
# Execute everything else
main()
| 12,367 | 38.514377 | 100 | py |
matscipy | matscipy-master/matscipy/cli/electrochemistry/pnp.py | #!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
# ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2019) Johannes Hoermann, University of Freiburg
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
"""Solves 1D Poisson-Nernst-Planck system
Copyright 2019 IMTEK Simulation
University of Freiburg
Authors:
Johannes Hoermann <[email protected]>
"""
import argparse
import datetime
import logging
import os
import sys
import numpy as np
def main():
"""Solve Poisson-Nernst-Planck system and store distribution.
Specify quantities in SI units at this command line interface."""
# in order to have both:
# * preformatted help text and ...
# * automatic display of defaults
class ArgumentDefaultsAndRawDescriptionHelpFormatter(
argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=ArgumentDefaultsAndRawDescriptionHelpFormatter)
parser.add_argument('outfile', metavar='OUT', default=None, nargs='?',
help='binary numpy .npz or plain text .txt output file')
# physical system parameters
parser.add_argument('--concentrations', '-c',
default=[0.1, 0.1], type=float, nargs='+',
metavar='c', required=False, dest="concentrations",
help='Ion species concentrations c (mol m^-3, mM)')
parser.add_argument('--charges', '-z',
default=[1, -1], type=float, nargs='+',
metavar='z', required=False, dest="charges",
help='Ion species number charges z')
parser.add_argument('--potential', '-u',
default=0.05, type=float,
metavar='U', required=False, dest="potential",
help='Potential drop from left to right dU (V)')
parser.add_argument('--length', '-l',
default=100.0e-9, type=float,
metavar='L', required=False, dest="length",
help='Domain length (m)')
parser.add_argument('--temperature', '-T',
default=298.15, type=float,
metavar='T', required=False, dest="temperature",
help='Temperature (K)')
parser.add_argument('--relative-permittivity', '--epsilon-r', '--eps',
default=79.0, type=float,
metavar='eps', required=False,
dest="relative_permittivity",
help='Relative permittivity')
parser.add_argument('--compact-layer', '--stern-layer', '--lambda-s',
default=0.0, type=float,
metavar='L', required=False,
dest="lambda_S",
help='Stern or compact layer thickness (for Robin BC)')
parser.add_argument('--boundary-conditions', '-bc',
default='cell', type=str,
metavar='BC', required=False,
dest="boundary_conditions",
choices=(
'interface', # open half-space
'cell', # 1D electrochemical cell with zero flux BC
'cell-stern', # 1D cell with linear compact layer regime
'cell-robin', # 1D cell with implicit compact layer by Robin BC
),
help='Boundary conditions')
# technical settings
parser.add_argument('--segments', '-N',
default=200, type=int,
metavar='N', required=False,
dest="segments",
help='Number of discretization segments')
parser.add_argument('--maximum-iterations', '--maxit',
default=20, type=int,
metavar='N', required=False,
dest="maxit",
help='Maximum number of Newton iterations')
parser.add_argument('--absolute-tolerance',
default=1e-8, type=float,
metavar='e', required=False,
dest="absolute_tolerance",
help='Absolute tolerance Newton solver convergence criterion')
parser.add_argument('--convergence-stats', default=False, required=False,
action='store_true', dest="convergence",
help='Record and store Newton solver convergence statistics')
parser.add_argument('--debug', default=False, required=False,
action='store_true', dest="debug", help='debug flag')
parser.add_argument('--verbose', default=False, required=False,
action='store_true', dest="verbose", help='verbose flag')
parser.add_argument('--log', required=False, nargs='?', dest="log",
default=None, const='pnp.log', metavar='LOG',
help='Write log file pnp.log, optionally specify log file name')
try:
import argcomplete
argcomplete.autocomplete(parser)
# This supports bash autocompletion. To enable this, pip install
# argcomplete, activate global completion, or add
# eval "$(register-python-argcomplete lpad)"
# into your .bash_profile or .bashrc
except ImportError:
pass
args = parser.parse_args()
if args.debug:
loglevel = logging.DEBUG
elif args.verbose:
loglevel = logging.INFO
else:
loglevel = logging.WARNING
# PoissonNernstPlanckSystem makes extensive use of Python's logging module
logformat = "[ %(filename)s:%(lineno)s - %(funcName)s() ]: %(message)s"
logging.basicConfig(level=loglevel,
format=logformat)
# explicitly modify the root logger (necessary?)
logger = logging.getLogger()
logger.setLevel(loglevel)
# remove all handlers
for h in logger.handlers:
logger.removeHandler(h)
# create and append custom handles
ch = logging.StreamHandler()
formatter = logging.Formatter(logformat)
ch.setFormatter(formatter)
ch.setLevel(loglevel)
logger.addHandler(ch)
if args.log:
fh = logging.FileHandler(args.log)
fh.setFormatter(formatter)
fh.setLevel(loglevel)
logger.addHandler(fh)
# use FEniCS finite element solver if available,
# otherwise own controlled volume scheme
try:
import fenics
from matscipy.electrochemistry.poisson_nernst_planck_solver_fenics \
import PoissonNernstPlanckSystemFEniCS as PoissonNernstPlanckSystem
import dolfin
dolfin.cpp.log.set_log_level(loglevel)
logger.info("Will use FEniCS finite element solver.")
except ModuleNotFoundError:
logger.warning(
"No FEniCS finite element solver found,"
" falling back to internal controlled-volume implementation."
" ATTENTION: Number conservation not exact.")
from matscipy.electrochemistry import PoissonNernstPlanckSystem
# set up system
pnp = PoissonNernstPlanckSystem(
c=np.array(args.concentrations, dtype=float),
z=np.array(args.charges, dtype=float),
L=float(args.length),
T=float(args.temperature),
delta_u=float(args.potential),
lambda_S=float(args.lambda_S),
N=args.segments,
maxit=args.maxit,
e=args.absolute_tolerance,
relative_permittivity=float(args.relative_permittivity))
# compare with '==' rather than "in ('cell-robin')": the latter is a
# substring test on a plain string and would also match 'cell'
if args.boundary_conditions == 'cell-robin' and float(args.lambda_S) > 0:
pnp.useSternLayerCellBC()
elif ((args.boundary_conditions == 'cell-robin' and float(args.lambda_S) == 0)
or args.boundary_conditions == 'cell'):
pnp.useStandardCellBC()
elif args.boundary_conditions == 'interface':
pnp.useStandardInterfaceBC()
else:
raise ValueError("Boundary conditions '{}' not implemented!".format(
args.boundary_conditions))
pnp.solve()
extra_kwargs = {}
if args.convergence:
extra_kwargs.update({
'convergence_step_absolute': pnp.convergenceStepAbsolute,
'convergence_step_relative': pnp.convergenceStepRelative,
'convergence_residual_absolute': pnp.convergenceResidualAbsolute})
if not args.outfile:
outfile = sys.stdout
format = 'txt'
else:
outfile = args.outfile
_, format = os.path.splitext(outfile)
if format == '.npz':
np.savez(file=outfile,
x=pnp.grid, u=pnp.potential, c=pnp.concentration, **extra_kwargs)
else: # elif format == '.txt'
comment = '\n'.join((
'Poisson-Nernst-Planck system, generated on {:s}, {:s}'.format(
os.uname().nodename,
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
'All quantities in SI units.',
'grid x (m), potential u (V), concentration c (mM)'))
header = comment + '\n' + '{:20s} {:22s} '.format('x', 'u') + ' '.join(
['{:22s}'.format('c{:02d}'.format(k)) for k in range(pnp.M)])
data = np.column_stack([pnp.grid, pnp.potential, pnp.concentration.T])
np.savetxt(outfile, data, fmt='%22.15e', header=header)
# write out final state as usual, but mark process failed if not converged
# if not pnp.converged:
# sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()
| 10,487 | 38.577358 | 91 | py |
matscipy | matscipy-master/matscipy/cli/electrochemistry/__init__.py | # ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2020) Johannes Hörmann, University of Freiburg
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ====================================================================== | 926 | 47.789474 | 72 | py |
matscipy | matscipy-master/matscipy/cli/electrochemistry/stericify.py | #!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
# ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2019) Johannes Hoermann, University of Freiburg
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
"""
Applies steric correction to coordinate sample.
Copyright 2020 IMTEK Simulation
University of Freiburg
Authors:
Johannes Hoermann <[email protected]>
Examples:
Apply one steric radius for all species:
stericify.py --verbose --radii 2.0 -- input.lammps output.lammps
Apply per-species steric radii (for a system of supposedly two species):
stericify.py --verbose --radii 2.0 5.0 -- input.lammps output.lammps
Overrides other parameters otherwise infered from input file:
stericify.py --verbose --box 48 48 196 --names Na Cl --charges 1 -1 \\
--radii 2.0 5.0 -- input.lammps output.lammps
"""
import logging
import os
import sys
import time
import ase
import ase.io
import numpy as np
try:
import json
import urllib.parse
except ImportError:
pass
from matscipy.electrochemistry.steric_correction import apply_steric_correction
from matscipy.electrochemistry.steric_correction import scipy_distance_based_closest_pair
def main():
"""Applies steric correction to coordiante sample. Assures a certain
minimum pairwiese distance between points in sample and
between points and box boundary.
ATTENTION: LAMMPS data file export (atom style 'full') requires ase>3.20.0
"""
logger = logging.getLogger()
import argparse
# in order to have both:
# * preformatted help text and ...
# * automatic display of defaults
class ArgumentDefaultsAndRawDescriptionHelpFormatter(
argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
class StoreAsNumpyArray(argparse._StoreAction):
def __call__(self, parser, namespace, values, option_string=None):
values = np.array(values, ndmin=1)
return super().__call__(
parser, namespace, values, option_string)
class StoreAsDict(argparse._StoreAction):
def __call__(self, parser, namespace, value, option_string=None):
if 'json' not in sys.modules or 'urllib' not in sys.modules:
raise ModuleNotFoundError(
"Modules 'json' and 'urllib' required for parsing dicts.")
try:
parsed_value = json.loads(urllib.parse.unquote(value))
except json.decoder.JSONDecodeError as exc:
logger.error("Failed parsing '{}'".format(value))
raise exc
return super().__call__(
parser, namespace, parsed_value, option_string)
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsAndRawDescriptionHelpFormatter)
parser.add_argument('infile', metavar='IN', nargs='?',
help='.xyz or .lammps (LAMMPS data) format input file')
parser.add_argument('outfile', metavar='OUT', nargs='?',
help='.xyz or .lammps (LAMMPS data) format output file')
parser.add_argument('--radii', '-r', default=[2.0], type=float, nargs='+',
action=StoreAsNumpyArray,
metavar=('R'), required=False, dest="radii",
help=('Steric radii, either one for all or '
'species-wise. Same units as distances in input.'))
parser.add_argument('--box', '-b', default=None, nargs=3,
action=StoreAsNumpyArray,
metavar=('X', 'Y', 'Z'), required=False, type=float,
dest="box", help=('Bounding box, overrides cell from'
'input. Same units as distances in input.'))
parser.add_argument('--names', default=None, type=str, nargs='+',
metavar=('NAME'), required=False, dest="names",
help='Atom names, overrides names from input')
parser.add_argument('--charges', default=None, type=float, nargs='+',
action=StoreAsNumpyArray,
metavar=('Z'), required=False, dest="charges",
help='Atom charges, overrides charges from input')
parser.add_argument('--method', type=str,
metavar=('METHOD'), required=False,
dest="method",
default='L-BFGS-B',
help='Scipy minimizer')
parser.add_argument('--options', type=str,
action=StoreAsDict,
metavar=('JSON DICT'), required=False,
dest="options",
default={
'gtol': 1.e-12,
'ftol': 1.e-12,
'maxiter': 100,
'disp': False,
},
help=(
'Convergence options for scipy minimier.'
' Pass as JSON-formatted key:value dict. See'
' https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html'
' for minimizer-specific options.'))
parser.add_argument('--debug', default=False, required=False,
action='store_true', dest="debug", help='debug flag')
parser.add_argument('--verbose', default=False, required=False,
action='store_true', dest="verbose",
help='verbose flag')
parser.add_argument('--log', required=False, nargs='?', dest="log",
default=None, const='c2d.log', metavar='LOG',
help=(
'Write log file c2d.log, optionally specify log'
' file name'))
try:
import argcomplete
argcomplete.autocomplete(parser)
# This supports bash autocompletion.
# To enable this, 'pip install argcomplete',
# then activate global completion.
except ImportError:
pass
args = parser.parse_args()
if args.debug:
loglevel = logging.DEBUG
elif args.verbose:
loglevel = logging.INFO
else:
loglevel = logging.WARNING
# logformat = ''.join(("%(asctime)s",
# "[ %(filename)s:%(lineno)s - %(funcName)s() ]: %(message)s"))
logformat = "[ %(filename)s:%(lineno)s - %(funcName)s() ]: %(message)s"
logging.basicConfig(level=loglevel,
format=logformat)
# explicitly modify the root logger (necessary?)
logger = logging.getLogger()
logger.setLevel(loglevel)
# remove all handlers
for h in logger.handlers:
logger.removeHandler(h)
# create and append custom handles
ch = logging.StreamHandler()
formatter = logging.Formatter(logformat)
ch.setFormatter(formatter)
ch.setLevel(loglevel)
logger.addHandler(ch)
if args.log:
fh = logging.FileHandler(args.log)
fh.setFormatter(formatter)
fh.setLevel(loglevel)
logger.addHandler(fh)
logger.info('This is `{}` : `{}`.'.format(__file__, __name__))
logger.debug('Args: {}'.format(args))
# input validation
if not args.infile:
infile = sys.stdin
infile_format = 'xyz'
else:
infile = args.infile
_, infile_format = os.path.splitext(infile)
logger.info('Input format {} from {}.'.format(infile_format, infile))
if infile_format == '.lammps':
system = ase.io.read(
infile, format='lammps-data', units="real", style='full')
else: # elif outfile_format == '.xyz'
system = ase.io.read(infile, format='extxyz')
n = len(system) # total number of particles
logger.info('Read "{}" system within bounding box'.format(system.symbols))
for l in system.cell:
logger.info(' [ {:> 8.2e},{:> 8.2e},{:> 8.2e} ]'.format(*l))
species_atomic_numbers = np.unique(system.get_atomic_numbers())
species_symbols = [
ase.data.chemical_symbols[i] for i in species_atomic_numbers]
n_species = len(species_atomic_numbers)
logger.info(' containing {:d} particles of {:d} species'.format(
n, n_species))
if not isinstance(args.radii, np.ndarray):
args.radii = np.array(args.radii, ndmin=1)
r = np.zeros(n)
if len(args.radii) == 1:
logger.info('Applying steric radius r = {:.2e} to all species.'.format(
args.radii[0]))
r[:] = args.radii[0]
elif len(args.radii) == n_species:
for i, (a, s) in enumerate(zip(species_atomic_numbers, species_symbols)):
logger.info(
'Applying steric radius r = {:.2e} for species {:s}.'.format(
args.radii[i], s))
r[system.get_atomic_numbers() == a] = args.radii[i]
else:
raise ValueError(
"""Steric radii must either be one value for all species or one value
per species, i.e. {:d} values in your case.""".format(n_species))
if args.box is not None:
if not isinstance(args.box, np.ndarray):
args.box = np.array(args.box, ndmin=1)
logger.info('Box specified on command line')
logger.info(' [ {:> 8.2e},{:> 8.2e},{:> 8.2e} ]'.format(*args.box))
logger.info('overides box from input data.')
system.set_cell(args.box)
if args.charges is not None:
logger.info('Charges specified on command line, reassigning input charges to')
new_charges = np.empty(n, dtype=float)
for a, s, c in zip(species_atomic_numbers, species_symbols, args.charges):
logger.info(
' {:s} -> {}'.format(s, c))
new_charges[system.get_atomic_numbers() == a] = c
system.set_initial_charges(new_charges)
if args.names is not None:
new_species_symbols = args.names
new_species_atomic_numbers = [
ase.data.atomic_numbers[name] for name in new_species_symbols]
logger.info('Species specified on command line, reassigning input species to')
new_atomic_numbers = np.empty(n, dtype=int)
for aold, anew, sold, snew in zip(species_atomic_numbers,
new_species_atomic_numbers,
species_symbols, new_species_symbols):
logger.info(
' {:s} -> {:s}'.format(sold, snew))
new_atomic_numbers[system.get_atomic_numbers() == aold] = anew
system.set_atomic_numbers(new_atomic_numbers)
species_atomic_numbers = new_species_atomic_numbers
species_symbols = new_species_symbols
specorder = args.names # assure type ordering as specified on cmdline
else:
specorder = None # keep ordering as is
# prepare for minimization
x0 = system.get_positions()
# only works for orthogonal box
box3 = np.array(system.get_cell_lengths_and_angles())[0:3]
box6 = np.array([[0., 0., 0], box3]) # needs lower corner
# n = x0.shape[0], set above
# dim = x0.shape[1]
# characterize initial configuration before applying the correction
mindsq, (p1, p2) = scipy_distance_based_closest_pair(x0)
pmin = np.min(x0, axis=0)
pmax = np.max(x0, axis=0)
mind = np.sqrt(mindsq)
logger.info("Minimum pair-wise distance in initial sample: {}".format(mind))
logger.info("First sample point in pair: ({:8.4e},{:8.4e},{:8.4e})".format(*p1))
logger.info("Second sample point in pair ({:8.4e},{:8.4e},{:8.4e})".format(*p2))
logger.info("Box lower boundary: ({:8.4e},{:8.4e},{:8.4e})".format(*box6[0]))
logger.info("Minimum coordinates in sample: ({:8.4e},{:8.4e},{:8.4e})".format(*pmin))
logger.info("Maximum coordinates in sample: ({:8.4e},{:8.4e},{:8.4e})".format(*pmax))
logger.info("Box upper boundary: ({:8.4e},{:8.4e},{:8.4e})".format(*box6[1]))
t0 = time.perf_counter()
x1, res = apply_steric_correction(x0, box=box6, r=r,
method=args.method, options=args.options)
# minimizer method and options as specified on the command line
t1 = time.perf_counter()
dt = t1 - t0
logger.info("{} s runtime".format(dt))
mindsq, (p1, p2) = scipy_distance_based_closest_pair(x1)
mind = np.sqrt(mindsq)
pmin = np.min(x1, axis=0)
pmax = np.max(x1, axis=0)
logger.info("Finished with status = {}, success = {}, #it = {}".format(
res.status, res.success, res.nit))
logger.info(" message = '{}'".format(res.message))
logger.info("Minimum pair-wise distance in final configuration: {:8.4e}".format(mind))
logger.info("First sample point in pair: ({:8.4e},{:8.4e},{:8.4e})".format(*p1))
logger.info("Second sample point in pair ({:8.4e},{:8.4e},{:8.4e})".format(*p2))
logger.info("Box lower boundary: ({:8.4e},{:8.4e},{:8.4e})".format(*box6[0]))
logger.info("Minimum coordinates in sample: ({:8.4e},{:8.4e},{:8.4e})".format(*pmin))
logger.info("Maximum coordinates in sample: ({:8.4e},{:8.4e},{:8.4e})".format(*pmax))
logger.info("Box upper boundary: ({:8.4e},{:8.4e},{:8.4e})".format(*box6[1]))
diff = x1 - x0
n_diff = np.count_nonzero(diff)
diffnorm = np.linalg.norm(diff)
logger.info(
'{:d} coords. differ numerically in final and initial config.'.format(
n_diff
))
logger.info('Norm of difference between final and initial config')
logger.info(' || x1 - x0 || = {:.4e}'.format(diffnorm))
system.set_positions(x1)
if not args.outfile:
outfile = sys.stdout
outfile_format = '.xyz'
else:
outfile = args.outfile
_, outfile_format = os.path.splitext(outfile)
logger.info('Output format {} to {}.'.format(outfile_format, outfile))
if outfile_format == '.lammps':
ase.io.write(
outfile, system,
format='lammps-data', units="real", atom_style='full',
specorder=specorder)
else: # elif outfile_format == '.xyz'
ase.io.write(outfile, system, format='extxyz')
logger.info('Done.')
if __name__ == '__main__':
# Execute everything else
main()
| 15,150 | 38.048969 | 112 | py |
matscipy | matscipy-master/matscipy/fracture_mechanics/crackpathsel.py | #
# Copyright 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
function coordination(r, cutoff, transition_width)
if r > cutoff
f = 0.0
df = 0.0
elseif r > cutoff-transition_width
f = 0.5 * ( cos(pi*(r-cutoff+transition_width)/transition_width) + 1.0 )
df = - 0.5 * pi * sin(pi*(r-cutoff+transition_width)/transition_width) / transition_width
else
f = 1.0
df = 0.0
end
f
end
function dcoordination(r, cutoff, transition_width)
if r > cutoff
df = 0.0
elseif r > cutoff-transition_width
df = - 0.5 * pi * sin(pi*(r-cutoff+transition_width)/transition_width) / transition_width
else
df = 0.0
end
df
end
function energy(pos, neighb_j, neighb_rij, cutoff, transition_width, epsilon)
N = size(pos, 2)
n = zeros(Float64, N)
energies = zeros(Float64, N)
for i = 1:N
for (m, j) in enumerate(neighb_j[i])
r_ij = neighb_rij[i][m]
#@printf("i %d j %d r_ij %f\n", i, j, r_ij)
r_ij > cutoff && continue
f_ij = coordination(r_ij, cutoff, transition_width)
n[i] += f_ij
end
energies[i] += (n[i] - 3)^2
end
for i = 1:N
sum_B_ij = 0.0
for (m, j) in enumerate(neighb_j[i])
r_ij = neighb_rij[i][m]
r_ij > cutoff && continue
f_ij = coordination(r_ij, cutoff, transition_width)
B_ij = (n[j] - 3.0)^2*f_ij
sum_B_ij += B_ij
end
Eb_i = epsilon*(n[i] - 3.0)^2*sum_B_ij
energies[i] += Eb_i
end
E = sum(energies)
return (E, energies, n)
end
function force(pos, neighb_j, neighb_rij, cutoff, transition_width, epsilon, dx)
N = size(pos, 2)
f = zeros(Float64, (3, N))
p = zeros(Float64, (3, N))
p[:, :] = pos
for i = 1:N
for j = 1:3
p[j, i] += dx
ep, local_e_p, n_p = energy(p, neighb_j, neighb_rij, cutoff, transition_width, epsilon)
p[j, i] -= 2dx
em, local_e_m, n_m = energy(p, neighb_j, neighb_rij, cutoff, transition_width, epsilon)
f[j, i] = -(ep - em)/(2dx)
p[j, i] += dx
end
end
f
end
"""
| 2,974 | 27.605769 | 100 | py |
matscipy | matscipy-master/matscipy/fracture_mechanics/crack.py | #
# Copyright 2014-2015, 2017, 2021 Lars Pastewka (U. Freiburg)
# 2014-2016, 2018, 2020-2021 James Kermode (Warwick U.)
# 2015-2017 Punit Patel (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import math
import warnings
import time
import numpy as np
from numpy.linalg import inv
try:
from scipy.optimize import brentq, leastsq, minimize, root
from scipy.sparse import csc_matrix, spdiags
from scipy.sparse.linalg import spsolve, spilu, LinearOperator
except ImportError:
warnings.warn('Warning: no scipy')
import ase.units as units
from ase.optimize.precon import Exp
from matscipy.atomic_strain import atomic_strain
from matscipy.elasticity import (rotate_elastic_constants,
rotate_cubic_elastic_constants,
Voigt_6_to_full_3x3_stress)
from matscipy.surface import MillerDirection, MillerPlane
from matscipy.neighbours import neighbour_list
###
# Constants
PLANE_STRAIN = 'plane strain'
PLANE_STRESS = 'plane stress'
MPa_sqrt_m = 1e6*units.Pascal*np.sqrt(units.m)
###
class RectilinearAnisotropicCrack:
"""
Near field solution for a crack in a rectilinear anisotropic elastic medium.
See:
G. C. Sih, P. C. Paris and G. R. Irwin, Int. J. Frac. Mech. 1, 189 (1965)
"""
def __init__(self):
self.a11 = None
self.a22 = None
self.a12 = None
self.a16 = None
self.a26 = None
self.a66 = None
def set_plane_stress(self, a11, a22, a12, a16, a26, a66):
self.a11 = a11
self.a22 = a22
self.a12 = a12
self.a16 = a16
self.a26 = a26
self.a66 = a66
self._init_crack()
def set_plane_strain(self, b11, b22, b33, b12, b13, b23, b16, b26, b36,
b66):
self.a11 = b11 - (b13*b13)/b33
self.a22 = b22 - (b23*b23)/b33
self.a12 = b12 - (b13*b23)/b33
self.a16 = b16 - (b13*b36)/b33
self.a26 = b26 - (b23*b36)/b33
self.a66 = b66
self._init_crack()
def _init_crack(self):
"""
Initialize dependent parameters.
"""
p = np.poly1d( [ self.a11, -2*self.a16, 2*self.a12+self.a66,
-2*self.a26, self.a22 ] )
mu1, mu2, mu3, mu4 = p.r
if mu1 != mu2.conjugate():
raise RuntimeError('Roots not in pairs.')
if mu3 != mu4.conjugate():
raise RuntimeError('Roots not in pairs.')
self.mu1 = mu1
self.mu2 = mu3
self.p1 = self.a11*(self.mu1**2) + self.a12 - self.a16*self.mu1
self.p2 = self.a11*(self.mu2**2) + self.a12 - self.a16*self.mu2
self.q1 = self.a12*self.mu1 + self.a22/self.mu1 - self.a26
self.q2 = self.a12*self.mu2 + self.a22/self.mu2 - self.a26
self.inv_mu1_mu2 = 1/(self.mu1 - self.mu2)
self.mu1_p2 = self.mu1 * self.p2
self.mu2_p1 = self.mu2 * self.p1
self.mu1_q2 = self.mu1 * self.q2
self.mu2_q1 = self.mu2 * self.q1
def displacements(self, r, theta, k):
"""
Displacement field in mode I fracture. Positions are passed in cylinder
coordinates.
Parameters
----------
r : array_like
Distances from the crack tip.
theta : array_like
Angles with respect to the plane of the crack.
k : float
Stress intensity factor.
Returns
-------
u : array
Displacements parallel to the plane of the crack.
v : array
Displacements normal to the plane of the crack.
"""
h1 = k * np.sqrt(2.0*r/math.pi)
h2 = np.sqrt( np.cos(theta) + self.mu2*np.sin(theta) )
h3 = np.sqrt( np.cos(theta) + self.mu1*np.sin(theta) )
u = h1*( self.inv_mu1_mu2*( self.mu1_p2*h2 - self.mu2_p1*h3 ) ).real
v = h1*( self.inv_mu1_mu2*( self.mu1_q2*h2 - self.mu2_q1*h3 ) ).real
return u, v
def deformation_gradient(self, r, theta, k):
"""
Deformation gradient tensor in mode I fracture. Positions are passed in
cylinder coordinates.
Parameters
----------
r : array_like
Distances from the crack tip.
theta : array_like
Angles with respect to the plane of the crack.
k : float
Stress intensity factor.
Returns
-------
du_dx : array
Derivatives of displacements parallel to the plane within the plane.
du_dy : array
Derivatives of displacements parallel to the plane perpendicular to
the plane.
dv_dx : array
Derivatives of displacements normal to the plane of the crack within
the plane.
dv_dy : array
Derivatives of displacements normal to the plane of the crack
perpendicular to the plane.
"""
f = k / np.sqrt(2*math.pi*r)
h1 = (self.mu1*self.mu2)*self.inv_mu1_mu2
h2 = np.sqrt( np.cos(theta) + self.mu2*np.sin(theta) )
h3 = np.sqrt( np.cos(theta) + self.mu1*np.sin(theta) )
du_dx = f*( self.inv_mu1_mu2*( self.mu1_p2/h2 - self.mu2_p1/h3 ) ).real
du_dy = f*( h1*( self.p2/h2 - self.p1/h3 ) ).real
dv_dx = f*( self.inv_mu1_mu2*( self.mu1_q2/h2 - self.mu2_q1/h3 ) ).real
dv_dy = f*( h1*( self.q2/h2 - self.q1/h3 ) ).real
# We need to add unity matrix to turn this into the deformation gradient
# tensor.
du_dx += np.ones_like(du_dx)
dv_dy += np.ones_like(dv_dy)
return np.transpose([[du_dx, du_dy], [dv_dx, dv_dy]])
def stresses(self, r, theta, k):
"""
Stress field in mode I fracture. Positions are passed in cylinder
coordinates.
Parameters
----------
r : array_like
Distances from the crack tip.
theta : array_like
Angles with respect to the plane of the crack.
k : float
Stress intensity factor.
Returns
-------
sig_x : array
Diagonal component of stress tensor parallel to the plane of the
crack.
sig_y : array
Diagonal component of stress tensor normal to the plane of the
crack.
sig_xy : array
Off-diagonal component of the stress tensor.
"""
f = k / np.sqrt(2.0*math.pi*r)
h1 = (self.mu1*self.mu2)*self.inv_mu1_mu2
h2 = np.sqrt( np.cos(theta) + self.mu2*np.sin(theta) )
h3 = np.sqrt( np.cos(theta) + self.mu1*np.sin(theta) )
sig_x = f*(h1*(self.mu2/h2 - self.mu1/h3)).real
sig_y = f*(self.inv_mu1_mu2*(self.mu1/h2 - self.mu2/h3)).real
sig_xy = f*(h1*(1/h3 - 1/h2)).real
return sig_x, sig_y, sig_xy
def _f(self, theta, v):
h2 = (np.cos(theta) + self.mu2*np.sin(theta))**0.5
h3 = (np.cos(theta) + self.mu1*np.sin(theta))**0.5
return v - (self.mu1_p2*h2 - self.mu2_p1*h3).real / \
(self.mu1_q2*h2 - self.mu2_q1*h3).real
def rtheta(self, u, v, k):
"""
Invert displacement field in mode I fracture, i.e. compute r and theta
from displacements.
"""
# u/v = (self.mu1_p2*h2 - self.mu2_p1*h3)/(self.mu1_q2*h2-self.mu2_q1*h3)
theta = brentq(self._f, -math.pi, math.pi, args=(u/v,))
h2 = (np.cos(theta) + self.mu2*np.sin(theta))**0.5
h3 = (np.cos(theta) + self.mu1*np.sin(theta))**0.5
# invert u = k*sqrt(2*r/pi)*Re(...) for r, once from u and once from v
sqrt_2_r_pi = u/(k*(self.inv_mu1_mu2*(self.mu1_p2*h2 - self.mu2_p1*h3)).real)
r1 = math.pi*sqrt_2_r_pi**2/2
sqrt_2_r_pi = v/(k*(self.inv_mu1_mu2*(self.mu1_q2*h2 - self.mu2_q1*h3)).real)
r2 = math.pi*sqrt_2_r_pi**2/2
return ( (r1+r2)/2, theta )
def k1g(self, surface_energy):
"""
K1G, Griffith critical stress intensity in mode I fracture
"""
return math.sqrt(-4*surface_energy / \
(self.a22*
((self.mu1+self.mu2)/(self.mu1*self.mu2)).imag))
def k1gsqG(self):
return -2/(self.a22*((self.mu1+self.mu2)/(self.mu1*self.mu2)).imag)
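# In formula form, k1g() above evaluates the anisotropic Griffith
# criterion of Sih, Paris and Irwin:
#
#   K_IG = sqrt( -4*gamma / ( a22 * Im[(mu1 + mu2)/(mu1*mu2)] ) )
#
# where gamma is the surface energy and a_ij the reduced compliances.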
###
def displacement_residuals(r0, crack, x, y, ref_x, ref_y, k, power=1):
"""
Return actual displacement field minus ideal displacement field,
divided by r**power.
"""
x0, y0 = r0
u1x = x - ref_x
u1y = y - ref_y
dx = ref_x - x0
dy = ref_y - y0
abs_dr = np.sqrt(dx*dx+dy*dy)
theta = np.arctan2(dy, dx)
u2x, u2y = crack.displacements_from_cylinder_coordinates(abs_dr, theta, k)
if abs(power) < 1e-12:
power_of_abs_dr = 1.0
else:
power_of_abs_dr = abs_dr**power
return (u1x - u2x)/power_of_abs_dr, (u1y - u2y)/power_of_abs_dr
def displacement_residual(r0, crack, x, y, ref_x, ref_y, k, mask=None, power=1):
dux, duy = displacement_residuals(r0, crack, x, y, ref_x, ref_y, k,
power=power)
if mask is None:
return np.transpose([dux, duy]).flatten()
else:
return np.transpose([dux[mask], duy[mask]]).flatten()
def deformation_gradient_residuals(r0, crack, x, y, cur, ref_x, ref_y, ref, k,
cutoff):
"""
Return actual displacement field minus ideal displacement field.
"""
x0, y0 = r0
cur = cur.copy()
cur.set_positions(np.transpose([x, y, cur.positions[:, 2]]))
ref = ref.copy()
ref.set_positions(np.transpose([ref_x, ref_y, ref.positions[:, 2]]))
F1, res1 = atomic_strain(cur, ref, cutoff=cutoff)
# F1 is a 3x3 tensor, we throw away all z-components
[F1xx, F1yx, F1zx], [F1xy, F1yy, F1zy], [F1xz, F1yz, F1zz] = F1.T
F1 = np.array([[F1xx, F1xy], [F1yx, F1yy]]).T
F2 = crack.deformation_gradient(ref.positions[:, 0], ref.positions[:, 1],
x0, y0, k)
return F1-F2
def deformation_gradient_residual(r0, crack, x, y, cur, ref_x, ref_y, ref, k,
cutoff, mask=None):
dF = deformation_gradient_residuals(r0, crack, x, y, cur, ref_x, ref_y, ref,
k, cutoff)
if mask is None:
return dF.flatten()
else:
#return (dF[mask]*dF[mask]).sum(axis=2).sum(axis=1)
return dF[mask].flatten()
###
class CubicCrystalCrack:
"""
Crack in a cubic crystal.
"""
def __init__(self, crack_surface, crack_front, C11=None, C12=None,
C44=None, stress_state=PLANE_STRAIN, C=None, Crot=None):
"""
Initialize a crack in a cubic crystal with elastic constants C11, C12
and C44 (or optionally a full 6x6 elastic constant matrix C).
The crack surface is given by crack_surface, the crack front runs
along the direction given by crack_front.
"""
# x (third_dir) - direction in which the crack is running
# y (crack_surface) - free surface that forms due to the crack
# z (crack_front) - direction of the crack front
third_dir = np.cross(crack_surface, crack_front)
third_dir = np.array(third_dir) / np.sqrt(np.dot(third_dir,
third_dir))
crack_surface = np.array(crack_surface) / \
np.sqrt(np.dot(crack_surface, crack_surface))
crack_front = np.array(crack_front) / \
np.sqrt(np.dot(crack_front, crack_front))
A = np.array([third_dir, crack_surface, crack_front])
if np.linalg.det(A) < 0:
third_dir = -third_dir
A = np.array([third_dir, crack_surface, crack_front])
if Crot is not None:
C6 = Crot
elif C is not None:
C6 = rotate_elastic_constants(C, A)
else:
C6 = rotate_cubic_elastic_constants(C11, C12, C44, A)
self.crack = RectilinearAnisotropicCrack()
S6 = inv(C6)
self.C = C6
self.S = S6
if stress_state == PLANE_STRESS:
self.crack.set_plane_stress(S6[0, 0], S6[1, 1], S6[0, 1],
S6[0, 5], S6[1, 5], S6[5, 5])
elif stress_state == PLANE_STRAIN:
self.crack.set_plane_strain(S6[0, 0], S6[1, 1], S6[2, 2],
S6[0, 1], S6[0, 2], S6[1, 2],
S6[0, 5], S6[1, 5], S6[2, 5],
S6[5, 5])
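# Example construction (sketch; the elastic constants and surface energy are
# placeholders in consistent units):
#
# crack = CubicCrystalCrack(crack_surface=[1, 1, 1], crack_front=[1, -1, 0],
#                           C11=C11, C12=C12, C44=C44)
# k1g = crack.k1g(surface_energy)  # Griffith load for this crack system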
def k1g(self, surface_energy):
"""
Compute Griffith critical stress intensity in mode I fracture.
Parameters
----------
surface_energy : float
Surface energy of the respective crystal surface.
Returns
-------
k1g : float
Stress intensity factor.
"""
return self.crack.k1g(surface_energy)
def k1gsqG(self):
return self.crack.k1gsqG()
def displacements_from_cylinder_coordinates(self, r, theta, k):
"""
Displacement field in mode I fracture from cylindrical coordinates.
"""
return self.crack.displacements(r, theta, k)
def displacements_from_cartesian_coordinates(self, dx, dy, k):
"""
Displacement field in mode I fracture from cartesian coordinates.
"""
abs_dr = np.sqrt(dx*dx+dy*dy)
theta = np.arctan2(dy, dx)
return self.displacements_from_cylinder_coordinates(abs_dr, theta, k)
def displacements(self, ref_x, ref_y, x0, y0, k):
"""
Displacement field for a list of cartesian positions.
Parameters
----------
ref_x : array_like
x-positions of the reference crystal.
ref_y : array_like
y-positions of the reference crystal.
x0 : float
x-coordinate of the crack tip.
y0 : float
y-coordinate of the crack tip.
k : float
Stress intensity factor.
Returns
-------
ux : array_like
x-displacements.
uy : array_like
y-displacements.
"""
dx = ref_x - x0
dy = ref_y - y0
return self.displacements_from_cartesian_coordinates(dx, dy, k)
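# Example (sketch): displacement field of the reference crystal for a tip at
# (x0, y0) and stress intensity factor k, and the resulting deformed
# positions:
#
# ux, uy = crack.displacements(ref_x, ref_y, x0, y0, k)
# x, y = ref_x + ux, ref_y + uy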
def deformation_gradient_from_cylinder_coordinates(self, r, theta, k):
"""
Deformation gradient field in mode I fracture from cylindrical coordinates.
"""
return self.crack.deformation_gradient(r, theta, k)
def deformation_gradient_from_cartesian_coordinates(self, dx, dy, k):
"""
Deformation gradient field in mode I fracture from cartesian coordinates.
"""
abs_dr = np.sqrt(dx*dx+dy*dy)
theta = np.arctan2(dy, dx)
return self.deformation_gradient_from_cylinder_coordinates(abs_dr, theta, k)
def deformation_gradient(self, ref_x, ref_y, x0, y0, k):
"""
Deformation gradient for a list of cartesian positions.
Parameters
----------
ref_x : array_like
x-positions of the reference crystal.
ref_y : array_like
y-positions of the reference crystal.
x0 : float
x-coordinate of the crack tip.
y0 : float
y-coordinate of the crack tip.
k : float
Stress intensity factor.
Returns
-------
F : array_like
2x2 deformation gradient tensor at each position.
"""
dx = ref_x - x0
dy = ref_y - y0
return self.deformation_gradient_from_cartesian_coordinates(dx, dy, k)
def crack_tip_position(self, x, y, ref_x, ref_y, x0, y0, k, mask=None,
residual_func=displacement_residual,
method='Powell', return_residuals=False):
"""
Return an estimate of the real crack tip position by minimizing the
mean square error of the current configuration relative to the
displacement field obtained for a stress intensity factor k from linear
elastic fracture mechanics.
Parameters
----------
x : array_like
x-positions of the atomic system containing the crack.
y : array_like
y-positions of the atomic system containing the crack.
ref_x : array_like
x-positions of the reference crystal.
ref_y : array_like
y-positions of the reference crystal.
x0 : float
Initial guess for the x-coordinate of the crack tip.
y0 : float
Initial guess for the y-coordinate of the crack tip.
k : float
Stress intensity factor.
mask : array_like, optional
Marks the atoms to use for this calculation.
residual_func : function
Function returning per-atom residuals to be minimized.
method : str
Optimization method. See method argument of scipy.optimize.minimize.
Additionally, 'leastsq' invokes scipy.optimize.leastsq.
return_residuals : bool
Function returns residuals if set to True.
Returns
-------
x0 : float
x-coordinate of the crack tip.
y0 : float
y-coordinate of the crack tip.
residuals : array, optional
Per-atom residuals at end of optimization.
"""
if mask is None:
mask = np.ones(len(x), dtype=bool)
if method == 'leastsq':
(x1, y1), ier = leastsq(residual_func, (x0, y0),
args=(self, x, y, ref_x, ref_y, k, mask))
if ier not in [ 1, 2, 3, 4 ]:
raise RuntimeError('Could not find crack tip')
else:
opt = minimize(lambda *args: (residual_func(*args)**2).sum(),
(x0, y0), args=(self, x, y, ref_x, ref_y, k, mask),
method=method)
if not opt.success:
raise RuntimeError('Could not find crack tip. Reason: {}'
.format(opt.message))
x1, y1 = opt.x
if return_residuals:
return x1, y1, residual_func((x1, y1), self, x, y, ref_x, ref_y, k)
else:
return x1, y1
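# Example (sketch): refine an initial tip guess (x0, y0) against relaxed
# positions x, y, optionally restricting the fit to atoms near the tip:
#
# x_tip, y_tip = crack.crack_tip_position(x, y, ref_x, ref_y, x0, y0, k,
#                                         mask=mask)  # e.g. mask = r < r_fit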
def _residual_y(self, y0, x0, x, y, ref_x, ref_y, k, mask):
dux, duy = displacement_residuals((x0, y0), self, x, y, ref_x, ref_y, k)
return dux[mask]*dux[mask]+duy[mask]*duy[mask]
def crack_tip_position_y(self, x, y, ref_x, ref_y, x0, y0, k, mask=None):
"""
Return an estimate of the y-coordinate of the real crack tip position
assuming the stress intensity factor is k.
Parameters
----------
x : array_like
x-positions of the atomic system containing the crack.
y : array_like
y-positions of the atomic system containing the crack.
ref_x : array_like
x-positions of the reference crystal.
ref_y : array_like
y-positions of the reference crystal.
x0 : float
Initial guess for the x-coordinate of the crack tip.
y0 : float
Initial guess for the y-coordinate of the crack tip.
k : float
Stress intensity factor.
mask : array_like, optional
Marks the atoms to use for this calculation.
Returns
-------
y0 : float
y-coordinate of the crack tip
"""
if mask is None:
mask = np.ones(len(x), dtype=bool)
( y0, ), ier = leastsq(self._residual_y, y0,
args=(x0, x, y, ref_x, ref_y, k, mask))
if ier not in [ 1, 2, 3, 4 ]:
raise RuntimeError('Could not find crack tip')
return y0
def scale_displacements(self, x, y, ref_x, ref_y, old_k, new_k):
"""
Rescale atomic positions from stress intensity factor old_k to the new
stress intensity factor new_k. This is useful for extrapolation of
relaxed positions.
Parameters
----------
x : array_like
x-positions of the atomic system containing the crack.
y : array_like
y-positions of the atomic system containing the crack.
ref_x : array_like
x-positions of the reference crystal.
ref_y : array_like
y-positions of the reference crystal.
old_k : float
Current stress intensity factor.
new_k : float
Stress intensity factor for the output positions.
Returns
-------
x : array_like
New x-positions of the atomic system containing the crack.
y : array_like
New y-positions of the atomic system containing the crack.
"""
return ref_x + new_k/old_k*(x-ref_x), ref_y + new_k/old_k*(y-ref_y)
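# Example (sketch): extrapolate relaxed positions from load k to 1.1*k as a
# starting guess for the next relaxation:
#
# x_new, y_new = crack.scale_displacements(x, y, ref_x, ref_y, k, 1.1*k)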
def stresses_from_cylinder_coordinates(self, r, theta, k):
"""
Stress field in mode I fracture from cylindrical coordinates
"""
return self.crack.stresses(r, theta, k)
def stresses_from_cartesian_coordinates(self, dx, dy, k):
"""
Stress field in mode I fracture from cartesian coordinates
"""
abs_dr = np.sqrt(dx*dx+dy*dy)
theta = np.arctan2(dy, dx)
return self.stresses_from_cylinder_coordinates(abs_dr, theta, k)
def stresses(self, ref_x, ref_y, x0, y0, k):
"""
Stress field for a list of cartesian positions
Parameters
----------
ref_x : array_like
x-positions of the reference crystal.
ref_y : array_like
y-positions of the reference crystal.
x0 : float
x-coordinate of the crack tip.
y0 : float
y-coordinate of the crack tip.
k : float
Stress intensity factor.
Returns
-------
sig_x : array_like
xx-component of the stress tensor
sig_y : array_like
yy-component of the stress tensor
sig_xy : array_like
xy-component of the stress tensor
"""
dx = ref_x - x0
dy = ref_y - y0
sig_x, sig_y, sig_xy = self.stresses_from_cartesian_coordinates(dx, dy, k)
return sig_x, sig_y, sig_xy
class SinclairCrack:
"""
Flexible boundary conditions for a Mode I crack as described in
Sinclair, J. E. The Influence of the Interatomic Force Law and of Kinks on the
Propagation of Brittle Cracks. Philos. Mag. 31, 647–671 (1975)
"""
def __init__(self, crk, cryst, calc, k, alpha=0.0, vacuum=6.0,
variable_alpha=True, variable_k=False,
alpha_scale=None, k_scale=None,
extended_far_field=False):
"""
Parameters
----------
crk - instance of CubicCrystalCrack
cryst - Atoms object representing undeformed crystal
calc - ASE-compatible calculator
k - stress intensity factor
alpha - crack tip position
vacuum - amount of vacuum to add to unit cell
variable_alpha - if True, crack tip position can vary (flexible BCs)
variable_k - if True, stress intensity factor can vary
(needed for arc-length continuation)
extended_far_field - if True, crack tip force includes region III contrib
"""
self.crk = crk # instance of CubicCrystalCrack
self.cryst = cryst # Atoms object representing crystal
self.regionI = self.cryst.arrays['region'] == 1
self.regionII = self.cryst.arrays['region'] == 2
self.regionIII = self.cryst.arrays['region'] == 3
self.regionIV = self.cryst.arrays['region'] == 4
self.regionI_II = self.regionI | self.regionII
self.N1 = self.regionI.sum()
self.N2 = self.N1 + self.regionII.sum()
self.N3 = self.N2 + self.regionIII.sum()
self.calc = calc
self.vacuum = vacuum
self.variable_alpha = variable_alpha
self.variable_k = variable_k
self.alpha_scale = alpha_scale
self.k_scale = k_scale
self.extended_far_field = extended_far_field
self.u = np.zeros((self.N1, 3))
self.alpha = alpha
self.k = k
# check atoms are sorted by distance from centre so we can use N1,N2,N3
tip_x = cryst.cell.diagonal()[0] / 2.0
tip_y = cryst.cell.diagonal()[1] / 2.0
x = cryst.positions[:, 0]
y = cryst.positions[:, 1]
self.r = np.sqrt((x - tip_x) ** 2 + (y - tip_y) ** 2)
if not np.all(np.diff(self.r) >= 0):
warnings.warn('Radial distances do not increase monotonically!')
self.atoms = self.cryst.copy()
self.update_atoms() # apply CLE displacements for initial (alpha, k)
a0 = self.atoms.copy()
self.x0 = a0.get_positions()
self.E0 = self.calc.get_potential_energies(a0)[self.regionI_II].sum()
a0_II_III = a0[self.regionII | self.regionIII]
f0bar = self.calc.get_forces(a0_II_III)
self.f0bar = f0bar[a0_II_III.arrays['region'] == 2]
self.precon = None
self.precon_count = 0
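# Example setup (sketch): `cryst` must carry a 'region' array and be sorted
# by distance from the tip, e.g. as produced by
# matscipy.fracture_mechanics.clusters.set_regions:
#
# sc = SinclairCrack(crk, cryst, calc, k=0.9*crk.k1g(surface_energy),
#                    variable_alpha=True, variable_k=False)
# sc.optimize(ftol=1e-3, steps=100)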
def pack(self, u, alpha, k):
dofs = list(u.reshape(-1))
if self.variable_alpha:
dofs.append(alpha)
if self.variable_k:
dofs.append(k)
return np.array(dofs)
def unpack(self, x, reshape=False, defaults=None):
assert len(x) == len(self)
if defaults is None:
defaults = {}
u = x[:3 * self.N1]
if reshape:
u = u.reshape(self.N1, 3)
offset = 3 * self.N1
if self.variable_alpha:
alpha = float(x[offset])
offset += 1
else:
alpha = defaults.get('alpha', self.alpha)
if self.variable_k:
k = float(x[offset])
else:
k = defaults.get('k', self.k)
return u, alpha, k
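# The DoF vector layout is [u_1x, u_1y, u_1z, ..., alpha?, k?], where alpha
# and k are present only if variable; pack and unpack are inverses (sketch):
#
# x = sc.pack(sc.u, sc.alpha, sc.k)
# u, alpha, k = sc.unpack(x, reshape=True)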
def get_dofs(self):
return self.pack(self.u, self.alpha, self.k)
def set_dofs(self, x):
self.u[:], self.alpha, self.k = self.unpack(x, reshape=True)
self.update_atoms()
def __len__(self):
N_dofs = 3 * self.regionI.sum()
if self.variable_alpha:
N_dofs += 1
if self.variable_k:
N_dofs += 1
return N_dofs
def u_cle(self, alpha=None):
"""
Returns the CLE displacement solution for the current crack tip position alpha.
Note that this is NOT pre-multiplied by the stress intensity factor k.
"""
if alpha is None:
alpha = self.alpha
tip_x = self.cryst.cell.diagonal()[0] / 2.0 + alpha
tip_y = self.cryst.cell.diagonal()[1] / 2.0
ux, uy = self.crk.displacements(self.cryst.positions[:, 0],
self.cryst.positions[:, 1],
tip_x, tip_y, 1.0)
u = np.c_[ux, uy, np.zeros_like(ux)] # convert to 3D field
return u
def fit_cle(self, r_fit=20.0, variable_alpha=True, variable_k=True, x0=None,
grid=None):
def residuals(x, mask):
idx = 0
if variable_alpha:
alpha = x[idx]
idx += 1
else:
alpha = self.alpha
if variable_k:
k = x[idx]
idx += 1
else:
k = self.k
u = np.zeros((len(self.atoms), 3))
u[self.regionI] = self.u
du = (self.k * self.u_cle(self.alpha) + u - k * self.u_cle(alpha))
return du[mask, :].reshape(-1)
mask = self.r < r_fit
if x0 is None:
x0 = []
if variable_alpha:
x0.append(self.alpha)
if variable_k:
x0.append(self.k)
if grid:
alpha_grid, k_grid = grid
vals = np.zeros((len(alpha_grid), len(k_grid)))
for i, alpha in enumerate(alpha_grid):
for j, k in enumerate(k_grid):
vals[i, j] = (residuals([alpha, k], mask) ** 2).sum()
i_min, j_min = np.unravel_index(vals.argmin(), vals.shape)
return alpha_grid[i_min], k_grid[j_min]
else:
res, ier = leastsq(residuals, x0, args=(mask,))
if ier not in [1, 2, 3, 4]:
raise RuntimeError('CLE fit failed')
return res
def update_atoms(self):
"""
Update self.atoms from degrees of freedom (self.u, self.alpha, self.k)
"""
self.atoms.set_pbc([False, False, True])
self.atoms.calc = self.calc
self.atoms.info['k'] = self.k
self.atoms.info['alpha'] = self.alpha
# x = x_cryst + K * u_cle + u
self.atoms.positions[:, :] = self.cryst.positions
self.atoms.positions[:, :] += self.k * self.u_cle()
self.atoms.positions[self.regionI, :] += self.u
# add vacuum
self.atoms.cell = self.cryst.cell
self.atoms.cell[0, 0] += self.vacuum
self.atoms.cell[1, 1] += self.vacuum
def set_atoms(self, atoms):
N1_in = (atoms.arrays['region'] == 1).sum()
if 'alpha' in atoms.info:
self.alpha = atoms.info['alpha']
else:
self.alpha = 0.0
self.k = atoms.info['k']
self.u[:] = np.zeros((self.N1, 3))
self.update_atoms() # now we have same u_cle in atoms and self.atoms
min_len = min(N1_in, self.N1)
# FIXME this assumes stable sort order for atoms and self.atoms
u = atoms.positions[:min_len] - self.atoms.positions[:min_len]
shift = np.diag(self.atoms.cell)/2 - np.diag(atoms.cell)/2
u += shift
self.u[:min_len] = u
self.update_atoms()
def get_crack_tip_force(self, forces=None, mask=None):
# V_alpha = -\nabla_1 U_CLE(alpha)
tip_x = self.cryst.cell.diagonal()[0] / 2.0 + self.alpha
tip_y = self.cryst.cell.diagonal()[1] / 2.0
dg = self.crk.deformation_gradient(self.cryst.positions[:, 0],
self.cryst.positions[:, 1],
tip_x, tip_y, self.k)
V = np.zeros((len(self.cryst), 3))
V[:, 0] = -(dg[:, 0, 0] - 1.0)
V[:, 1] = -(dg[:, 0, 1])
# eps = 1e-5
# V_fd = np.zeros((len(self.cryst), 3))
# u, v = self.crk.displacements(self.cryst.positions[:, 0],
# self.cryst.positions[:, 1],
# tip_x, tip_y, self.k)
# xp = self.cryst.positions[:, 0].copy()
# for i in range(len(self.cryst)):
# xp[i] += eps
# up, vp = self.crk.displacements(xp,
# self.cryst.positions[:, 1],
# tip_x, tip_y, self.k)
# xp[i] -= eps
# V_fd[i, 0] = - (up[i] - u[i]) / eps
# V_fd[i, 1] = - (vp[i] - v[i]) / eps
#
# print('|V - V_fd|', np.linalg.norm(V - V_fd, np.inf))
if forces is None:
forces = self.atoms.get_forces()
if mask is None:
mask = self.regionII
if self.extended_far_field:
mask = self.regionII | self.regionIII
return np.tensordot(forces[mask, :], V[mask, :])
def get_xdot(self, x1, x2, ds=None):
u1, alpha1, k1 = self.unpack(x1)
u2, alpha2, k2 = self.unpack(x2)
if ds is None:
# for the first step, assume kdot = 1.0
udot = (u2 - u1) / (k2 - k1)
alphadot = (alpha2 - alpha1) / (k2 - k1)
kdot = 1.0
else:
udot = (u2 - u1) / ds
alphadot = (alpha2 - alpha1) / ds
kdot = (k2 - k1) / ds
print(f' XDOT: |udot| = {np.linalg.norm(udot, np.inf):.3f},'
f' alphadot = {alphadot:.3f}, kdot = {kdot:.3f}')
xdot = self.pack(udot, alphadot, kdot)
return xdot
def get_k_force(self, x1, xdot1, ds):
assert self.variable_k
u1, alpha1, k1 = self.unpack(x1)
x2 = self.get_dofs()
u2, alpha2, k2 = self.unpack(x2)
udot1, alphadot1, kdot1 = self.unpack(xdot1,
defaults={'alpha': 0,
'k': 0})
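# Pseudo-arclength constraint: the projection of the step (x2 - x1) onto
# the previous tangent xdot1 must equal the prescribed arc length ds.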
f_k = (np.dot(u2 - u1, udot1) +
(alpha2 - alpha1) * alphadot1 +
(k2 - k1) * kdot1 - ds)
return f_k
def get_forces(self, x1=None, xdot1=None, ds=None, forces=None, mask=None):
if forces is None:
forces = self.atoms.get_forces()
F = list(forces[self.regionI, :].reshape(-1))
if self.variable_alpha:
f_alpha = self.get_crack_tip_force(forces, mask=mask)
F.append(f_alpha)
if self.variable_k:
f_k = self.get_k_force(x1, xdot1, ds)
F.append(f_k)
return np.array(F)
def update_precon(self, x, F=None):
self.precon_count += 1
if self.precon is not None and self.precon_count % 100 != 0:
return
self.set_dofs(x)
# build a preconditioner using regions I+II of the atomic system
a = self.atoms[:self.N2]
a.calc = self.calc
# a.write('atoms.xyz')
if self.precon is None:
self.precon = Exp(apply_cell=False)
print('Updating atomistic preconditioner...')
self.precon.make_precon(a)
P_12 = self.precon.P
# np.savetxt('P_12.txt', P_12.todense())
# filter to include region I only
regionI = a.arrays['region'] == 1
mask = np.c_[regionI, regionI, regionI].reshape(-1)
P_1 = P_12[mask, :][:, mask].tocsc()
P_1_coo = P_1.tocoo()
I, J, Z = list(P_1_coo.row), list(P_1_coo.col), list(P_1_coo.data)
Fu, Falpha, Fk = self.unpack(F)
Pf_1 = spilu(P_1).solve(Fu)
if self.variable_alpha:
alpha_scale = self.alpha_scale
if alpha_scale is None:
alpha_scale = abs(Falpha) / np.linalg.norm(Pf_1, np.inf)
print(f'alpha_scale = {alpha_scale}')
if self.variable_k:
k_scale = self.k_scale
if k_scale is None:
k_scale = abs(Fk) / np.linalg.norm(Pf_1, np.inf)
print(f'k_scale = {k_scale}')
# extend diagonal of preconditioner for additional DoFs
N_dof = len(self)
offset = 3 * self.N1
if self.variable_alpha:
I.append(offset)
J.append(offset)
Z.append(alpha_scale)
offset += 1
if self.variable_k:
I.append(offset)
J.append(offset)
Z.append(k_scale)
P_ext = csc_matrix((Z, (I, J)), shape=(N_dof, N_dof))
# data = [1.0 for i in range(3 * self.N1)]
# data.append(alpha_scale)
# P_ext = spdiags(data, [0], 3 * self.N1 + 1, 3 * self.N1 + 1)
self.P_ilu = spilu(P_ext)
if F is not None:
Pf = self.P_ilu.solve(F)
print(f'norm(F) = {np.linalg.norm(F)}, norm(P^-1 F) = {np.linalg.norm(Pf)}')
Pfu, Pfalpha, Pfk = self.unpack(Pf)
print(f'|P^-1 f_I| = {np.linalg.norm(Pfu, np.inf)}, P^-1 f_alpha = {Pfalpha}')
def get_precon(self, x, F):
self.update_precon(x, F)
M = LinearOperator(shape=(len(x), len(x)), matvec=self.P_ilu.solve)
M.update = self.update_precon
return M
def optimize(self, ftol=1e-3, steps=20, dump=False, args=None, precon=False,
method='krylov', check_grad=True, dump_interval=10):
self.step = 0
def log(x, f=None):
u, alpha, k = self.unpack(x)
if f is None:
# CG doesn't pass forces to callback, so we need to recompute
f = cg_jacobian(x)
f_I, f_alpha, f_k = self.unpack(f)
message = f'STEP {self.step:-5d} |f_I| ={np.linalg.norm(f_I, np.inf):.8f}'
if self.variable_alpha:
message += f' alpha={alpha:.8f} f_alpha={f_alpha:.8f}'
if self.variable_k:
message += f' k={k:.8f} f_k={f_k:.8f} '
print(message)
if dump and self.step % dump_interval == 0:
self.atoms.write('dump.xyz')
self.step += 1
def residuals(x, *args):
self.set_dofs(x)
return self.get_forces(*args)
def cg_objective(x):
u, alpha, k = self.unpack(x)
u0 = np.zeros(3 * self.N1)
self.set_dofs(self.pack(u0, alpha, k))
E0 = self.atoms.get_potential_energy()
# print(f'alpha = {alpha} E0 = {E0}')
# return E0
self.set_dofs(x)
E = self.atoms.get_potential_energy()
# print('objective', E - E0)
return E - E0
def cg_jacobian(x, verbose=False):
u, alpha, k = self.unpack(x)
u0 = np.zeros(3 * self.N1)
self.set_dofs(self.pack(u0, alpha, k))
f_alpha0 = self.get_crack_tip_force(mask=self.regionI | self.regionII)
self.set_dofs(x)
F = self.get_forces(mask=self.regionI | self.regionII)
if verbose:
print(f'alpha {alpha} f_alpha {F[-1]} f_alpha0 {f_alpha0}')
F[-1] -= f_alpha0
# print('jacobian', np.linalg.norm(F[:-1], np.inf), F[-1])
return -F
def cg2_objective(x):
self.set_dofs(x)
return self.get_potential_energy()
def cg2_jacobian(x):
self.set_dofs(x)
return -self.get_forces(mask=self.regionI | self.regionII)
x0 = self.get_dofs()
# np.random.seed(0)
# x0[:] += 0.01*np.random.uniform(-1,1, size=len(x0))
# print('norm(u) = ', np.linalg.norm(x0[:-1]))
if args is not None:
f0 = self.get_forces(*args)
else:
f0 = self.get_forces()
if precon:
M = self.get_precon(x0, f0)
else:
M = None
if method == 'cg' or method == 'cg2':
assert self.variable_alpha
assert not self.variable_k
assert not precon
if method == 'cg':
objective = cg_objective
jacobian = cg_jacobian
else:
objective = cg2_objective
jacobian = cg2_jacobian
if check_grad:
eps = 1e-5
F = jacobian(x0)
E0 = objective(x0)
x = x0.copy()
x[-1] += eps
Ep = objective(x)
F_fd = (Ep - E0) / eps
print(
f'CHECK_GRAD: F = {F[-1]} F_fd = {F_fd} |F - F_fd| = {abs(F[-1] - F_fd)} F/F_fd = {F[-1] / F_fd}')
res = minimize(objective, x0,
method='cg',
jac=jacobian,
options={'disp': True,
'gtol': ftol,
'maxiter': steps},
callback=log)
if check_grad:
F = jacobian(res.x, verbose=True)
E0 = objective(res.x)
x = res.x.copy()
x[-1] += eps
Ep = objective(x)
F_fd = (Ep - E0) / eps
print(
f'CHECK_GRAD: F = {F[-1]} F_fd = {F_fd} |F - F_fd| = {abs(F[-1] - F_fd)} F/F_fd = {F[-1] / F_fd}')
F = residuals(res.x)
print(f'Full residual at end of CG pre-relaxation: |F|_2 = {np.linalg.norm(F, 2)} '
f'|F|_inf = {np.linalg.norm(F, np.inf)} '
f'f_alpha = {F[-1]}')
elif method == 'krylov':
res = root(residuals, x0,
args=args,
method='krylov',
options={'disp': True,
'fatol': ftol,
'maxiter': steps,
'jac_options': {'inner_M': M}},
callback=log)
else:
raise RuntimeError(f'unknown method {method}')
if res.success:
self.set_dofs(res.x)
else:
self.atoms.write('no_convergence.xyz')
raise RuntimeError(f"no convergence of scipy optimizer {method}")
def get_potential_energy(self):
# E1: energy of region I and II atoms
E = self.atoms.get_potential_energies()[self.regionI_II].sum()
E1 = E - self.E0
# E2: energy of far-field (region III)
regionII_III = self.regionII | self.regionIII
a_II_III = self.atoms[regionII_III]
fbar = self.calc.get_forces(a_II_III)
fbar = fbar[a_II_III.arrays['region'] == 2]
E2 = - 0.5 * np.tensordot(fbar + self.f0bar,
self.atoms.positions[self.regionII, :] -
self.x0[self.regionII, :])
# print(f'E1={E1} E2={E2} total E={E1 + E2}')
return E1 + E2
def rescale_k(self, new_k):
ref_x = self.cryst.positions[:, 0]
ref_y = self.cryst.positions[:, 1]
ref_z = self.cryst.positions[:, 2]
# get atomic positions corresponding to current (u, alpha, k)
x, y, z = self.atoms.get_positions().T
# rescale full displacement field (CLE + atomistic corrector)
x = ref_x + new_k / self.k * (x - ref_x)
y = ref_y + new_k / self.k * (y - ref_y)
self.k = new_k
u_cle = new_k * self.u_cle() # CLE solution at new K
self.u[:] = np.c_[x - u_cle[:, 0] - ref_x,
y - u_cle[:, 1] - ref_y,
z - ref_z][self.regionI, :]
def arc_length_continuation(self, x0, x1, N=10, ds=0.01, ftol=1e-2,
direction=1, steps=100,
continuation=False, traj_file='x_traj.h5',
traj_interval=1,
precon=False):
import h5py
assert self.variable_k # only makes sense if K can vary
if continuation:
xdot1 = self.get_xdot(x0, x1, ds)
else:
xdot1 = self.get_xdot(x0, x1)
# ensure we start moving in the correct direction
if self.variable_alpha:
_, alphadot1, _ = self.unpack(xdot1)
if direction * np.sign(alphadot1) < 0:
xdot1 = -xdot1
row = 0
with h5py.File(traj_file, 'a') as hf:
if 'x' in hf.keys():
x_traj = hf['x']
else:
x_traj = hf.create_dataset('x', (0, len(self)),
maxshape=(None, len(self)),
compression='gzip')
x_traj.attrs['ds'] = ds
x_traj.attrs['ftol'] = ftol
x_traj.attrs['direction'] = direction
x_traj.attrs['traj_interval'] = traj_interval
row = x_traj.shape[0]
for i in range(N):
x2 = x1 + ds * xdot1
print(f'ARC LENGTH step={i} ds={ds}, k1 = {x1[-1]:.3f}, k2 = {x2[-1]:.3f}, '
f' |F| = {np.linalg.norm(self.get_forces(x1=x1, xdot1=xdot1, ds=ds)):.4f}')
self.set_dofs(x2)
self.optimize(ftol, steps, args=(x1, xdot1, ds), precon=precon)
x2 = self.get_dofs()
xdot2 = self.get_xdot(x1, x2, ds)
# monitor sign of \dot{alpha} and flip if necessary
if self.variable_alpha:
_, alphadot2, _ = self.unpack(xdot2)
if direction * np.sign(alphadot2) < 0:
xdot2 = -xdot2
if i % traj_interval == 0:
for nattempt in range(1000):
try:
with h5py.File(traj_file, 'a') as hf:
x_traj = hf['x']
x_traj.resize((row + 1, x_traj.shape[1]))
x_traj[row, :] = x2
row += 1
break
except OSError:
print('hdf5 file not accessible, trying again in 1s')
time.sleep(1.0)
continue
else:
raise IOError("ran out of attempts to access trajectory file")
x1[:] = x2
xdot1[:] = xdot2
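# Example driver (sketch): x0 and x1 are DoF vectors (from get_dofs()) of two
# converged solutions at nearby loads; with variable_k enabled, the
# continuation traces the equilibrium path K(alpha) and checkpoints it to an
# HDF5 trajectory:
#
# sc.variable_k = True
# sc.arc_length_continuation(x0, x1, N=100, ds=0.05, ftol=1e-2,
#                            traj_file='x_traj.h5')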
def plot(self, ax=None, regions='1234', styles=None, bonds=None, cutoff=2.8,
tip=False, atoms_args=None, bonds_args=None, tip_args=None):
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
if atoms_args is None:
atoms_args = dict(marker='o', color='b', linestyle="None")
if bonds_args is None:
bonds_args = dict(linewidths=3, antialiased=True)
if tip_args is None:
tip_args = dict(color='r', ms=20, marker='x', mew=5)
if ax is None:
fig, ax = plt.subplots()
if isinstance(regions, str):
regions = [int(r) for r in regions]
a = self.atoms
region = a.arrays['region']
if styles is None:
styles = ['bo', 'ko', 'r.', 'rx']
plot_elements = []
for i, fmt in zip(regions, styles):
(p,) = ax.plot(a.positions[region == i, 0],
a.positions[region == i, 1], **atoms_args)
plot_elements.append(p)
if bonds:
if isinstance(bonds, bool):
bonds = regions
if isinstance(bonds, str):
bonds = [int(b) for b in bonds]
i, j = neighbour_list('ij', a, cutoff)
i, j = np.array([(I, J) for I, J in zip(i, j) if
region[I] in bonds and region[J] in bonds]).T
lines = list(zip(a.positions[i, 0:2], a.positions[j, 0:2]))
lc = LineCollection(lines, **bonds_args)
ax.add_collection(lc)
plot_elements.append(lc)
if tip:
(tip,) = ax.plot(self.cryst.cell[0, 0] / 2.0 + self.alpha,
self.cryst.cell[1, 1] / 2.0, **tip_args)
plot_elements.append(tip)
ax.set_aspect('equal')
ax.set_xticks([])
ax.set_yticks([])
return plot_elements
def animate(self, x, k1g, regions='12', cutoff=2.8, frames=None,
callback=None):
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 12))
if isinstance(regions, str):
regions = [int(r) for r in regions]
self.set_dofs(x[0])
a = self.atoms
region = a.arrays['region']
i = 0
ax1.plot(x[:, -2], x[:, -1] / k1g, 'b-')
(blob,) = ax1.plot([x[i, -2]], [x[i, -1] / k1g], 'rx', mew=5, ms=20)
ax1.set_xlabel(r'Crack position $\alpha$')
ax1.set_ylabel(r'Stress intensity factor $K/K_{G}$')
self.set_dofs(x[i, :])
plot_elements = self.plot(ax2, regions=regions, bonds=regions, tip=True)
tip = plot_elements.pop(-1)
lc = plot_elements.pop(-1)
if callback:
callback(ax1, ax2)
def frame(idx):
# move the indicator blob in left panel
blob.set_data([x[idx, -2]], [x[idx, -1] / k1g])
self.set_dofs(x[idx])
a = self.atoms
# update positions in right panel
for r, p in zip(regions, plot_elements):
p.set_data(
[a.positions[region == r, 0], a.positions[region == r, 1]])
# update bonds - requires a neighbour list recalculation
i, j = neighbour_list('ij', a, cutoff)
i, j = np.array([(I, J) for I, J in zip(i, j) if
region[I] in regions and region[J] in regions]).T
lines = list(zip(a.positions[i, 0:2], a.positions[j, 0:2]))
lc.set_segments(lines)
# update crack tip indicator
tip.set_data([self.cryst.cell[0, 0] / 2.0 + x[idx, -2]],
[self.cryst.cell[1, 1] / 2.0])
return blob, plot_elements, lc, tip
if frames is None:
frames = range(0, len(x), 100)
return FuncAnimation(fig, frame, frames)
def isotropic_modeI_crack_tip_stress_field(K, r, t, xy_only=True,
nu=0.5, stress_state=PLANE_STRAIN):
"""
Compute Irwin singular crack tip stress field for mode I fracture.
Parameters
----------
K : float
Mode I stress intensity factor. Units should match units of `r`.
r : array_like
Radial distances from crack tip. Can be a multidimensional
array to evaluate stress field on a grid.
t : array_like
Angles from horizontal line y=0 ahead of crack tip,
measured anticlockwise. Should have same shape as `r`.
xy_only : bool
If True (default) only xx, yy, xy and yx components will be set.
nu : float
Poisson ratio. Used only when ``xy_only=False``, to determine zz stresses
stress_state : str
One of"plane stress" or "plane strain". Used if xyz_only=False to
determine zz stresses.
Returns
-------
sigma : array with shape ``r.shape + (3,3)``
"""
if r.shape != t.shape:
raise ValueError('Shapes of radial and angular arrays "r" and "t" '
'must match.')
if stress_state not in [PLANE_STRAIN, PLANE_STRESS]:
raise ValueError('"stress_state" should be either "{0}" or "{1}".'
.format(PLANE_STRAIN, PLANE_STRESS))
sigma = np.zeros(r.shape + (3, 3))
radial = K/np.sqrt(2*math.pi*r)
sigma[...,0,0] = radial*np.cos(t/2.0)*(1.0 - np.sin(t/2.0)*np.sin(3.0*t/2.0)) # xx
sigma[...,1,1] = radial*np.cos(t/2.0)*(1.0 + np.sin(t/2.0)*np.sin(3.0*t/2.0)) # yy
sigma[...,0,1] = radial*np.sin(t/2.0)*np.cos(t/2.0)*np.cos(3.0*t/2.0) # xy
sigma[...,1,0] = sigma[...,0,1] # yx=xy
if not xy_only and stress_state == PLANE_STRAIN:
sigma[...,2,2] = nu*(sigma[...,0,0] + sigma[...,1,1]) # zz
return sigma
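# Example (sketch): evaluate the Irwin field on a polar grid around the tip:
#
# r, t = np.meshgrid(np.linspace(1.0, 20.0, 50),
#                    np.linspace(-np.pi, np.pi, 73))
# sigma = isotropic_modeI_crack_tip_stress_field(K, r, t)
# sig_xx, sig_yy, sig_xy = sigma[..., 0, 0], sigma[..., 1, 1], sigma[..., 0, 1]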
def isotropic_modeI_crack_tip_displacement_field(K, G, nu, r, t,
stress_state=PLANE_STRAIN):
"""
Compute Irwin singular crack tip displacement field for mode I fracture.
Parameters
----------
K : float
Mode I stress intensity factor. Units should match units of `G` and `r`.
G : float
Shear modulus. Units should match units of `K` and `r`.
nu : float
Poisson ratio.
r : array_like
Radial distances from crack tip. Can be a multidimensional
array to evaluate stress field on a grid.
t : array_like
Angles from horizontal line y=0 ahead of crack tip,
measured anticlockwise. Should have same shape as `r`.
stress_state : str
One of"plane stress" or "plane strain". Used if xyz_only=False to
determine zz stresses.
Returns
-------
u : array
v : array
Displacements. Same shape as `r` and `t`.
"""
if r.shape != t.shape:
raise ValueError('Shapes of radial and angular arrays "r" and "t" '
'must match.')
if stress_state == PLANE_STRAIN:
kappa = 3-4*nu
elif stress_state == PLANE_STRESS:
kappa = (3.-nu)/(1.+nu)
else:
raise ValueError('"stress_state" should be either "{0}" or "{1}".'
.format(PLANE_STRAIN, PLANE_STRESS))
radial = K*np.sqrt(r/(2.*math.pi))/(2.*G)
u = radial*np.cos(t/2)*(kappa-1+2*np.sin(t/2)**2)
v = radial*np.sin(t/2)*(kappa+1-2*np.cos(t/2)**2)
# Form in Lawn book is equivalent:
#radial = K/(4*G)*np.sqrt(r/(2.*math.pi))
#u = radial*((2*kappa - 1)*np.cos(t/2) - np.cos(3*t/2))
#v = radial*((2*kappa + 1)*np.sin(t/2) - np.sin(3*t/2))
return u, v
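# Example (sketch): displacements at positions (x, y) relative to a tip at
# (x0, y0), given shear modulus G and Poisson ratio nu:
#
# r = np.sqrt((x - x0)**2 + (y - y0)**2)
# t = np.arctan2(y - y0, x - x0)
# u, v = isotropic_modeI_crack_tip_displacement_field(K, G, nu, r, t)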
class IsotropicStressField(object):
"""
Calculator to return Irwin near-tip stress field at atomic sites
"""
def __init__(self, K=None, x0=None, y0=None, sxx0=0.0, syy0=0., sxy0=0., nu=0.5,
stress_state='plane strain'):
self.K = K
self.x0 = x0
self.y0 = y0
self.sxx0 = sxx0
self.syy0 = syy0
self.sxy0 = sxy0
self.nu = nu
self.stress_state = stress_state
def get_stresses(self, atoms):
K = self.K
if K is None:
K = get_stress_intensity_factor(atoms)
x0, y0 = self.x0, self.y0
if x0 is None:
x0 = atoms.info['CrackPos'][0]
if y0 is None:
y0 = atoms.info['CrackPos'][1]
x = atoms.positions[:, 0]
y = atoms.positions[:, 1]
r = np.sqrt((x - x0)**2 + (y - y0)**2)
t = np.arctan2(y - y0, x - x0)
sigma = isotropic_modeI_crack_tip_stress_field(K, r, t, xy_only=False,
                                               nu=self.nu,
                                               stress_state=self.stress_state)
sigma[:,0,0] += self.sxx0
sigma[:,1,1] += self.syy0
sigma[:,0,1] += self.sxy0
sigma[:,1,0] += self.sxy0
return sigma
def strain_to_G(strain, E, nu, orig_height):
"""
Convert from strain to energy release rate G for thin strip geometry
Parameters
----------
strain : float
Dimensionless ratio ``(current_height - orig_height)/orig_height``
E : float
Young's modulus relevant for a pull in y direction sigma_yy/eps_yy
nu : float
Poisson ratio -eps_xx/eps_yy
orig_height : float
Unstrained height of slab
Returns
-------
G : float
Energy release rate in units consistent with input
(i.e. in eV/A**2 if eV/A/fs units used)
"""
return 0.5 * E / (1.0 - nu * nu) * strain * strain * orig_height
def G_to_strain(G, E, nu, orig_height):
"""
Convert from energy release rate G to strain for thin strip geometry
Parameters
----------
G : float
Energy release rate in units consistent with `E` and `orig_height`
E : float
Young's modulus relevant for a pull in y direction sigma_yy/eps_yy
nu : float
Poisson ratio -eps_xx/eps_yy
orig_height : float
Unstrained height of slab
Returns
-------
strain : float
Dimensionless ratio ``(current_height - orig_height)/orig_height``
"""
return np.sqrt(2.0 * G * (1.0 - nu * nu) / (E * orig_height))
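# Consistency check (sketch): the two conversions are mutual inverses for the
# thin strip geometry,
#
# eps = G_to_strain(G, E, nu, h)
# assert np.isclose(strain_to_G(eps, E, nu, h), G)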
def get_strain(atoms):
"""
Return the current strain on thin strip configuration `atoms`
Requires unstrained height of slab to be stored as ``OrigHeight``
key in ``atoms.info`` dictionary.
Also updates value stored in ``atoms.info``.
"""
orig_height = atoms.info['OrigHeight']
current_height = atoms.positions[:, 1].max() - atoms.positions[:, 1].min()
strain = current_height / orig_height - 1.0
atoms.info['strain'] = strain
return strain
def get_energy_release_rate(atoms):
"""
Return the current energy release rate G for `atoms`
Result is computed assuming thin strip geometry, and using
stored Young's modulus and Poission ratio and original slab height
from `atoms.info` dictionary.
Also updates `G` value stored in ``atoms.info`` dictionary.
"""
current_strain = get_strain(atoms)
orig_height = atoms.info['OrigHeight']
E = atoms.info['YoungsModulus']
nu = atoms.info['PoissonRatio_yx']
G = strain_to_G(current_strain, E, nu, orig_height)
atoms.info['G'] = G
return G
def get_stress_intensity_factor(atoms, stress_state=PLANE_STRAIN):
"""
Compute stress intensity factor K_I
Calls :func:`get_energy_release_rate` to compute `G`, then
uses stored `YoungsModulus` and `PoissonRatio_yx` values from
`atoms.info` dictionary to compute K_I.
Also updates value stored in ``atoms.info`` dictionary.
"""
G = get_energy_release_rate(atoms)
E = atoms.info['YoungsModulus']
nu = atoms.info['PoissonRatio_yx']
if stress_state == PLANE_STRAIN:
Ep = E/(1-nu**2)
elif stress_state == PLANE_STRESS:
Ep = E
else:
raise ValueError('"stress_state" should be either "{0}" or "{1}".'
.format(PLANE_STRAIN, PLANE_STRESS))
K = np.sqrt(G*Ep)
atoms.info['K'] = K
return K
def fit_crack_stress_field(atoms, r_range=(0., 50.), initial_params=None, fix_params=None,
sigma=None, avg_sigma=None, avg_decay=0.005, calc=None, verbose=False):
"""
Perform a least squares fit of near-tip stress field to isotropic solution
Stresses on the atoms are fit to the Irwin K-field singular crack tip
solution, allowing the crack position, stress intensity factor and
far-field stress components to vary during the fit.
Parameters
----------
atoms : :class:`~.Atoms` object
Crack system. For the initial fit, the following keys are used
from the :attr:`~Atoms.info` dictionary:
- ``YoungsModulus``
- ``PoissonRatio_yx``
- ``G`` --- current energy release rate
- ``strain`` --- current applied strain
- ``CrackPos`` --- initial guess for crack tip position
The initial guesses for the stress intensity factor ``K`` and
far-field stress ``sigma0`` are computed from
``YoungsModulus``, ``PoissonRatio_yx``, ``G`` and ``strain``,
assuming plane strain in thin strip boundary conditions.
On exit, new ``K``, ``sigma0`` and ``CrackPos`` entries are set
in the :attr:`~Atoms.info` dictionary. These values are then
used as starting guesses for subsequent fits.
r_range : sequence of two floats, optional
If present, restrict the stress fit to an annular region
``r_range[0] <= r < r_range[1]``, centred on the previous crack
position (from the ``CrackPos`` entry in ``atoms.info``). If
r_range is ``None``, fit is carried out for all atoms.
initial_params : dict
Names and initial values of parameters. Missing initial values
are guessed from Atoms object.
fix_params : dict
Names and values of parameters to fix during the fit,
e.g. ``{y0: 0.0}`` to constrain the fit to the line y=0
sigma : None or array with shape (len(atoms), 3, 3)
Explicitly provide the per-atom stresses. Avoids calling Atoms'
calculators :meth:`~.get_stresses` method.
avg_sigma : None or array with shape (len(atoms), 3, 3)
If present, use this array to accumulate the time-averaged
stress field. Useful when processing a trajectory.
avg_decay : real
Factor by which average stress is attenuated at each step.
Should be set to ``dt/tau`` where ``dt`` is MD time-step
and ``tau`` is a characteristic averaging time.
calc : Calculator object, optional
If present, override the calculator used to compute stresses
on the atoms. Default is ``atoms.get_calculator``.
To use the atom resolved stress tensor pass an instance of the
:class:`~quippy.elasticity.AtomResolvedStressField` class.
verbose : bool, optional
If set to True, print additional information about the fit.
Returns
-------
params : dict with keys ``[K, x0, y0, sxx0, syy0, sxy0]``
Fitted parameters, in a form suitable for passing to
:class:`IsotropicStressField` constructor. These are the stress intensity
factor `K`, the centre of the stress field ``(x0, y0)``, and the
far field contribution to the stress ``(sxx0, syy0, sxy0)``.
"""
params = {}
if initial_params is not None:
params.update(initial_params)
if 'K' not in params:
# Guess for stress intensity factor K
if 'K' in atoms.info:
params['K'] = atoms.info['K']
else:
try:
params['K'] = get_stress_intensity_factor(atoms)
except KeyError:
params['K'] = 1.0*MPa_sqrt_m
if 'sxx0' not in params or 'syy0' not in params or 'sxy0' not in params:
# Guess for far-field stress
if 'sigma0' in atoms.info:
params['sxx0'], params['syy0'], params['sxy0'] = atoms.info['sigma0']
else:
try:
E = atoms.info['YoungsModulus']
nu = atoms.info['PoissonRatio_yx']
Ep = E/(1-nu**2)
params['syy0'] = Ep*atoms.info['strain']
params['sxx0'] = nu*params['syy0']
params['sxy0'] = 0.0
except KeyError:
params['syy0'] = 0.0
params['sxx0'] = 0.0
params['sxy0'] = 0.0
if 'x0' not in params or 'y0' not in params:
# Guess for crack position
try:
params['x0'], params['y0'], _ = atoms.info['CrackPos']
except KeyError:
params['x0'] = (atoms.positions[:, 0].min() +
(atoms.positions[:, 0].max() - atoms.positions[:, 0].min())/3.0)
params['y0'] = 0.0
# Override any fixed parameters
if fix_params is None:
fix_params = {}
params.update(fix_params)
x = atoms.positions[:, 0]
y = atoms.positions[:, 1]
r = np.sqrt((x - params['x0'])**2 + (y - params['y0'])**2)
# Get local stresses
if sigma is None:
if calc is None:
calc = atoms.get_calculator()
sigma = calc.get_stresses(atoms)
if sigma.shape != (len(atoms), 3, 3):
sigma = Voigt_6_to_full_3x3_stress(sigma)
# Update avg_sigma in place
if avg_sigma is not None:
avg_sigma[...] = np.exp(-avg_decay)*avg_sigma + (1.0 - np.exp(-avg_decay))*sigma
sigma = avg_sigma.copy()
# Zero components out of the xy plane
sigma[:,2,2] = 0.0
sigma[:,0,2] = 0.0
sigma[:,2,0] = 0.0
sigma[:,1,2] = 0.0
sigma[:,2,1] = 0.0
mask = Ellipsis # all atoms
if r_range is not None:
rmin, rmax = r_range
mask = (r > rmin) & (r < rmax)
if verbose:
print('Fitting on %r atoms' % sigma[mask,1,1].shape)
def objective_function(params, x, y, sigma, var_params):
params = dict(zip(var_params, params))
if fix_params is not None:
params.update(fix_params)
isotropic_sigma = IsotropicStressField(**params).get_stresses(atoms)
delta_sigma = sigma[mask,:,:] - isotropic_sigma[mask,:,:]
return delta_sigma.reshape(delta_sigma.size)
# names and values of parameters which can vary in this fit
var_params = sorted([key for key in params.keys() if key not in fix_params.keys() ])
initial_params = [params[key] for key in var_params]
from scipy.optimize import leastsq
fitted_params, cov, infodict, mesg, success = leastsq(objective_function,
initial_params,
args=(x, y, sigma, var_params),
full_output=True)
params = dict(zip(var_params, fitted_params))
params.update(fix_params)
# estimate variance in parameter estimates
if cov is None:
# singular covariance matrix
err = dict(zip(var_params, [0.]*len(fitted_params)))
else:
s_sq = (objective_function(fitted_params, x, y, sigma, var_params)**2).sum()/(sigma.size-len(fitted_params))
cov = cov * s_sq
err = dict(zip(var_params, np.sqrt(np.diag(cov))))
if verbose:
print('K = %.3f MPa sqrt(m)' % (params['K']/MPA_SQRT_M))
print('sigma^0_{xx,yy,xy} = (%.1f, %.1f, %.1f) GPa' % (params['sxx0']*GPA,
params['syy0']*GPA,
params['sxy0']*GPA))
print('Crack position (x0, y0) = (%.1f, %.1f) A' % (params['x0'], params['y0']))
atoms.info['K'] = params['K']
atoms.info['sigma0'] = (params['sxx0'], params['syy0'], params['sxy0'])
atoms.info['CrackPos'] = np.array((params['x0'], params['y0'], atoms.cell[2,2]/2.0))
return params, err
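# Example usage (sketch):
#
# params, err = fit_crack_stress_field(atoms, r_range=(5.0, 25.0),
#                                      fix_params={'y0': 0.0})
# print(params['K'], params['x0'], params['y0'])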
def find_tip_coordination(a, bondlength=2.6, bulk_nn=4):
"""
Find position of tip in crack cluster from coordination
"""
i, j = neighbour_list("ij", a, bondlength)
nn = np.bincount(i, minlength=len(a))
a.set_array('n_neighb', nn)
g = a.get_array('groups')
y = a.positions[:, 1]
above = (nn < bulk_nn) & (g != 0) & (y > a.cell[1,1]/2.0)
below = (nn < bulk_nn) & (g != 0) & (y < a.cell[1,1]/2.0)
a.set_array('above', above)
a.set_array('below', below)
bond1 = above.nonzero()[0][a.positions[above, 0].argmax()]
bond2 = below.nonzero()[0][a.positions[below, 0].argmax()]
# These need to be plain ints, otherwise they are not JSON serializable.
a.info['bond1'] = int(bond1)
a.info['bond2'] = int(bond2)
return bond1, bond2
def find_tip_broken_bonds(atoms, cutoff, bulk_nn=4, boundary_thickness=None):
"""
Find position of the tip from the atom coordination, i.e. broken bonds.
Using the C implementation of 'neighbour_list'.
Returns the tip's position in cartesian coordinates.
Parameters
----------
atoms : ase.Atoms
Atomic configuration.
cutoff : float
Cutoff distance for neighbour search.
bulk_nn : integer
Number of nearest neighbours for the standard bulk configuration.
boundary_thickness : float
Thickness of the boundaries.
Defaults to cutoff distance.
Returns
-------
tip_position : numpy array
The x and y values are found.
The z value is calculated as the midpoint of the depth.
"""
# initialisation of the boundaries
if boundary_thickness is None:
boundary_thickness = cutoff
right_boundary = atoms.positions[(np.argmax(atoms.positions[:,0], axis=0)), 0] - boundary_thickness
top_boundary = atoms.positions[(np.argmax(atoms.positions[:,1], axis=0)), 1] - boundary_thickness
bottom_boundary = atoms.positions[(np.argmin(atoms.positions[:,1], axis=0)), 1] + boundary_thickness
left_boundary = atoms.positions[(np.argmin(atoms.positions[:,0], axis=0)), 0] + boundary_thickness
# calculating the coordination from the neighbours list
i = neighbour_list("i", atoms, cutoff)
coordination_list = np.bincount(i, minlength=len(atoms))
# list of atom numbers with at least one broken bond
broken_bonds_array = np.where(coordination_list <= bulk_nn-1)
# finds the atom number with the most positive x-valued position with a broken bond(s)
# within the bounded section
atom_number = 0
for m in range(0, len(broken_bonds_array[0])):
temp_atom_pos = atoms.positions[broken_bonds_array[0][m]]
if temp_atom_pos[0] > atoms.positions[broken_bonds_array[0][atom_number], 0]:
if left_boundary < temp_atom_pos[0] < right_boundary:
if bottom_boundary < temp_atom_pos[1] < top_boundary:
atom_number = m
tip_position = atoms.positions[broken_bonds_array[0][atom_number]]
return np.array((tip_position[0], tip_position[1], atoms.cell[2,2]/2.0))
def find_tip_stress_field(atoms, r_range=None, initial_params=None, fix_params=None,
sigma=None, avg_sigma=None, avg_decay=0.005, calc=None):
"""
Find the position of crack tip by fitting to the isotropic `K`-field stress
Fit is carried out using :func:`fit_crack_stress_field`, and parameters
have the same meaning as there.
See also
--------
fit_crack_stress_field
"""
params, err = fit_crack_stress_field(atoms, r_range, initial_params, fix_params, sigma,
avg_sigma, avg_decay, calc)
return np.array((params['x0'], params['y0'], atoms.cell[2,2]/2.0))
def plot_stress_fields(atoms, r_range=None, initial_params=None, fix_params=None,
sigma=None, avg_sigma=None, avg_decay=0.005, calc=None):
r"""
Fit and plot atomistic and continuum stress fields
Firstly a fit to the Irwin `K`-field solution is carried out using
:func:`fit_crack_stress_field`, and parameters have the same
meaning as for that function. Then plots of the
:math:`\sigma_{xx}`, :math:`\sigma_{yy}`, :math:`\sigma_{xy}`
fields are produced for atomistic and continuum cases, and for the
residual error after fitting.
"""
from pylab import griddata, meshgrid, subplot, cla, contourf, colorbar, draw, title, clf, gca
params, err = fit_crack_stress_field(atoms, r_range, initial_params, fix_params, sigma,
avg_sigma, avg_decay, calc)
K, x0, y0, sxx0, syy0, sxy0 = (params['K'], params['x0'], params['y0'],
params['sxx0'], params['syy0'], params['sxy0'])
x = atoms.positions[:, 0]
y = atoms.positions[:, 1]
X = np.linspace((x-x0).min(), (x-x0).max(), 500)
Y = np.linspace((y-y0).min(), (y-y0).max(), 500)
t = np.arctan2(y-y0, x-x0)
r = np.sqrt((x-x0)**2 + (y-y0)**2)
if r_range is not None:
rmin, rmax = r_range
mask = (r > rmin) & (r < rmax)
else:
mask = Ellipsis
atom_sigma = sigma
if atom_sigma is None:
atom_sigma = atoms.get_stresses()
grid_sigma = np.dstack([griddata(x[mask]-x0, y[mask]-y0, atom_sigma[mask,0,0], X, Y),
griddata(x[mask]-x0, y[mask]-y0, atom_sigma[mask,1,1], X, Y),
griddata(x[mask]-x0, y[mask]-y0, atom_sigma[mask,0,1], X, Y)])
X, Y = meshgrid(X, Y)
R = np.sqrt(X**2+Y**2)
T = np.arctan2(Y, X)
if r_range is not None:
    grid_sigma[((R < rmin) | (R > rmax)),:] = np.nan # mask outside fitting region
isotropic_sigma = isotropic_modeI_crack_tip_stress_field(K, R, T)
isotropic_sigma[...,0,0] += sxx0
isotropic_sigma[...,1,1] += syy0
isotropic_sigma[...,0,1] += sxy0
isotropic_sigma[...,1,0] += sxy0
isotropic_sigma = ma.masked_array(isotropic_sigma, mask=grid_sigma.mask)
if r_range is not None:
    isotropic_sigma[((R < rmin) | (R > rmax)),:,:] = np.nan # mask outside fitting region
contours = [np.linspace(0, 20, 10),
np.linspace(0, 20, 10),
np.linspace(-10,10, 10)]
dcontours = [np.linspace(0, 5, 10),
np.linspace(0, 5, 10),
np.linspace(-5, 5, 10)]
clf()
for i, (ii, jj), label in zip(range(3),
[(0,0), (1,1), (0,1)],
[r'\sigma_{xx}', r'\sigma_{yy}', r'\sigma_{xy}']):
subplot(3,3,i+1)
gca().set_aspect('equal')
contourf(X, Y, grid_sigma[...,i]*GPA, contours[i])
colorbar()
title(r'$%s^\mathrm{atom}$' % label)
draw()
subplot(3,3,i+4)
gca().set_aspect('equal')
contourf(X, Y, isotropic_sigma[...,ii,jj]*GPA, contours[i])
colorbar()
title(r'$%s^\mathrm{Isotropic}$' % label)
draw()
subplot(3,3,i+7)
gca().set_aspect('equal')
contourf(X, Y, abs(grid_sigma[...,i] -
isotropic_sigma[...,ii,jj])*GPA, dcontours[i])
colorbar()
title(r'$|%s^\mathrm{atom} - %s^\mathrm{isotropic}|$' % (label, label))
draw()
def thin_strip_displacement_y(x, y, strain, a, b):
"""
Return vertical displacement ramp used to apply initial strain to slab
Strain is increased from 0 to strain over distance :math:`a <= x <= b`.
Region :math:`x < a` is rigidly shifted up/down by ``strain*height/2``.
Here is an example of how to use this function on an artificial
2D square atomic lattice. The positions are plotted before (left)
and after (right) applying the displacement, and the horizontal and
vertical lines show the `strain` (red), `a` (green) and `b` (blue)
parameters. ::
import matplotlib.pyplot as plt
import numpy as np
w = 1; h = 1; strain = 0.1; a = -0.5; b = 0.0
x = np.linspace(-w, w, 20)
y = np.linspace(-h, h, 20)
X, Y = np.meshgrid(x, y)
u_y = thin_strip_displacement_y(X, Y, strain, a, b)
for i, disp in enumerate([0, u_y]):
plt.subplot(1,2,i+1)
plt.scatter(X, Y + disp, c='k', s=5)
for y in [-h, h]:
plt.axhline(y, color='r', linewidth=2, linestyle='dashed')
plt.axhline(y*(1+strain), color='r', linewidth=2)
for x, c in zip([a, b], ['g', 'b']):
plt.axvline(x, color=c, linewidth=2)
.. image:: thin-strip-displacement-y.png
:width: 600
:align: center
Parameters
----------
x : array
y : array
Atomic positions in unstrained slab, centered on origin x=0,y=0
strain : float
Far field strain to apply
a : float
x coordinate for beginning of strain ramp
b : float
x coordinate for end of strain ramp
"""
u_y = np.zeros_like(y)
height = y.max() - y.min() # measure height of slab
shift = strain * height / 2.0 # far behind crack, shift = strain*height/2
u_y[x < a] = np.sign(y[x < a]) * shift # region shift for x < a
u_y[x > b] = strain * y[x > b] # constant strain for x > b
middle = (x >= a) & (x <= b) # interpolate for a <= x <= b
f = (x[middle] - a) / (b - a)
u_y[middle] = (f * strain * y[middle] +
(1 - f) * shift * np.sign(y[middle]))
return u_y
def print_crack_system(directions):
"""
Pretty printing of crack crystallographic coordinate system
Specified by list of Miller indices for crack_direction (x),
cleavage_plane (y) and crack_front (z), each of which should be
a sequence of three floats
"""
crack_direction, cleavage_plane, crack_front = directions
crack_direction = MillerDirection(crack_direction)
cleavage_plane = MillerPlane(cleavage_plane)
crack_front = MillerDirection(crack_front)
print('Crack system %s%s' % (cleavage_plane, crack_front))
print('Crack direction (x-axis) %s' % crack_direction)
print('Cleavage plane (y-axis) %s' % cleavage_plane)
print('Crack front (z-axis) %s\n' % crack_front)
class ConstantStrainRate(object):
"""
Constraint which increments epsilon_yy at a constant strain rate
Rescaling is applied only to atoms where `mask` is True (default is all atoms)
"""
def __init__(self, orig_height, delta_strain, mask=None):
self.orig_height = orig_height
self.delta_strain = delta_strain
if mask is None:
mask = Ellipsis
self.mask = mask
def adjust_forces(self, atoms, forces):
pass
def adjust_positions(self, atoms, newpos):
current_height = newpos[:, 1].max() - newpos[:, 1].min()
current_strain = current_height / self.orig_height - 1.0
new_strain = current_strain + self.delta_strain
alpha = (1.0 + new_strain) / (1.0 + current_strain)
newpos[self.mask, 1] = newpos[self.mask, 1]*alpha
def copy(self):
return ConstantStrainRate(self.orig_height,
self.delta_strain,
self.mask)
def apply_strain(self, atoms, rigid_constraints=False):
"""
Applies a constant strain to the system.
Parameters
----------
atoms : ASE.atoms
Atomic configuration.
rigid_constraints : boolean
If False, strain is applied to every atom, i.e. constrained
atoms are allowed to move during strain application.
"""
if not rigid_constraints:
initial_constraints = atoms.constraints
atoms.constraints = None
newpos = atoms.get_positions()
self.adjust_positions(atoms, newpos)
atoms.set_positions(newpos)
if not rigid_constraints:
atoms.constraints = initial_constraints
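# Example (sketch): attach as an ASE constraint so that every dynamics step
# increments the applied strain eps_yy by delta_strain:
#
# strain_atoms = ConstantStrainRate(orig_height, delta_strain)
# atoms.set_constraint([fixed_atoms, strain_atoms])  # fixed_atoms: e.g. FixAtoms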
| 77,061 | 34.091985 | 119 | py |
matscipy | matscipy-master/matscipy/fracture_mechanics/clusters.py | #
# Copyright 2014-2015, 2017, 2021 Lars Pastewka (U. Freiburg)
# 2014-2015, 2020 James Kermode (Warwick U.)
# 2020 Petr Grigorev (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from ase.lattice.cubic import Diamond, FaceCenteredCubic, SimpleCubic, BodyCenteredCubic
###
def set_groups(a, n, skin_x, skin_y, central_x=-1./2, central_y=-1./2,
invert_central=False):
nx, ny, nz = n
sx, sy, sz = a.cell.diagonal()
print('skin_x = {0}*a0, skin_y = {1}*a0'.format(skin_x, skin_y))
skin_x = skin_x*sx/nx
skin_y = skin_y*sy/ny
print('skin_x = {0}, skin_y = {1}'.format(skin_x, skin_y))
r = a.positions
g = np.ones(len(a), dtype=int)
mask = np.logical_or(
np.logical_or(
np.logical_or(
r[:, 0]/sx < (1.-central_x)/2,
r[:, 0]/sx > (1.+central_x)/2),
r[:, 1]/sy < (1.-central_y)/2),
r[:, 1]/sy > (1.+central_y)/2)
if invert_central:
mask = np.logical_not(mask)
g = np.where(mask, g, 2*np.ones_like(g))
mask = np.logical_or(
np.logical_or(
np.logical_or(
r[:, 0] < skin_x, r[:, 0] > sx-skin_x),
r[:, 1] < skin_y),
r[:, 1] > sy-skin_y)
g = np.where(mask, np.zeros_like(g), g)
a.set_array('groups', g)
def set_regions(cryst, r_I, cutoff, r_III):
sx, sy, sz = cryst.cell.diagonal()
x, y = cryst.positions[:, 0], cryst.positions[:, 1]
cx, cy = sx/2, sy/2
r = np.sqrt((x - cx)**2 + (y - cy)**2)
# Regions I-III defined by radial distance from center
regionI = r < r_I
regionII = (r >= r_I) & (r < (r_I + cutoff))
regionIII = (r >= (r_I + cutoff)) & (r < r_III)
regionIV = (r >= r_III) & (r < (r_III + cutoff))
cryst.new_array('region', np.zeros(len(cryst), dtype=int))
region = cryst.arrays['region']
region[regionI] = 1
region[regionII] = 2
region[regionIII] = 3
region[regionIV] = 4
# keep only cylinder defined by regions I - IV
cryst = cryst[regionI | regionII | regionIII | regionIV]
# order by radial distance from tip
order = r[regionI | regionII | regionIII | regionIV ].argsort()
cryst = cryst[order]
return cryst
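# Example (sketch): carve a cylindrical multi-region cluster for flexible
# boundary conditions, with region I of radius 20 A, an interatomic cutoff of
# 5 A and region III extending to 60 A:
#
# cryst = set_regions(cryst, r_I=20.0, cutoff=5.0, r_III=60.0)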
def cluster(el, a0, n, crack_surface=[1,1,0], crack_front=[0,0,1],
lattice=None, shift=None):
nx, ny, nz = n
third_dir = np.cross(crack_surface, crack_front)
directions = [ third_dir, crack_surface, crack_front ]
if np.linalg.det(directions) < 0:
third_dir = -third_dir
directions = [ third_dir, crack_surface, crack_front ]
unitcell = lattice(el, latticeconstant=a0, size=[1, 1, 1],
directions=directions )
if shift is not None:
unitcell.translate(np.dot(shift, unitcell.cell))
# Center cluster in unit cell
x, y, z = (unitcell.get_scaled_positions()%1.0).T
x += (1.0-x.max()+x.min())/2 - x.min()
y += (1.0-y.max()+y.min())/2 - y.min()
z += (1.0-z.max()+z.min())/2 - z.min()
unitcell.set_scaled_positions(np.transpose([x, y, z]))
a = unitcell.copy()
a *= (nx, ny, nz)
#a.info['unitcell'] = unitcell
a.set_pbc([False, False, True])
return a
def diamond(*args, **kwargs):
kwargs['lattice'] = Diamond
return cluster(*args, **kwargs)
def fcc(*args, **kwargs):
kwargs['lattice'] = FaceCenteredCubic
return cluster(*args, **kwargs)
def bcc(*args, **kwargs):
kwargs['lattice'] = BodyCenteredCubic
return cluster(*args, **kwargs)
def sc(*args, **kwargs):
kwargs['lattice'] = SimpleCubic
return cluster(*args, **kwargs)
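# Example (sketch): a diamond cluster oriented for a (111)[1-10] crack
# system (lattice constant and size are placeholders):
#
# cryst = diamond('Si', 5.43, [20, 20, 1],
#                 crack_surface=[1, 1, 1], crack_front=[1, -1, 0])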
| 4,414 | 32.195489 | 88 | py |
matscipy | matscipy-master/matscipy/fracture_mechanics/__init__.py | """
Calculation of continuum linear elastic displacement fields near crack tips,
including support for anisotropy in the elastic response.
"""
| 143 | 27.8 | 76 | py |
matscipy | matscipy-master/matscipy/fracture_mechanics/idealbrittlesolid.py | #
# Copyright 2015, 2017, 2020-2021 Lars Pastewka (U. Freiburg)
# 2014-2015, 2017-2018 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from ase.atoms import Atoms
from ase.calculators.calculator import Calculator
from ase.constraints import FixAtoms
from ase.geometry import cellpar_to_cell
from matscipy.neighbours import neighbour_list
from matscipy.fracture_mechanics.crack import (ConstantStrainRate,
get_strain)
def triangular_lattice_slab(a, n, m):
# primitive unit cell
## a = Atoms('H', [(0, 0, 0)],
## cell=cellpar_to_cell([a, a, 10*a, 90, 90, 120]),
## pbc=[True, True, False])
# cubic unit cell
c = 10*a
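    # NB: the right-hand side of the next assignment is evaluated before
    # 'a' is rebound, so the lattice constant 'a' is still used inside
    # the Atoms(...) call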
a = Atoms('H2',[(0, 0, c/2),
(0.5*a, np.sqrt(3)*a/2, c/2)],
cell=[[a, 0, 0],
[0, np.sqrt(3)*a, 0],
[0, 0, c]],
pbc=[True, True, True])
# we use unit masses
a.set_masses([1]*len(a))
return a * (n, m, 1)
def find_triangles_2d(atoms, cutoff, minangle=30*np.pi/180, maxangle=120*np.pi/180,
xdim=0, ydim=1):
"""
Return a list of all triangles of a triangular lattice sitting in the x-y
plane.
"""
# Contains atom indices that border the triangle
corner1 = []
corner2 = []
corner3 = []
# Find triangles
i, j, D = neighbour_list('ijD', atoms, cutoff)
coord = np.bincount(i)
for k in range(len(atoms)):
firstn = np.searchsorted(i, k, side='left')
lastn = np.searchsorted(i, k, side='right')
# Sort six neighbors by angle
angles = np.arctan2(D[firstn:lastn, xdim], D[firstn:lastn, ydim])
s = np.argsort(angles)
# Only pick triangles with angles between min and max angle
        triangle_angles = (angles[np.roll(s, -1)]-angles[s]) % (2*np.pi)
        m = (triangle_angles > minangle) & (triangle_angles < maxangle)
# Add corners of triangle to lists
corner1 += list(np.array([k]*(lastn-firstn))[m])
corner2 += list(j[firstn:lastn][s][m])
corner3 += list(j[firstn:lastn][np.roll(s, -1)][m])
# Sort corners
corner1, corner2, corner3 = np.sort([corner1, corner2, corner3], axis=0)
# Remove duplicate triangles
uniqueid = corner3+len(atoms)*(corner2+len(atoms)*corner1)
_, s = np.unique(uniqueid, return_index=True)
return corner1[s], corner2[s], corner3[s]
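# --- Illustrative usage (added sketch) ---
# Enumerate the unique triangles of a small triangular lattice. With
# lattice constant 1.0, a cutoff of 1.2 captures nearest neighbours only.
def _example_find_triangles():
    slab = triangular_lattice_slab(1.0, 4, 4)
    c1, c2, c3 = find_triangles_2d(slab, 1.2)
    # (c1[k], c2[k], c3[k]) are the corner indices of the k-th triangle
    return np.transpose([c1, c2, c3])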
class IdealBrittleSolid(Calculator):
"""
Implementation of force field for an ideal brittle solid
Described in Marder, Int. J. Fract. 130, 517-555 (2004)
"""
implemented_properties = ['energy', 'free_energy', 'energies', 'stress',
'forces']
default_parameters = {'a': 1.0, # lattice constant
'rc': 1.01, # cutoff
'k': 1.0, # spring constant
'beta': 0.01, # Kelvin dissipation
'b': 0.01, # Stokes dissipation
'linear': False # Linearized response
}
def __init__(self, *args, **kwargs):
Calculator.__init__(self, *args, **kwargs)
self.crystal_bonds = 0
def set_reference_crystal(self, crystal):
rc = self.parameters['rc']
self.crystal = crystal.copy()
i = neighbour_list('i', self.crystal, rc)
self.crystal_bonds = len(i)
def calculate(self, atoms, properties, system_changes):
Calculator.calculate(self, atoms, properties, system_changes)
a = self.parameters['a']
rc = self.parameters['rc']
k = self.parameters['k']
beta = self.parameters['beta']
linear = self.parameters['linear']
energies = np.zeros(len(atoms))
forces = np.zeros((len(atoms), 3))
velocities = (atoms.get_momenta().T/atoms.get_masses()).T
if not linear:
i, j, dr, r = neighbour_list('ijDd', atoms, rc)
dr_hat = (dr.T/r).T
dv = velocities[j] - velocities[i]
de = 0.5*k*(r - a)**2 # spring energies
e = 0.5*de # half goes to each end of spring
f = (k*(r - a)*dr_hat.T).T + beta*dv
else:
# Linearized response
i, j, D, S = neighbour_list('ijDS', atoms, rc)
# Displacements
u = atoms.positions - self.crystal.positions
# Bond vector taken from reference configuration
#dr = self.crystal.positions[j] - self.crystal.positions[i] + \
# S.dot(self.crystal.cell)
dr = self.crystal.positions[j] - self.crystal.positions[i] + S.dot(self.crystal.cell)
r = np.sqrt((dr*dr).sum(axis=-1))
dr_hat = (dr.T/r).T
dv = velocities[j] - velocities[i]
de = 0.5*k*(((u[j] - u[i])*dr_hat).sum(axis=-1))**2 # spring energies
e = 0.5*de # half goes to each end of spring
f = ((k*(u[j] - u[i])*dr_hat).sum(axis=-1)*dr_hat.T).T + beta*dv
energies[:] = np.bincount(i, e, minlength=len(atoms))
for kk in range(3):
forces[:, kk] = np.bincount(i, weights=f[:, kk],
minlength=len(atoms))
energy = energies.sum()
# add energy 0.5*k*(rc - a)**2 for each broken bond
if not linear and len(i) < self.crystal_bonds:
de = 0.5*k*(rc - a)**2
energy += 0.5*de*(self.crystal_bonds - len(i))
# Stokes dissipation
if 'stokes' in atoms.arrays:
b = atoms.get_array('stokes')
forces -= (velocities.T*b).T
self.results = {'energy': energy,
'free_energy': energy,
'energies': energies,
'forces': forces}
# Virial
if not linear:
virial = np.zeros(6)
if len(i) > 0:
virial = 0.5*np.array([dr[:,0]*f[:,0], # xx
dr[:,1]*f[:,1], # yy
dr[:,2]*f[:,2], # zz
dr[:,1]*f[:,2], # yz
dr[:,0]*f[:,2], # xz
dr[:,0]*f[:,1]]).sum(axis=1) # xy
self.results['stress'] = virial/atoms.get_volume()
def get_wave_speeds(self, atoms):
"""
Return longitudinal, shear and Rayleigh wave speeds
"""
k = self.parameters['k']
a = self.parameters['a']
m = atoms.get_masses()[0]
ka2_over_m = np.sqrt(k*a**2/m)
c_l = np.sqrt(9./8.*ka2_over_m)
c_s = np.sqrt(3./8.*ka2_over_m)
c_R = 0.563*ka2_over_m
return c_l, c_s, c_R
def get_elastic_moduli(self):
"""
        Return Lamé constants lambda and mu
"""
k = self.parameters['k']
a = self.parameters['a']
lam = np.sqrt(3.0)/2.0*k/a
mu = lam
return lam, mu
def get_youngs_modulus(self):
k = self.parameters['k']
a = self.parameters['a']
return 5.0*np.sqrt(3.0)/4.0*k/a
def get_poisson_ratio(self):
return 0.25
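# --- Illustrative usage (added sketch; parameters are the defaults above) ---
# Attach the calculator to a small slab and evaluate energy and forces.
def _example_ideal_brittle_solid():
    slab = triangular_lattice_slab(1.0, 8, 8)
    calc = IdealBrittleSolid()          # default k, a, rc from above
    calc.set_reference_crystal(slab)
    slab.calc = calc
    return slab.get_potential_energy(), slab.get_forces()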
def find_crack_tip(atoms, dt=None, store=True, results=None):
"""
Return atom at the crack tip and its x-coordinate
    Crack tip is defined to be the location of the rightmost atom whose
    coordination differs from the bulk value of six, restricted to a
    window that excludes the slab edges.
"""
calc = atoms.get_calculator()
a = calc.parameters['a']
rc = calc.parameters['rc']
i = neighbour_list('i', atoms, rc)
nn = np.bincount(i) # number of nearest neighbours, equal to 6 in bulk
x = atoms.positions[:, 0]
y = atoms.positions[:, 1]
bottom = y.min()
left = x.min()
width = x.max() - x.min()
height = y.max() - y.min()
old_tip_x = atoms.info.get('tip_x', left + 0.3*width)
# crack cannot have advanced more than c_R*dt
if dt is not None:
cl, ct, cR = calc.get_wave_speeds(atoms)
tip_max_x = old_tip_x + 10.0*cR*dt # FIXME definition of cR seems wrong, shouldn't need factor of 10 here...
else:
tip_max_x = left + 0.8*width
broken = ((nn != 6) &
(x > left + 0.2*width) & (x < tip_max_x) &
(y > bottom + 0.1*height) & (y < bottom + 0.9*height))
index = atoms.positions[broken, 0].argmax()
tip_atom = broken.nonzero()[0][index]
tip_x = atoms.positions[tip_atom, 0]
strain = get_strain(atoms)
eps_G = atoms.info['eps_G']
print('tip_x: %.3f strain: %.4f delta: %.3f' % (tip_x, strain, strain/eps_G))
if store:
atoms.info['tip_atom'] = tip_atom
atoms.info['tip_x'] = tip_x
if results is not None:
results.append(tip_x)
return (tip_atom, tip_x, broken)
def set_initial_velocities(c):
"""
Initialise a dynamical state by kicking some atoms behind tip
"""
tip_atom, tip_x, broken = find_crack_tip(c, store=False)
init_atoms = broken.nonzero()[0][c.positions[broken, 0].argsort()[-8:]]
upper = list(init_atoms[c.positions[init_atoms, 1] > 0])
lower = list(init_atoms[c.positions[init_atoms, 1] < 0])
calc = c.get_calculator()
cl, ct, cR = calc.get_wave_speeds(c)
v0 = cl/10.
v = np.zeros((len(c), 3))
v[upper, 1] = +v0
v[lower, 1] = -v0
c.set_velocities(v)
print('Setting velocities of upper=%s, lower=%s to +/- %.2f' % (upper, lower, v0))
return (upper, lower, v0)
def set_constraints(c, a):
# fix atoms in the top and bottom rows
top = c.positions[:, 1].max()
bottom = c.positions[:, 1].min()
left = c.positions[:, 0].min()
right = c.positions[:, 0].max()
fixed_mask = ((abs(c.positions[:, 1] - top) < 0.5*a) |
(abs(c.positions[:, 1] - bottom) < 0.5*a))
fix_atoms = FixAtoms(mask=fixed_mask)
if 'fix' in c.arrays:
c.set_array('fix', fixed_mask)
else:
c.new_array('fix', fixed_mask)
print('Fixed %d atoms' % fixed_mask.sum())
c.set_constraint(fix_atoms)
# Stokes damping regions at left and right of slab
stokes = np.zeros(len(c))
x = c.positions[:, 0]
stokes[:] = 0.0
stokes[x < left + 5.0*a] = (1.0 - (x-left)/(5.0*a))[x < left + 5.0*a]
stokes[x > right - 10.0*a] = (1.0 - (right-x)/(10.0*a))[x > right - 10.0*a]
if 'stokes' in c.arrays:
c.set_array('stokes', stokes)
else:
c.new_array('stokes', stokes)
print('Applying Stokes damping to %d atoms' % (stokes != 0.0).sum())
def extend_strip(atoms, a, N, M, vacuum):
x = atoms.positions[:, 0]
left = x.min()
width = x.max() - x.min()
tip_x = atoms.info['tip_x']
if tip_x < left + 0.6*width:
# only need to extend strip when crack gets near end
return False
    print('tip_x (%.2f) > left + 0.6*width (%.2f)' % (tip_x, left + 0.6*width))
# extra material for pasting onto end
a = atoms.get_calculator().parameters['a']
extra = triangular_lattice_slab(a, M, N)
# apply uniform strain and append to slab
strain = get_strain(atoms)
extra.center(vacuum, axis=1)
fix = atoms.get_array('fix')
extra.positions[:, 0] += atoms.positions[fix, 0].max() + a/2.0
extra.positions[:, 1] -= extra.positions[:, 1].mean()
extra.positions[:, 1] *= (1.0 + strain)
print('Adding %d atoms' % len(extra))
atoms += extra
atoms.set_constraint([])
discard = atoms.positions[:, 0].argsort()[:len(extra)]
print('Discarding %d atoms' % len(discard))
del atoms[discard]
return True
| 12,355 | 32.215054 | 116 | py |
matscipy | matscipy-master/matscipy/fracture_mechanics/energy_release.py | #
# Copyright 2014-2015, 2021 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from math import pi
import numpy as np
###
def J_integral(a, deformation_gradient, virial, epot, e0, tip_x, tip_y, r1, r2,
mask=None):
"""
Compute the energy release rate from the J-integral. Converts contour
integral into a domain integral.
See: Li, Shih, Needleman, Eng. Fract. Mech. 21, 405 (1985);
Jin, Yuan, J. Nanosci. Nanotech. 5, 2099 (2005)
    Domain function is currently fixed: q(r) = (1-cos(pi*(r-r1)/(r2-r1)))/2
Parameters
----------
a : ase.Atoms
Relaxed atomic configuration of the crack.
deformation_gradient : array_like
len(a) x 3x3 array of atomic deformation gradients.
virial : array_like
len(a) x 3x3 array of atomic virials.
epot : float
Potential energy per atom of the cracked configuration.
e0 : float
Reference energy (cohesive energy per atom of the bulk crystal at
equilibrium).
tip_x, tip_y : float
Position of the crack tip.
r1, r2 : float
Volume integration is carried out in region at a distance between r1
and r2 from the crack tip.
mask : array_like
Include only a subset of all atoms into J-integral computation.
Returns
-------
J : float
Value of the J-integral.
"""
if mask is None:
mask = np.ones(len(a), dtype=bool)
# Cell size
sx, sy, sz = a.cell.diagonal()
# Positions
x, y, z = a.positions.T.copy()
x -= tip_x
y -= tip_y
r = np.sqrt(x**2+y**2)
# Derivative of the domain function q
nonzero = np.logical_and(r > r1, r < r2)
    # Only the cosine-smoothed domain function (elif branch) is active.
    if 0:
        # q = (r-r1)/(r2-r1)
gradq = np.transpose([
np.where(nonzero,
x/((r2-r1)*r),
np.zeros_like(x)),
np.where(nonzero,
y/((r2-r1)*r),
np.zeros_like(y)),
np.zeros_like(z)])
elif 1:
# q = (1-cos(pi*(r-r1)/(r2-r1)))/2
gradq = np.transpose([
np.where(nonzero,
x/r * pi/(2*(r2-r1)) * np.sin(pi*(r-r1)/(r2-r1)),
np.zeros_like(x)),
np.where(nonzero,
y/r * pi/(2*(r2-r1)) * np.sin(pi*(r-r1)/(r2-r1)),
np.zeros_like(y)),
np.zeros_like(z)])
else:
# q = (2*pi*(r-r1) - (r2-r1)*sin(2*pi*(r-r1)/(r2-r1))) / (2*pi*(r2-r1))
# dq/dq = (1 - cos(2*pi*(r-r1)/(r2-r1))) / (r2-r1)
gradq = np.transpose([
np.where(nonzero,
x/r * (1. - np.cos(2*pi*(r-r1)/(r2-r1))) / (r2-r1),
np.zeros_like(x)),
np.where(nonzero,
y/r * (1. - np.cos(2*pi*(r-r1)/(r2-r1))) / (r2-r1),
np.zeros_like(y)),
np.zeros_like(z)])
# Potential energy
Jpot = ((epot[mask]-e0[mask])*gradq[mask,0]).sum()
# Strain energy
Jstrain = np.einsum('aij,ai,aj->', virial[mask,:,:],
deformation_gradient[mask,:,0], gradq[mask,:])
# Compute J-integral
return (Jpot-Jstrain)/sz
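# --- Illustrative usage (added sketch; all inputs are placeholders) ---
# The per-atom deformation gradients and virials must come from the
# atomistic calculation (e.g. matscipy.atomic_strain and a calculator
# that provides per-atom stresses); the argument names are stand-ins.
def _example_J_integral(cracked, F, virial, epot, e0, tip_x, tip_y):
    # Integrate over an annulus between 10 and 20 distance units
    # around the crack tip.
    return J_integral(cracked, F, virial, epot, e0, tip_x, tip_y,
                      r1=10.0, r2=20.0)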
| 3,951 | 32.210084 | 79 | py |
matscipy | matscipy-master/matscipy/io/lammpsdata.py | #
# Copyright 2022 Lucas Frérot (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Helper class LAMMPSData to read/write LAMMPS text files."""
from functools import wraps, reduce
from operator import mul
from io import TextIOBase
from os import PathLike
import typing as ts
import numpy as np
from ..molecules import Molecules
FileDescriptor = ts.Union[str, PathLike, TextIOBase]
def read_molecules_from_lammps_data(fd: FileDescriptor, style="full"):
"""Read molecules information from LAMMPS data file.
Parameters
----------
fd :
File descriptor, i.e. file path or text stream.
style :
LAMMPS atomic style.
Returns
-------
Molecules
An object containing the molecule connectivity data.
Notes
-----
For the connectivity data to make sense with atoms ids read by ASE, the
``sort_by_id`` flag of ``read()`` must be set to ``True``.
"""
data = LAMMPSData(style=style)
data.read(fd)
return Molecules(
bonds_connectivity=data["bonds"]["atoms"] - 1,
bonds_types=data["bonds"]["type"],
angles_connectivity=data["angles"]["atoms"][:, (1, 0, 2)] - 1,
angles_types=data["angles"]["type"],
dihedrals_connectivity=data["dihedrals"]["atoms"] - 1,
dihedrals_types=data["dihedrals"]["type"],
)
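# --- Illustrative usage (added sketch; the file name is a placeholder) ---
# Read atoms and topology from the same LAMMPS data file; sort_by_id=True
# keeps the ASE atom order consistent with the 0-based connectivity here.
def _example_read_molecules():
    from ase.io import read
    atoms = read('system.data', format='lammps-data', sort_by_id=True)
    mols = read_molecules_from_lammps_data('system.data', style='full')
    return atoms, mols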
def check_legal_name(func):
"""Check proper dataset name."""
@wraps(func)
def inner(*args, **kwargs):
data_key = args[1]
legal_names = args[0]._legal_names
if data_key not in legal_names:
raise Exception(
f'Requested data "{args[1]}" is not recognized by LAMMPSData')
return func(*args, **kwargs)
return inner
def column_size(dtype: np.dtype):
"""Compute number of columns from dtype."""
if dtype.fields is None:
return 1
numcols = 0
for tup, _ in dtype.fields.values():
if tup.subdtype is None:
numcols += 1
else:
_, shape = tup.subdtype
numcols += reduce(mul, shape, 1)
return numcols
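# --- Illustrative check (added example) ---
# column_size() counts the flat columns of a structured dtype; the 'full'
# atom style (mol, type, charge, 3-vector pos) spans six columns.
def _example_column_size():
    dtype = np.dtype([('mol', np.int32), ('type', np.int32),
                      ('charge', np.double), ('pos', np.double, 3)])
    assert column_size(dtype) == 6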
class LAMMPSData:
"""Main class to interact with LAMMPS text files."""
_data_names = [
# Sections which have types
"atoms",
"bonds",
"angles",
"dihedrals",
# Sections without types
"velocities",
"masses",
]
_header_data_names = _data_names[0:4]
_type_names = {
s[:-1] + ' types': s for s in _header_data_names
}
__headers = {s: s.capitalize() for s in _data_names}
_legal_names = _data_names + list(_type_names.keys())
_write_formats = {
"bonds": "%d %d %d",
"angles": "%d %d %d %d",
"dihedrals": "%d %d %d %d %d",
"velocities": "%.18e %.18e %.18e",
"masses": "%.18e",
}
_atom_write_formats = {
"atomic": "%d %.18e %.18e %.18e",
"bond": "%d %d %.18e %.18e %.18e",
"angle": "%d %d %.18e %.18e %.18e",
"charge": "%d %.18e %.18e %.18e %.18e",
"full": "%d %d %.18e %.18e %.18e %.18e",
}
_dtypes = {
"bonds": np.dtype([('type', np.int32), ('atoms', np.int32, 2)]),
"angles": np.dtype([('type', np.int32), ('atoms', np.int32, 3)]),
"dihedrals": np.dtype([('type', np.int32), ('atoms', np.int32, 4)]),
"velocities": np.dtype([('vel', np.double, 3)]),
"masses": np.double,
}
_atom_dtypes = {
"atomic": np.dtype([('type', np.int32), ('pos', np.double, 3)]),
"bond": np.dtype([('mol', np.int32),
('type', np.int32),
('pos', np.double, 3)]),
"angle": np.dtype([('mol', np.int32),
('type', np.int32),
('pos', np.double, 3)]),
"charge": np.dtype([('type', np.int32),
('charge', np.double),
('pos', np.double, 3)]),
"full": np.dtype([('mol', np.int32),
('type', np.int32),
('charge', np.double),
('pos', np.double, 3)]),
}
def __init__(self, style="atomic", image_flags=False):
"""Initialize data object with atom style."""
self.style = style
# Add a flags field to atoms array
if image_flags:
for k, dtype in self._atom_dtypes.items():
dtype_dict = dict(dtype.fields)
dtype_dict['image_flags'] = (np.dtype("3<i8"), dtype.itemsize)
self._atom_dtypes[k] = np.dtype(dtype_dict)
self._atom_write_formats[k] += " %d %d %d"
self._dtypes['atoms'] = self._atom_dtypes[style]
self._write_formats['atoms'] = self._atom_write_formats[style]
self.__data = {k: np.array([], dtype=self._dtypes[k])
for k in self._data_names}
self.ranges = []
@check_legal_name
def __getitem__(self, name: str):
"""Get data component."""
if name in self._type_names:
name = self._type_names[name]
return self.__data[name]['type']
elif name in self._data_names:
return self.__data[name]
@check_legal_name
def __setitem__(self, name: str, value: ts.Any):
"""Set data component."""
if name in self._type_names:
name = self._type_names[name]
self.__data[name].resize(len(value))
self.__data[name]['type'] = value
elif name in self._data_names:
self.__data[name].resize(len(value))
data = self.__data[name]
try:
data[data.dtype.names[-1]] = np.array(value)
except TypeError:
data[:] = np.array(value)
def write(self, fd: FileDescriptor):
"""Write data to text file or stream."""
if isinstance(fd, (str, PathLike)):
with open(fd, 'w') as stream:
return self.write(stream)
if not isinstance(fd, TextIOBase):
raise TypeError("File should be path or text stream.")
def null_filter(measure, generator):
return filter(lambda t: t[1] != 0,
map(lambda x: (x[0], measure(x[1])), generator))
        # Write header
fd.write('\n')
# Write data numbers
for key, value in null_filter(len, [
(k, self[k])
for k in self._header_data_names
]):
fd.write(f'{value} {key}\n')
# Write unique type numbers (for non-zero types)
for key, value in null_filter(
lambda x: len(set(x)),
[(k, self[k]) for k in self._type_names]
):
fd.write(f'{value} {key}\n')
fd.write('\n\n')
# Write system size
for span, label in zip(self.ranges, "xyz"):
fd.write('{0} {1} {2}lo {2}hi\n'.format(*span, label))
fd.write('\n')
# Write masses
fd.write('Masses\n\n')
for i, m in enumerate(self['masses']):
fd.write(f'{i+1} {m}\n')
fd.write('\n')
# Write data categories
for label, header in self.__headers.items():
if not len(self[label]) or label == "masses":
continue
if label == "atoms":
fd.write(header + f' # {self.style}\n\n')
else:
fd.write(header + '\n\n')
for i, line in enumerate(self[label]):
flatline = []
for component in line:
if isinstance(component, np.ndarray):
flatline += component.tolist()
else:
flatline.append(component)
fd.write('%d ' % (i+1)
+ self._write_formats[label] % tuple(flatline))
fd.write('\n')
fd.write('\n')
def read(self, fd: FileDescriptor):
"""Read data from text file or stream."""
if isinstance(fd, (str, PathLike)):
with open(fd, 'r') as stream:
return self.read(stream)
if not isinstance(fd, TextIOBase):
raise TypeError("File should be path or text stream.")
def header_info(fd, names):
counts = {}
box = [[], [], []]
bounds = ['xlo xhi', 'ylo yhi', 'zlo zhi']
has_bounds = False
for line in fd:
if line == "\n" and has_bounds:
break
for label in filter(lambda x: x in line, names):
counts[label] = int(line.split()[0])
for i, label in filter(lambda x: x[1] in line,
enumerate(bounds)):
box[i] = [
float(x) for x in
line.replace(label, "").strip().split()
]
has_bounds = True
return counts, box
counts, self.ranges = header_info(
fd, self._data_names + list(self._type_names.keys()))
data_counts = {k: v for k, v in counts.items()
if k in self._data_names}
type_counts = {k: v for k, v in counts.items()
if k in self._type_names}
        # Velocities, when present, have one line per atom
        data_counts['velocities'] = data_counts['atoms']
for linum, line in enumerate(fd):
if 'Masses' in line:
ntypes = type_counts['atom types']
self['masses'].resize(ntypes)
self['masses'][:] = \
np.genfromtxt(fd, skip_header=1,
max_rows=ntypes, usecols=(1,))
else:
for label in self._data_names:
if self.__headers[label] in line:
nlines = data_counts[label]
self[label].resize(nlines)
dtype = self[label].dtype
raw_dtype = np.dtype([('num', np.int32)] + [
(k, v[0]) for k, v in dtype.fields.items()
])
raw_data = \
np.genfromtxt(fd,
skip_header=1,
max_rows=nlines,
dtype=raw_dtype)
# Correct for 1-numbering
raw_data['num'] -= 1
self[label][raw_data['num']] = \
raw_data[list(dtype.fields)]
| 11,364 | 32.724036 | 78 | py |
matscipy | matscipy-master/matscipy/io/metis.py | #
# Copyright 2014-2016, 2021 Lars Pastewka (U. Freiburg)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
def save_metis(fn, a, i, j):
"""
Save neighbour list as the METIS graph file format.
See here: http://glaros.dtc.umn.edu/gkhome/views/metis
Parameters
----------
fn : str
File name.
a : Atoms
Atoms object.
i, j : array_like
Neighbour list.
"""
f = open(fn, 'w')
# Output number of vertices and number of edges
print('{} {}'.format(len(a), len(i)//2), file=f)
s = ''
lasti = i[0]
for _i, _j in zip(i, j):
if _i != lasti:
print(s.strip(), file=f)
s = ''
s += '{} '.format(_j+1)
lasti = _i
print(s.strip(), file=f)
f.close()
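# --- Illustrative usage (added sketch; the cutoff is a placeholder) ---
# Build a neighbour list with matscipy and dump it as a METIS graph.
def _example_save_metis(atoms, cutoff=3.0):
    from matscipy.neighbours import neighbour_list
    i, j = neighbour_list('ij', atoms, cutoff)
    save_metis('graph.metis', atoms, i, j)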
| 1,511 | 28.076923 | 71 | py |
matscipy | matscipy-master/matscipy/io/__init__.py | #
# Copyright 2014-2016, 2021 Lars Pastewka (U. Freiburg)
# 2015-2016 Adrien Gola (KIT)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from .metis import save_metis
from .tbl import savetbl, loadtbl | 948 | 38.541667 | 71 | py |
matscipy | matscipy-master/matscipy/io/tbl.py | #
# Copyright 2014-2017, 2021 Lars Pastewka (U. Freiburg)
# 2015 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import io
import re
import numpy as np
###
def savetbl(fn, **kwargs):
"""
Save tabulated data and write column header strings.
Example:
savetbl('file.dat', time=time, strain=strain, energy=energy)
Parameters
----------
fn : str
Name of file to write to.
kwargs : dict
        Keyword arguments passing the data arrays; the keyword names
        become the column headers.
"""
sorted_kwargs = sorted(kwargs.items(), key=lambda t: t[0])
header = ''
for i, (x, y) in enumerate(sorted_kwargs):
header = '{0} {1}:{2}'.format(header, i + 1, x)
data = np.transpose([y for x, y in sorted_kwargs])
fmt = ['%s' if x.dtype.kind == 'U' else '%.18e' for x in data.T]
np.savetxt(fn, data, header=header, fmt=fmt)
def loadtbl(fn, usecols=None, types=None, fromfile=False, **kwargs):
"""
Load tabulated data from column header strings.
Example data file:
# time strain energy
1.0 0.01 283
2.0 0.02 398
...
strain, energy = loadtbl('file.dat', usecols=['strain', 'energy'])
Parameters
----------
fn : str
Name of file to load.
usecols : list of strings
List of column names.
types : dictionary
Types per column.
fromfile : bool
Use numpy.fromfile instead of numpy.loadtxt if set to True. Can be
faster in some circumstances.
Returns
-------
    data : dict or iterable of arrays
        Return one array per column in usecols if usecols is specified.
        For usecols=None, return dictionary with header keys and arrays
        as data entries.
"""
f = open(fn)
line = f.readline()
column_labels = None
while line.startswith('#'):
line = line[1:].strip()
column_labels = [s.strip() for s in re.split(r'[\s,]+', line)]
pos = f.tell()
line = f.readline()
f.seek(pos)
if column_labels is None:
f.close()
raise RuntimeError("No header found in file '{}'".format(fn))
sep_i = [x.find(':') for x in column_labels]
column_labels = [s[i + 1:] if i >= 0 else s for s, i
in zip(column_labels, sep_i)]
if fromfile:
if types is not None:
raise ValueError('`types` argument cannot be used with fromfile=True')
data = np.fromfile(f, sep=' ')
f.close()
data.shape = (-1, len(column_labels))
if usecols is None:
return dict((s, d) for s, d in zip(column_labels, data.T))
else:
return [data[:, column_labels.index(s)] for s in usecols]
else:
raw_data = f.read()
f.close()
if usecols is None:
if types is not None:
raise ValueError('`types` argument can only be used when specifying `usecols`')
data = np.loadtxt(io.StringIO(raw_data), unpack=True, **kwargs)
return dict((s, d) for s, d in zip(column_labels, data))
else:
if types is None:
types = {}
return (np.loadtxt(io.StringIO(raw_data),
usecols=[column_labels.index(s)],
dtype=types[s] if s in types else np.float64,
unpack=True,
**kwargs)
for s in usecols)
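# --- Illustrative round trip (added example) ---
# Write two named columns and read one of them back by header name.
def _example_tbl_roundtrip():
    t = np.linspace(0.0, 1.0, 5)
    savetbl('example.dat', time=t, energy=t**2)
    energy, = loadtbl('example.dat', usecols=['energy'])
    return energy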
| 4,171 | 31.850394 | 95 | py |
matscipy | matscipy-master/matscipy/io/opls.py | #
# Copyright 2016-2017, 2020 Andreas Klemenz (Fraunhofer IWM)
# 2020 Thomas Reichenbach (Fraunhofer IWM)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time
import copy
import sys
import re
import distutils.version
import numpy as np
import ase
import ase.data
import ase.io
import ase.io.lammpsrun
import ase.calculators.lammpsrun
import matscipy.neighbours
import matscipy.opls
try:
    import ase.version
    ase_version_str = ase.version.version
except (ImportError, AttributeError):
    ase_version_str = ase.__version__
def read_extended_xyz(fileobj):
"""
Read extended xyz file with labeled atoms. The number of atoms should
be given in the first line, the second line contains the cell
dimensions and the definition of the columns. The file should contain
the following columns: element (1 or 2 characters), x(float), y(float),
z (float), molecule id (int), name (1 or 2 characters). A full
description of the extended xyz format can be found for example in the
ASE documentation. An example for a file defining an H2 molecule is
given below.
====== Example ======
2
Lattice="10.0 0.0 0.0 0.0 10.0 0.0 0.0 0.0 10.0" Properties=species:S:1:pos:R:3:molid:I:1:type:S:1
H 4.5 5.0 5.0 1 H1
H 5.5 5.0 5.0 1 H1
====== End of example ======
Parameters
----------
filename : str
Returns
-------
opls_struct : matscipy.opls.OPLSStructure
"""
atoms = ase.io.read(fileobj)
opls_struct = matscipy.opls.OPLSStructure(atoms)
opls_struct.arrays = atoms.arrays
types = opls_struct.get_array('type')
opls_struct.types = np.unique(types)
tags = np.zeros(len(opls_struct), dtype=int)
for it, type in enumerate(opls_struct.types):
tags[types == type] = it
opls_struct.set_tags(tags)
return opls_struct
def read_block(filename, name):
"""
Read a named data block from a parameter file for a non-reactive
potential. Blocks begin with '# name' and are terminated by empty
lines. More information and an example can be found in the
documentation of the 'read_parameter_file' function.
Parameters
----------
filename : str
name : str
Returns
-------
data : dict
Name-Value pairs. Each value is a list of arbitrary length.
Raises
------
RuntimeError
If data block 'name' is not found in the file.
"""
data = {}
if isinstance(filename, str):
with open(filename, 'r') as fileobj:
block = False
for line in fileobj.readlines():
line = line.split()
# find data block
if len(line) >= 2:
if line[1] == name:
block = True
# end of data block
                if block and len(line) == 0:
block = False
# read data
if block:
if line[0][0] == '#':
continue
else:
symbol = line[0]
data[symbol] = []
for word in line[1:]:
if word[0] == '#':
break
else:
data[symbol].append(float(word))
if len(data[symbol]) == 1:
data[symbol] = data[symbol][0]
if len(data) == 0:
raise RuntimeError('Data block \"%s\" not found in file \"%s\"' % (name, filename))
return data
def read_cutoffs(filename):
"""
Read the cutoffs for construction of a non-reactive system from a
file. Comments in the file begin with '#', the file should be
structured like this:
====== Example ======
# Cutoffs
C1-C1 1.23 # name, cutoff (A)
H1-H1 4.56 # name, cutoff (A)
C1-H1 7.89 # name, cutoff (A)
====== End of example ======
Parameters
----------
filename : str
Returns
-------
cutoffs : matscipy.opls.CutoffList
"""
cutoffs = matscipy.opls.CutoffList(read_block(filename, 'Cutoffs'))
return cutoffs
def read_parameter_file(filename):
"""
Read the parameters of a non-reactive potential from a file. An example
for the file structure is given below. The blocks are separated by empty
lines, comments begin with '#'. For more information about the
potentials, refer to the documentation of the LAMMPS commands
bond_style harmonic, angle_style harmonic, dihedral_style harmonic.
The default global cutoffs for Lennard-Jones and Coulomb interactions
are 10.0 and 7.4 A. They can be overridden with the optional
'Cutoffs-LJ-Coulomb' block. By default, geometric mixing is applied
between Lennard-Jones parameters of different particle types and the
global cutoff is used for all pairs. This behavior can be overridden
using the optional 'LJ-pairs' block.
====== Example ======
# Element
C1 0.001 3.5 -0.01 # name, LJ-epsilon (eV), LJ-sigma (A), charge (e)
H1 0.001 2.5 0.01 # name, LJ-epsilon (eV), LJ-sigma (A), charge (e)
# Cutoffs-LJ-Coulomb (this block is optional)
LJ 10.0 # distance (A)
C 10.0 # distance (A)
# LJ-pairs (this block is optional)
C1-H1 0.002 2.1 12.0 # name, epsilon (eV), sigma (A), cutoff (A)
# Bonds
C1-C1 10.0 1.0 # name, spring constant*2 (eV/A**2), distance (A)
# Angles
H1-C1-C1 1.0 100.0 # name, spring constant*2 (eV), equilibrium angle
# Dihedrals
H1-C1-C1-H1 0.0 0.0 0.01 0.0 # name, energy (eV), energy (eV), ...
# Cutoffs
C1-C1 1.85 # name, cutoff (A)
C1-H1 1.15 # name, cutoff (A)
====== End of example ======
Parameters
----------
filename : str
Returns
-------
cutoffs : matscipy.opls.CutoffList
ljq : matscipy.opls.LJQData
bonds : matscipy.opls.BondData
angles : matscipy.opls.AnglesData
dihedrals : matscipy.opls.DihedralsData
"""
ljq = matscipy.opls.LJQData(read_block(filename, 'Element'))
    try:
        ljq_cut = read_block(filename, 'Cutoffs-LJ-Coulomb')
        ljq.lj_cutoff = ljq_cut['LJ']
        ljq.c_cutoff = ljq_cut['C']
    except (RuntimeError, KeyError):
        pass
    try:
        ljq.lj_pairs = read_block(filename, 'LJ-pairs')
    except RuntimeError:
        pass
bonds = matscipy.opls.BondData(read_block(filename, 'Bonds'))
angles = matscipy.opls.AnglesData(read_block(filename, 'Angles'))
dihedrals = matscipy.opls.DihedralsData(read_block(filename, 'Dihedrals'))
cutoffs = matscipy.opls.CutoffList(read_block(filename, 'Cutoffs'))
return cutoffs, ljq, bonds, angles, dihedrals
def write_lammps(prefix, atoms):
"""
Convenience function. The functions 'write_lammps_in',
'write_lammps_atoms' and 'write_lammps_definitions' are usually
called at the same time. This function combines them, filenames
will be 'prefix.in', 'prefix.atoms' and 'prefix.opls'.
Parameters
----------
prefix : str
atoms : matscipy.opls.OPLSStructure
"""
write_lammps_in(prefix)
write_lammps_atoms(prefix, atoms)
write_lammps_definitions(prefix, atoms)
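# --- Illustrative pipeline (added sketch; file names are placeholders) ---
# Typical non-reactive setup: read a labeled structure, attach force-field
# data from a parameter file, generate the topology and write the LAMMPS
# input. The topology helpers (set_cutoffs, set_atom_data, get_bonds,
# get_angles, get_dihedrals) are assumed from matscipy.opls.OPLSStructure.
def _example_lammps_setup():
    struct = read_extended_xyz('system.extxyz')
    cutoffs, ljq, bonds, angles, dihedrals = read_parameter_file('params.in')
    struct.set_cutoffs(cutoffs)
    struct.set_atom_data(ljq)
    struct.get_bonds(bonds)
    struct.get_angles(angles)
    struct.get_dihedrals(dihedrals)
    write_lammps('sim', struct)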
def write_lammps_in(prefix):
"""
Writes a simple LAMMPS input script for a structure optimization using
a non-reactive potential. The name of the resulting script is
'prefix.in', while the atomic structure is defined in 'prefix.atoms'
and the definition of the atomic interaction in 'prefix.opls'.
Parameters
----------
prefix : str
"""
if isinstance(prefix, str):
with open(prefix + '.in', 'w') as fileobj:
fileobj.write('# LAMMPS relaxation\n\n')
fileobj.write('units metal\n')
fileobj.write('atom_style full\n')
fileobj.write('boundary p p p\n\n')
fileobj.write('read_data %s.atoms\n' % (prefix))
fileobj.write('include %s.opls\n' % (prefix))
fileobj.write('kspace_style pppm 1e-5\n\n')
fileobj.write('neighbor 1.0 bin\n')
fileobj.write('neigh_modify delay 0 every 1 check yes\n\n')
fileobj.write('thermo 1000\n')
fileobj.write('thermo_style custom step temp press cpu pxx pyy pzz pxy pxz pyz ke pe etotal vol lx ly lz atoms\n\n')
fileobj.write('dump 1 all xyz 1000 dump_relax.xyz\n')
fileobj.write('dump_modify 1 sort id\n\n')
fileobj.write('restart 100000 test_relax\n\n')
fileobj.write('min_style fire\n')
fileobj.write('minimize 1.0e-14 1.0e-5 100000 100000\n')
def write_lammps_atoms(prefix, atoms, units='metal'):
"""
Write atoms input for LAMMPS. Filename will be 'prefix.atoms'.
Parameters
----------
prefix : str
atoms : matscipy.opls.OPLSStructure
units : str
"""
if isinstance(prefix, str):
with open(prefix + '.atoms', 'w') as fileobj:
# header
fileobj.write(fileobj.name + ' (by write_lammps_atoms)\n\n')
fileobj.write('%d atoms\n' % (len(atoms)))
fileobj.write('%d atom types\n' % (len(atoms.types)))
blist = atoms.bond_list
if len(blist):
btypes = atoms.bond_types
fileobj.write('%d bonds\n' % (len(blist)))
fileobj.write('%d bond types\n' % (len(btypes)))
alist = atoms.ang_list
if len(alist):
atypes = atoms.ang_types
fileobj.write('%d angles\n' % (len(alist)))
fileobj.write('%d angle types\n' % (len(atypes)))
dlist = atoms.dih_list
if len(dlist):
dtypes = atoms.dih_types
fileobj.write('%d dihedrals\n' % (len(dlist)))
fileobj.write('%d dihedral types\n' % (len(dtypes)))
# cell
if distutils.version.LooseVersion(ase_version_str) > distutils.version.LooseVersion('3.11.0'):
p = ase.calculators.lammpsrun.Prism(atoms.get_cell())
else:
p = ase.calculators.lammpsrun.prism(atoms.get_cell())
xhi, yhi, zhi, xy, xz, yz = ase.calculators.lammpsrun.convert(
p.get_lammps_prism(), 'distance', 'ASE', units
)
fileobj.write('\n0.0 %f xlo xhi\n' % xhi)
fileobj.write('0.0 %f ylo yhi\n' % yhi)
fileobj.write('0.0 %f zlo zhi\n' % zhi)
# write tilt factors for non-orthogonal cells
if np.abs(xy) > 1e-10 or np.abs(xz) > 1e-10 or np.abs(yz) > 1e-10:
fileobj.write('\n%f %f %f xy xz yz\n' % (xy, xz, yz))
# atoms
fileobj.write('\nAtoms\n\n')
tags = atoms.get_tags()
types = atoms.types
if atoms.has('molid'):
molid = atoms.get_array('molid')
else:
molid = [1] * len(atoms)
pos = ase.calculators.lammpsrun.convert(atoms.get_positions(), 'distance', 'ASE', units)
if distutils.version.LooseVersion(ase_version_str) > distutils.version.LooseVersion('3.17.0'):
positions_lammps_str = p.vector_to_lammps(pos).astype(str)
elif distutils.version.LooseVersion(ase_version_str) > distutils.version.LooseVersion('3.13.0'):
positions_lammps_str = p.positions_to_lammps_strs(pos)
else:
positions_lammps_str = map(p.pos_to_lammps_str, pos)
for i, r in enumerate(positions_lammps_str):
q = ase.calculators.lammpsrun.convert(atoms.atom_data[types[tags[i]]][2], 'charge', 'ASE', units)
fileobj.write('%6d %3d %3d %s %s %s %s' % ((i + 1, molid[i],
tags[i] + 1,
q)
+ tuple(r)))
fileobj.write(' # ' + atoms.types[tags[i]] + '\n')
# velocities
velocities = ase.calculators.lammpsrun.convert(atoms.get_velocities(), 'velocity', 'ASE', units)
if velocities is not None:
fileobj.write('\nVelocities\n\n')
for i, v in enumerate(velocities):
fileobj.write('%6d %g %g %g\n' %
(i + 1, v[0], v[1], v[2]))
# masses
masses = ase.calculators.lammpsrun.convert(atoms.get_masses(), 'mass', 'ASE', units)
tags = atoms.get_tags()
fileobj.write('\nMasses\n\n')
for i, type, tag in zip(range(len(atoms.types)), atoms.types, np.unique(tags)):
fileobj.write('%6d %g # %s\n' %
(i + 1,
masses[tags == tag][0],
type))
# bonds
if len(blist):
fileobj.write('\nBonds\n\n')
for ib, bvals in enumerate(blist):
fileobj.write('%8d %6d %6d %6d ' %
(ib + 1, bvals[0] + 1, bvals[1] + 1,
bvals[2] + 1))
try:
fileobj.write('# ' + btypes[bvals[0]])
except:
pass
fileobj.write('\n')
# angles
if len(alist):
fileobj.write('\nAngles\n\n')
for ia, avals in enumerate(alist):
fileobj.write('%8d %6d %6d %6d %6d ' %
(ia + 1, avals[0] + 1,
avals[1] + 1, avals[2] + 1, avals[3] + 1))
try:
fileobj.write('# ' + atypes[avals[0]])
except:
pass
fileobj.write('\n')
# dihedrals
if len(dlist):
fileobj.write('\nDihedrals\n\n')
for i, dvals in enumerate(dlist):
fileobj.write('%8d %6d %6d %6d %6d %6d ' %
(i + 1, dvals[0] + 1,
dvals[1] + 1, dvals[2] + 1,
dvals[3] + 1, dvals[4] + 1))
try:
fileobj.write('# ' + dtypes[dvals[0]])
except:
pass
fileobj.write('\n')
def write_lammps_definitions(prefix, atoms):
"""
Write force field definitions for LAMMPS.
Filename will be 'prefix.opls'.
Parameters
----------
prefix : str
atoms : matscipy.opls.OPLSStructure
"""
if isinstance(prefix, str):
with open(prefix + '.opls', 'w') as fileobj:
fileobj.write('# OPLS potential\n')
fileobj.write('# write_lammps ' +
str(time.asctime(
time.localtime(time.time()))))
# bonds
if len(atoms.bond_types):
fileobj.write('\n# bonds\n')
fileobj.write('bond_style harmonic\n')
for ib, btype in enumerate(atoms.bond_types):
fileobj.write('bond_coeff %6d' % (ib + 1))
itype, jtype = btype.split('-')
name, values = atoms.bonds.name_value(itype, jtype)
for value in values:
fileobj.write(' ' + str(value))
fileobj.write(' # ' + name + '\n')
# angles
if len(atoms.ang_types):
fileobj.write('\n# angles\n')
fileobj.write('angle_style harmonic\n')
for ia, atype in enumerate(atoms.ang_types):
fileobj.write('angle_coeff %6d' % (ia + 1))
itype, jtype, ktype = atype.split('-')
name, values = atoms.angles.name_value(itype, jtype, ktype)
for value in values:
fileobj.write(' ' + str(value))
fileobj.write(' # ' + name + '\n')
# dihedrals
if len(atoms.dih_types):
fileobj.write('\n# dihedrals\n')
fileobj.write('dihedral_style opls\n')
for id, dtype in enumerate(atoms.dih_types):
fileobj.write('dihedral_coeff %6d' % (id + 1))
itype, jtype, ktype, ltype = dtype.split('-')
name, values = atoms.dihedrals.name_value(itype, jtype, ktype, ltype)
for value in values:
fileobj.write(' ' + str(value))
fileobj.write(' # ' + name + '\n')
# Lennard Jones settings
fileobj.write('\n# L-J parameters\n')
fileobj.write('pair_style lj/cut/coul/long %10.8f %10.8f\n' %
(atoms.atom_data.lj_cutoff, atoms.atom_data.c_cutoff))
fileobj.write('special_bonds lj/coul 0.0 0.0 0.5\n')
for ia, atype in enumerate(atoms.types):
for ib, btype in enumerate(atoms.types):
if len(atype) < 2:
atype = atype + ' '
if len(btype) < 2:
btype = btype + ' '
pair = atype + '-' + btype
if pair in atoms.atom_data.lj_pairs:
if ia < ib:
fileobj.write('pair_coeff %3d %3d' % (ia + 1, ib + 1))
else:
fileobj.write('pair_coeff %3d %3d' % (ib + 1, ia + 1))
for value in atoms.atom_data.lj_pairs[pair]:
fileobj.write(' ' + str(value))
fileobj.write(' # ' + pair + '\n')
elif atype == btype:
fileobj.write('pair_coeff %3d %3d' % (ia + 1, ib + 1))
for value in atoms.atom_data[atype][:2]:
fileobj.write(' ' + str(value))
fileobj.write(' # ' + atype + '\n')
fileobj.write('pair_modify shift yes mix geometric\n')
# Charges
fileobj.write('\n# charges\n')
for ia, atype in enumerate(atoms.types):
if len(atype) < 2:
atype = atype + ' '
fileobj.write('set type ' + str(ia + 1))
fileobj.write(' charge ' + str(atoms.atom_data[atype][2]))
fileobj.write(' # ' + atype + '\n')
def read_lammps_definitions(filename):
"""
Reads force field definitions from a LAMMPS parameter file and
stores the parameters in the LJQData, BondData, AnglesData and
    DihedralsData objects. The numeric LAMMPS type indices of the
    particles, bonds, angles and dihedrals are not stored in these
    objects; they are returned in separate dicts that map each
    (zero-based) index to its type name. Note that there is an offset
    of one between LAMMPS and python numbering.
====== Example ======
Parameter file:
bond_style harmonic
bond_coeff 1 1.2 3.4 # AA-AA
bond_coeff 2 5.6 7.8 # AA-BB
Returned dictionary:
bond_type_index[0] = 'AA-AA'
bond_type_index[1] = 'AA-BB'
====== End of example ======
Parameters
----------
filename : str
Returns
-------
ljq_data : matscipy.opls.LJQData
bond_data : matscipy.opls.BondData
ang_data : matscipy.opls.AnglesData
dih_data : matscipy.opls.DihedralsData
particle_type_index : dict
bond_type_index : dict
ang_type_index : dict
dih_type_index : dict
"""
with open(filename, 'r') as fileobj:
bond_nvh = {}
ang_nvh = {}
dih_nvh = {}
particle_type_index = {}
bond_type_index = {}
ang_type_index = {}
dih_type_index = {}
ljq_data = matscipy.opls.LJQData({})
for line in fileobj.readlines():
            re_lj_cut = re.match(r'^pair_style\s+lj/cut/coul/long\s+(\d+\.?\d*)\s+(\d+\.?\d*)$', line)
if re_lj_cut:
ljq_data.lj_cutoff = float(re_lj_cut.groups()[0])
ljq_data.c_cutoff = float(re_lj_cut.groups()[1])
            re_pc = re.match(r'^pair_coeff\s+(\d+)\s+(\d+)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)\s+\#\s+(\S+)$', line)
            re_pc_cut = re.match(r'^pair_coeff\s+(\d+)\s+(\d+)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)\s+(\d+\.?\d*)\s+\#\s+(\S+)$', line)
if re_pc_cut:
lj_pair_type = re_pc_cut.groups()[5]
lj_pair_p1 = float(re_pc_cut.groups()[2])
lj_pair_p2 = float(re_pc_cut.groups()[3])
lj_pair_p3 = float(re_pc_cut.groups()[4])
ljq_data.lj_pairs[lj_pair_type] = [lj_pair_p1, lj_pair_p2, lj_pair_p3]
t1, t2 = lj_pair_type.split('-')
if t1 == t2 and t1 not in ljq_data:
ljq_data[t1] = [lj_pair_p1, lj_pair_p2]
if re_pc:
lj_type = re_pc.groups()[4]
lj_p1 = float(re_pc.groups()[2])
lj_p2 = float(re_pc.groups()[3])
if not lj_type in ljq_data:
ljq_data[lj_type] = [lj_p1, lj_p2]
else:
ljq_data[lj_type] = [lj_p1, lj_p2, ljq_data[lj_type][-1]]
            re_q = re.match(r'^set\s+type\s+(\d+)\s+charge\s+(-?\d+\.?\d*)\s+\#\s+(\S+)$', line)
if re_q:
q_type = re_q.groups()[2]
q_index = int(re_q.groups()[0]) - 1
q_p1 = float(re_q.groups()[1])
if not q_type in ljq_data:
ljq_data[q_type] = [q_p1]
else:
ljq_data[q_type] = [ljq_data[q_type][0], ljq_data[q_type][1], q_p1]
particle_type_index[q_index] = q_type
            re_bond_coeff = re.match(r'^bond_coeff\s+(\d+)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)\s+\#\s+(\S+)$', line)
if re_bond_coeff:
bond_type = re_bond_coeff.groups()[3]
bond_index = int(re_bond_coeff.groups()[0]) - 1
bond_p1 = float(re_bond_coeff.groups()[1])
bond_p2 = float(re_bond_coeff.groups()[2])
bond_nvh[bond_type] = [bond_p1, bond_p2]
bond_type_index[bond_index] = bond_type
            re_ang_coeff = re.match(r'^angle_coeff\s+(\d+)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)\s+\#\s+(\S+)$', line)
if re_ang_coeff:
ang_type = re_ang_coeff.groups()[3]
ang_index = int(re_ang_coeff.groups()[0]) - 1
ang_p1 = float(re_ang_coeff.groups()[1])
ang_p2 = float(re_ang_coeff.groups()[2])
ang_nvh[ang_type] = [ang_p1, ang_p2]
ang_type_index[ang_index] = ang_type
            re_dih_coeff = re.match(r'^dihedral_coeff\s+(\d+)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)\s+\#\s+(\S+)$', line)
if re_dih_coeff:
dih_type = re_dih_coeff.groups()[5]
dih_index = int(re_dih_coeff.groups()[0]) - 1
dih_p1 = float(re_dih_coeff.groups()[1])
dih_p2 = float(re_dih_coeff.groups()[2])
dih_p3 = float(re_dih_coeff.groups()[3])
dih_p4 = float(re_dih_coeff.groups()[4])
dih_nvh[dih_type] = [dih_p1, dih_p2, dih_p3, dih_p4]
dih_type_index[dih_index] = dih_type
bond_data = matscipy.opls.BondData(bond_nvh)
ang_data = matscipy.opls.AnglesData(ang_nvh)
dih_data = matscipy.opls.DihedralsData(dih_nvh)
return (ljq_data, bond_data, ang_data, dih_data,
particle_type_index, bond_type_index, ang_type_index, dih_type_index)
def read_lammps_data(filename, filename_lammps_params=None):
"""
Read positions, bonds, angles and dihedrals from a LAMMPS file.
Optionally, a LAMMPS parameter file can be specified to restore
all interactions from a preceding simulation.
Parameters
----------
filename : str
filename_lammps_params : str
Returns
-------
matscipy.opls.OPLSStructure
"""
atoms = ase.io.read(filename, format='lammps-data', Z_of_type=None,
style='full', sort_by_id=False, units='metal')
tags = copy.deepcopy(atoms.numbers)
# try to guess the atomic numbers from the particle masses
atomic_numbers = np.empty(len(atoms), dtype=int)
ams = ase.data.atomic_masses[:]
ams[np.isnan(ams)] = 0
for i, mass in enumerate(atoms.get_masses()):
m2 = (ams - mass)**2
atomic_numbers[i] = m2.argmin()
atoms.numbers = atomic_numbers
opls_struct = matscipy.opls.OPLSStructure(atoms)
opls_struct.charges = opls_struct.get_array('initial_charges')
opls_struct.set_tags(tags)
opls_struct.set_array('molid', atoms.get_array('mol-id'))
if filename_lammps_params:
if 'bonds' in atoms.arrays:
bond_list = []
for bond_i, bond in enumerate(atoms.get_array('bonds')):
for item in bond.split(','):
                    re_bond = re.match(r'(\d+)\((\d+)\)', item)
if re_bond:
bond_j = int(re_bond.groups()[0])
bond_type_num = int(re_bond.groups()[1])-1
bond_list.append([bond_type_num, bond_i, bond_j])
opls_struct.bond_list = np.array(bond_list)
else:
opls_struct.bond_list = []
if 'angles' in atoms.arrays:
ang_list = []
for ang_j, ang in enumerate(atoms.get_array('angles')):
for item in ang.split(','):
                    re_ang = re.match(r'(\d+)-(\d+)\((\d+)\)', item)
if re_ang:
ang_i = int(re_ang.groups()[0])
ang_k = int(re_ang.groups()[1])
ang_type_num = int(re_ang.groups()[2])-1
ang_list.append([ang_type_num, ang_i, ang_j, ang_k])
opls_struct.ang_list = np.array(ang_list)
else:
opls_struct.ang_list = []
if 'dihedrals' in atoms.arrays:
dih_list = []
for dih_i, dih in enumerate(atoms.get_array('dihedrals')):
for item in dih.split(','):
                    re_dih = re.match(r'(\d+)-(\d+)-(\d+)\((\d+)\)', item)
if re_dih:
dih_j = int(re_dih.groups()[0])
dih_k = int(re_dih.groups()[1])
dih_l = int(re_dih.groups()[2])
dih_type_num = int(re_dih.groups()[3])-1
dih_list.append([dih_type_num, dih_i, dih_j, dih_k, dih_l])
opls_struct.dih_list = np.array(dih_list)
else:
opls_struct.dih_list = []
# further settings require data in 'filename_lammps_params'
lammps_params = read_lammps_definitions(filename_lammps_params)
opls_struct.set_atom_data(lammps_params[0])
part_type_index = lammps_params[4]
part_types = np.full(len(opls_struct), None)
for i, part_type in enumerate(atoms.get_array('type') - 1):
part_types[i] = part_type_index[part_type]
opls_struct.set_types(part_types)
if 'bonds' in atoms.arrays:
opls_struct.bonds = lammps_params[1]
bond_type_index = lammps_params[5]
bond_types = []
for bond_type_num in np.unique(opls_struct.bond_list.T[0]):
bond_types.append(bond_type_index[bond_type_num])
opls_struct.bond_types = bond_types
else:
opls_struct.bond_types = []
if 'angles' in atoms.arrays:
opls_struct.angles = lammps_params[2]
ang_type_index = lammps_params[6]
ang_types = []
for ang_type_num in np.unique(opls_struct.ang_list.T[0]):
ang_types.append(ang_type_index[ang_type_num])
opls_struct.ang_types = ang_types
else:
opls_struct.ang_types = []
if 'dihedrals' in atoms.arrays:
opls_struct.dihedrals = lammps_params[3]
dih_type_index = lammps_params[7]
dih_types = []
for dih_type_num in np.unique(opls_struct.dih_list.T[0]):
dih_types.append(dih_type_index[dih_type_num])
opls_struct.dih_types = dih_types
else:
opls_struct.dih_types = []
return opls_struct
def update_from_lammps_dump(atoms, filename, check=True):
"""
Read simulation cell, positions and velocities from a LAMMPS
dump file and use them to update an existing configuration.
Parameters
----------
atoms : matscipy.opls.OPLSStructure
filename : str
check : bool
Returns
-------
matscipy.opls.OPLSStructure
"""
atoms_dump = ase.io.lammpsrun.read_lammps_dump(filename)
if len(atoms_dump) != len(atoms):
raise RuntimeError('Structure in ' + filename +
' has wrong length: %d != %d' %
(len(atoms_dump), len(atoms)))
if check:
for a, b in zip(atoms, atoms_dump):
# check that the atom types match
if not (a.tag + 1 == b.number):
raise RuntimeError('Atoms index %d are of different '
'type (%d != %d)'
% (a.index, a.tag + 1, b.number))
atoms.set_cell(atoms_dump.get_cell())
atoms.set_positions(atoms_dump.get_positions())
if atoms_dump.get_velocities() is not None:
atoms.set_velocities(atoms_dump.get_velocities())
return atoms
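# --- Illustrative usage (added sketch; file names are placeholders) ---
# Restore a fully parameterized structure from a previous run and update
# cell, positions and velocities from a LAMMPS dump file.
def _example_restart():
    struct = read_lammps_data('sim.atoms', filename_lammps_params='sim.opls')
    struct = update_from_lammps_dump(struct, 'relax.dump')
    return struct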
| 30,688 | 36.06401 | 146 | py |
matscipy | matscipy-master/matscipy/calculators/supercell_calculator.py | #
# Copyright 2017 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Run calculation on a supercell of the atomic structure.
"""
import numpy as np
import ase
from ase.calculators.calculator import Calculator
###
class SupercellCalculator(Calculator):
implemented_properties = ['energy', 'stress', 'forces']
default_parameters = {}
    name = 'SupercellCalculator'
def __init__(self, calc, supercell):
Calculator.__init__(self)
self.calc = calc
self.supercell = supercell
def calculate(self, atoms, properties, system_changes):
Calculator.calculate(self, atoms, properties, system_changes)
atoms = self.atoms.copy()
atoms.set_constraint(None)
atoms *= self.supercell
atoms.set_calculator(self.calc)
energy = atoms.get_potential_energy()
stress = atoms.get_stress()
forces = atoms.get_forces()
self.results = {'energy': energy/np.prod(self.supercell),
'stress': stress,
'forces': forces[:len(self.atoms)]}
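# --- Illustrative usage (added sketch; EMT is only a stand-in) ---
# Wrap another calculator so every evaluation runs on a 2x2x2 repetition
# of the structure; the energy is returned per original cell.
def _example_supercell():
    from ase.build import bulk
    from ase.calculators.emt import EMT
    atoms = bulk('Cu', cubic=True)
    atoms.calc = SupercellCalculator(EMT(), (2, 2, 2))
    return atoms.get_potential_energy()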
| 1,783 | 30.298246 | 71 | py |
matscipy | matscipy-master/matscipy/calculators/fitting.py | #
# Copyright 2015, 2021 Lars Pastewka (U. Freiburg)
# 2015-2016 Adrien Gola (KIT)
# 2015 [email protected]
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Helper routines for potential fitting
"""
from math import atanh, sqrt, tanh, isnan
import sys
import numpy as np
import ase
import ase.constraints
import ase.io
import ase.lattice.compounds as compounds
import ase.lattice.cubic as cubic
import ase.lattice.hexagonal as hexagonal
import ase.optimize
from ase.units import GPa,J,m
import scipy
from scipy.optimize import minimize, leastsq, brute
from scipy.signal import argrelextrema
try:
from scipy.optimize import anneal
except ImportError:
    # FIXME! scipy.optimize.anneal deprecated since version 0.14.0; the documentation advises using scipy.optimize.basinhopping instead
pass
try:
from openopt import GLP
have_openopt = True
except ImportError:
have_openopt = False
###
_logfile = None
Jm2 = 1e23/ase.units.kJ
### Parameters
class Parameters(object):
"""
Stores a parameter set for fitting purposes.
In particular, it distinguishes between variable parameters (to be fitted),
constant parameter and derived parameters, the latter ones having a
functional dependence on the other parameters.
"""
__slots__ = [ 'default', 'variable', 'constant', 'derived', 'hidden',
'parameters', 'ranges', 'range_mapping' ]
def __init__(self, default, constant, derived, ranges={}, hidden=[]):
"""
Parameters
----------
default : dict
Dictionary with the default parameter set
constant : list
List of parameters that are constant
derived : dict
Dictionary with derived parameters and a function to get those
derived values.
"""
self.default = default
self.constant = set(constant)
self.derived = derived
self.ranges = ranges
self.hidden = set(hidden)
self.parameters = self.default.copy()
self.range_mapping = False
self._update_variable()
self._update_derived()
def set_range_derived(self):
self.range_mapping = True
for key, ( x1, x2 ) in self.ranges.items():
if x1 > x2:
raise RuntimeError('Inverted ranges {0}:{1} for parameter {2}.'
.format(x1, x2, key))
x = self.parameters[key]
if x <= x1 or x >= x2:
raise ValueError('Parameter {0} has value {1} which is '
'outside of the bounds {2}:{3}.'
.format(key, x, x1, x2))
self.parameters[':'+key] = atanh(2*(x-x1)/(x2-x1)-1)
self.variable += [ ':'+key ]
self.hidden.add(':'+key)
def _update_variable(self):
self.variable = []
for key in self.default.keys():
if not ( key in self.constant or key in self.derived or
key in self.ranges ):
self.variable += [ key ]
def _update_constant(self):
self.constant = []
for key in self.default.keys():
if not ( key in self.variable or key in self.derived ):
self.constant += [ key ]
def _update_derived(self):
if self.range_mapping:
for key, ( x1, x2 ) in self.ranges.items():
self.parameters[key] = x1+\
0.5*(x2-x1)*(1+tanh(self.parameters[':'+key]))
for key, func in self.derived.items():
self.parameters[key] = func(self.parameters)
def __len__(self):
return len(self.variable)
def set_variable(self, variable):
self.variable = variable
self._update_constant()
def get_variable(self):
return self.variable
def set_constant(self, constant):
self.constant = constant
self._update_variable()
def get_constant(self):
return self.constant
def set_derived(self, derived):
self.derived = derived
self._update_variable()
self._update_derived()
def get_derived(self):
return self.derived
def set(self, key, value):
if key in self.derived:
raise RuntimeError('Cannot set parameter %s since it is a derived '
'quantity.' % key)
self.parameters[key] = value
self._update_derived()
__setitem__ = set
def get(self, key):
return self.parameters[key]
__getitem__ = get
    def __getattr__(self, key):
        try:
            return self.get(key)
        except Exception:
            raise AttributeError(key)
def set_dict(self, d):
for key, value in d.items():
self.set(key, value)
def get_dict(self):
p = self.parameters.copy()
if self.hidden is not None:
for key in self.hidden:
del p[key]
return p
def set_array(self, a, keys=None):
if keys is None:
keys = self.variable
for key, value in zip(keys, a):
self.set(key, value)
def get_array(self, keys=None):
if keys is None:
keys = self.variable
return [ self.parameters[key] for key in keys ]
def get_lower_bounds(self, keys=None):
if keys is None:
keys = self.variable
return [ self.ranges[key][0] for key in keys ]
def get_upper_bounds(self, keys=None):
if keys is None:
keys = self.variable
return [ self.ranges[key][1] for key in keys ]
def in_range(self, key=None):
if self.ranges is None:
return True
if not key is None:
if key in self.ranges:
x1, x2 = self.ranges[key]
return self.parameters[key] >= x1 and self.parameters[key] <= x2
else:
return True
        r = True
        for key, (x1, x2) in self.ranges.items():
            r = r and x1 <= self.parameters[key] <= x2
        return r
def __str__(self):
s = ''
for key, value in self.parameters.items():
if not key.startswith(':'):
s += '# {0:>24s} = {1}\n'.format(key, value)
return s
### Fitting superclass
class Fit(object):
"""
Parameter optimization class.
"""
__slots__ = [ 'atoms', 'calc', 'cost_history', 'minimal_cost', 'par',
'par_at_minimal_cost', 'residuals_history' ]
def __init__(self, calc, par):
self.calc = calc
self.par = par
self.minimal_cost = 1e40
self.par_at_minimal_cost = None
self.cost_history = []
self.residuals_history = []
def set_parameters_from_array(self, p):
self.par.set_array(p)
self.set_calculator(self.calc(**self.par.get_dict()))
def set_calculator_class(self, calc):
self.calc = calc
def set_parameters(self, par):
self.par = par
self.set_calculator(self.calc(**self.par.get_dict()))
def get_potential_energy(self):
return self.atoms.get_potential_energy()
def get_cohesive_energy(self):
return self.atoms.get_potential_energy()/len(self.atoms)
def get_square_residuals(self, p=None, log=None):
r2 = 0.0
if p is not None:
self.set_parameters_from_array(p)
r = np.array(self.get_residuals(log=log))
self.residuals_history += [ r ]
return r*r
def get_cost_function(self, p=None, log=None, store_history=False):
try:
c = np.sum(self.get_square_residuals(p, log=log))
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
print('# Warning: Error computing residuals. Penalizing '
'parameters but continuing. Error message: {0}' \
.format(e), file=log)
c = 1e40
if log is not None:
print('# Current value of the cost function/residual: {0}' \
.format(c), file=log)
if isnan(c):
c = 1e40
        if store_history:
            if self.cost_history == [] or c < np.min(self.cost_history):
                print('# New minimum of cost function: {0}'.format(c),
                      file=log)
                self.minimal_cost = c
                self.par_at_minimal_cost = p
            self.cost_history += [c]
return c
def get_residuals_history(self):
return np.array(self.residuals_history)
def get_cost_history(self):
return np.array(self.cost_history)
def get_parameters_at_minimal_cost(self, log=sys.stdout):
self.set_parameters_from_array(self.par_at_minimal_cost)
print('=== PARAMETERS AT MINIMAL COST ===', file=log)
self.get_cost_function(self.par_at_minimal_cost, log=log)
return self.par
def optimize(self, log=sys.stdout, **kwargs):
self.par.set_range_derived()
res = minimize(self.get_cost_function, self.par.get_array(),
args=(log, True,), **kwargs)
self.set_parameters_from_array(res.x)
print('=== HISTORY OF COST FUNCTION ===', file=log)
print(self.cost_history, file=log)
print('=== FINAL OPTIMIZED PARAMETER SET ===', file=log)
final_cost = self.get_cost_function(res.x, log)
        if abs(final_cost - self.minimal_cost) > 1e-6:
            print('# WARNING: Final cost (={0}) is not minimal (={1}). This '
                  'may come from insufficiently converged calculations. Try '
                  'to decrease fmax.'.format(final_cost, self.minimal_cost),
                  file=log)
return self.par
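    # Hedged usage sketch; `MyCalculator` is a placeholder for an ASE-style
    # calculator class and the kwargs are forwarded to
    # scipy.optimize.minimize:
    #
    #   fit = SomeFitTarget(MyCalculator, par, ...)
    #   par = fit.optimize(method='Nelder-Mead', options={'maxiter': 1000})
    #   print(par)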
def optimize_leastsq(self, log=sys.stdout):
self.par.set_range_derived()
self.set_parameters_from_array(leastsq(self.get_square_residuals,
self.par.get_array(),
args=(log,))[0])
return self.par
def optimize_anneal(self, **kwargs):
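        # Note: scipy.optimize.anneal was removed in SciPy 0.16, so this
        # method only works with old SciPy versions; dual_annealing is the
        # modern replacement.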
self.set_parameters_from_array(anneal(self.get_cost_function,
self.par.get_array(),
lower=self.par.get_lower_bounds(),
upper=self.par.get_upper_bounds(),
**kwargs))
return self.par
    def optimize_brute(self, log=sys.stdout, **kwargs):
        x0 = brute(
            self.get_cost_function,
            ranges=[tuple(bounds)
                    for bounds in np.transpose([self.par.get_lower_bounds(),
                                                self.par.get_upper_bounds()])],
            **kwargs)
        self.set_parameters_from_array(x0)
        print('=== OPTIMIZED PARAMETER SET ===', file=log)
        self.get_cost_function(x0, log=log)
        return self.par
    def optimize_openopt(self, solver='interalg', log=sys.stdout):
        if not have_openopt:
            raise RuntimeError('OpenOpt not available.')
        p = GLP(self.get_cost_function, self.par.get_array(),
                lb=self.par.get_lower_bounds(),
                ub=self.par.get_upper_bounds())
        r = p.solve(solver)
        print(r, file=log)
        self.set_parameters_from_array(r.xf)
        print('=== OPTIMIZED PARAMETER SET ===', file=log)
        self.get_cost_function(r.xf, log=log)
        return self.par
class CombinedFit(Fit):
__slots__ = [ 'targets' ]
def __init__(self, calc, par, targets):
Fit.__init__(self, calc, par)
self.targets = targets
def set_parameters_from_array(self, p):
for target in self.targets:
target.set_parameters_from_array(p)
def set_parameters(self, p):
for target in self.targets:
target.set_parameters(p)
def set_calculator_class(self, calc):
for target in self.targets:
target.set_calculator_class(calc)
def get_residuals(self, log=None):
if log is not None:
print('', file=log)
print('# Computing properties for parameter set:', file=log)
print(self.par, file=log)
            with open('par.out', 'w') as f:
                print(self.par, file=f)
r = []
for target in self.targets:
r = np.append(r, target.get_residuals(log=log))
return r
def get_potential_energy(self):
raise RuntimeError('get_potential_energy does not make sense for the '
'CombinedFit class.')
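# Sketch of combining several targets into a single least-squares problem
# (the calculator class and the reference values below are placeholders):
#
#   fit = CombinedFit(MyCalculator, par,
#                     targets=[FitDimer(MyCalculator, par, 'Cu', 2.0, 2.2),
#                              FitFCC(MyCalculator, par, 'Cu', 3.5, 3.6)])
#   par = fit.optimize(method='Nelder-Mead')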
class RotatingFit(object):
__slots__ = [ 'targets', 'par' ]
def __init__(self, par, targets):
self.par = par
self.targets = targets
    def optimize(self, pmax=1e-3, mix=None, log=sys.stdout, **kwargs):
globalv = np.array(self.par.get_variable()).copy()
dp = 1e6
while dp > pmax:
p = np.array(self.par.get_array())
for target, variable in self.targets:
if log is not None:
print('# ===', target, '===', variable, '===', file=log)
self.par.set_variable(variable)
target.set_parameters(self.par)
self.par = target.optimize(**kwargs)
self.par.set_variable(globalv)
cp = np.array(self.par.get_array())
dp = cp - p
dp = sqrt(np.sum(dp*dp))
if mix is not None:
self.par.set_array(mix*cp + (1-mix)*p)
self.par.set_variable(globalv)
return self.par
### Generic penalty function
class Penalty(Fit):
__slots__ = [ 'func' ]
def __init__(self, calc, par, func):
Fit.__init__(self, calc, par)
self.func = func
def set_calculator(self, calc):
"""
Set the calculator
"""
pass
def get_residuals(self, log=None):
return self.func(self.par, log=log)
### Single point
class FitSinglePoint(Fit):
    __slots__ = [ 'energy', 'forces', 'original_atoms', 'stress', 'w_energy',
                  'w_forces', 'w_stress' ]
    def __init__(self, calc, par, atoms, w_energy=None, w_forces=None,
                 w_stress=None):
        Fit.__init__(self, calc, par)
        self.original_atoms = atoms
        self.w_energy = w_energy
        self.w_forces = w_forces
        self.w_stress = w_stress
        self.energy = atoms.get_potential_energy()
        self.forces = atoms.get_forces().copy()
        self.stress = atoms.get_stress().copy()
        self.atoms = None
def set_calculator(self, calc):
"""
Set the calculator
"""
self.atoms = self.original_atoms.copy()
self.atoms.set_calculator(calc)
self.atoms.get_potential_energy()
def get_residuals(self, log=None):
r = []
w = []
if self.w_energy is not None:
cr = [ self.w_energy*(
self.atoms.get_potential_energy()-self.energy
)/self.energy ]
r += cr
w += [ self.w_energy ]
if self.w_forces is not None:
cr = list(
(self.w_forces*(
self.atoms.get_forces() - self.forces
)/self.forces).flatten()
)
r += cr
w += list(self.w_forces*np.ones_like(self.forces).flatten())
if self.w_stress is not None:
cr = list(
(self.w_stress*(
self.atoms.get_stress() - self.stress
)/self.stress).flatten()
)
r += cr
w += list(self.w_stress*np.ones_like(self.stress).flatten())
return r
### Specific structures
class FitDimer(Fit):
    __slots__ = [ 'D0', 'els', 'fmax', 'r0', 'vacuum', 'w_D0', 'w_r0' ]
    def __init__(self, calc, par, els, D0, r0,
                 w_D0=1.0, w_r0=1.0,
                 vacuum=10.0, fmax=1e-6):
        Fit.__init__(self, calc, par)
        self.els = els
        self.D0 = D0
        self.r0 = r0
        self.w_D0 = sqrt(w_D0)/self.D0
        self.w_r0 = sqrt(w_r0)/self.r0
        self.vacuum = vacuum
        self.fmax = fmax
        if type(self.els) == str:
            self.els = 2*[self.els]
        self.atoms = None
    def new_dimer(self):
        self.atoms = ase.Atoms(
            self.els,
            [[0.0, 0.0, 0.0],
             [0.0, 0.0, self.r0]],
            pbc = False)
        self.atoms.center(vacuum=self.vacuum)
def set_calculator(self, calc):
"""
Set the calculator, and relax the structure to its ground-state.
"""
self.new_dimer()
self.atoms.set_calculator(calc)
ase.optimize.FIRE(self.atoms, logfile=_logfile).run(fmax=self.fmax,steps=10000)
def get_distance(self):
return self.atoms.get_distance(0, 1)
def get_residuals(self, log=None):
D0 = self.atoms.get_potential_energy()
r0 = self.atoms.get_distance(0, 1)
r_D0 = self.w_D0*(D0+self.D0)
r_r0 = self.w_r0*(r0-self.r0)
if log is not None:
print('# %20s D0 = %20.10f eV (%20.10f eV) - %20.10f' \
% ( 'Dimer', D0, -self.D0, r_D0 ), file=log)
print('# %20s r0 = %20.10f A (%20.10f A) - %20.10f' \
% ( '', r0, self.r0, r_r0 ), file=log)
return r_D0, r_r0
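# Note the sign convention above: D0 is passed as a positive binding energy
# while get_potential_energy() is negative at the minimum, hence the residual
# uses (D0 + self.D0). A quick sketch (elements and targets are placeholders):
#
#   dimer = FitDimer(MyCalculator, par, 'Cu', D0=2.0, r0=2.2)
#   dimer.set_parameters(par)            # builds and relaxes the dimer
#   print(dimer.get_residuals(log=sys.stdout))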
class FitCubicCrystal(Fit):
    __slots__ = [ 'a0', 'B', 'C11', 'C12', 'C44', 'Cp', 'Ec', 'ecoh_ref',
                  'els', 'eps', 'fmax', 'SFE', 'size', 'supercell', 'unitcell',
                  'w_a0', 'w_B', 'w_C11', 'w_C12', 'w_C44', 'w_Cp', 'w_Ec',
                  'w_SFE' ]
def __init__(self, calc, par, els,
Ec, a0,
                 B=None, C11=None, C12=None, C44=None, Cp=None, SFE=None,
                 w_Ec=1.0, w_a0=1.0,
                 w_B=1.0, w_C11=1.0, w_C12=1.0, w_C44=1.0, w_Cp=1.0, w_SFE=1.0,
fmax=1e-6, eps=0.001,
ecoh_ref=None,
size=[1,1,1]):
Fit.__init__(self, calc, par)
self.els = els
self.a0 = a0
self.Ec = Ec
self.SFE = SFE
self.B = B
self.C11 = C11
self.C12 = C12
self.C44 = C44
self.Cp = Cp
self.ecoh_ref = ecoh_ref
self.w_a0 = sqrt(w_a0)/self.a0
self.w_Ec = sqrt(w_Ec)/self.Ec
if self.SFE is not None:
self.w_SFE = sqrt(w_SFE)/self.SFE
if self.B is not None:
self.w_B = sqrt(w_B)/self.B
if self.C11 is not None:
self.w_C11 = sqrt(w_C11)/self.C11
if self.C12 is not None:
self.w_C12 = sqrt(w_C12)/self.C12
if self.C44 is not None:
self.w_C44 = sqrt(w_C44)/self.C44
if self.Cp is not None:
self.w_Cp = sqrt(w_Cp)/self.Cp
self.size = size
self.fmax = fmax
self.eps = eps
self.atoms = None
def new_bulk(self):
self.unitcell = self.crystal(
self.els,
latticeconstant = self.a0,
size = [1, 1, 1]
)
self.atoms = self.unitcell.copy()
self.atoms *= self.size
self.atoms.translate([0.1, 0.1, 0.1])
def set_calculator(self, calc):
self.new_bulk()
self.atoms.set_calculator(calc)
ase.optimize.FIRE(
ase.constraints.StrainFilter(self.atoms, mask=[1,1,1,0,0,0]),
logfile=_logfile).run(fmax=self.fmax,steps=10000)
a0 = self.get_lattice_constant()
self.supercell = self.crystal(
self.els,
latticeconstant = a0,
size = [3, 3, 6],
directions=[[1,1,2],[-1,1,0],[-1,-1,1]],
pbc=(1,1,0)
)
self.supercell.translate([0.1, 0.1, 0.1])
self.supercell.set_calculator(calc)
def get_lattice_constant(self):
return np.sum(self.atoms.get_cell().diagonal())/np.sum(self.size)
def get_SFE(self):
E0 = self.supercell.get_potential_energy()/J*1e3
S0 = self.supercell.get_cell()[0,0]*self.supercell.get_cell()[1,1]/m/m
pos = self.supercell.get_positions()
x,y=[],[]
        for i in range(10):
            pos1 = np.copy(pos)
            # Shift the top half of the slab along x to sample the
            # generalized stacking fault curve.
            shift = (self.supercell.get_cell()[0, 0]/3)*(0.08 + i/50)
            pos1[:, 0][pos[:, 2] > self.supercell.get_cell()[2, 2]/2 - 2] -= shift
            self.supercell.set_positions(pos1)
            Es = self.supercell.get_potential_energy()/J*1e3
            x.append(0.08 + i/50)
            y.append((Es/S0) - (E0/S0))
GSF_fit = scipy.interpolate.InterpolatedUnivariateSpline(x,y)
x_fit = np.linspace(0.08,0.08+(9./50),50)
mins = argrelextrema(GSF_fit(x_fit),np.less)
x_mins = x_fit[mins[0]]
y_mins = GSF_fit(x_fit)[mins[0]]
return y_mins[0]
def get_C11(self):
sxx0, syy0, szz0, syz0, szx0, sxy0 = self.atoms.get_stress()
cell = self.atoms.get_cell()
T = np.diag( [ self.eps, 0.0, 0.0 ] )
self.atoms.set_cell( np.dot(np.eye(3)+T, cell), scale_atoms=True )
sxx11, syy11, szz11, syz11, szx11, sxy11 = self.atoms.get_stress()
self.atoms.set_cell(cell, scale_atoms=True)
return (sxx11-sxx0)/self.eps
def get_Cp(self):
sxx0, syy0, szz0, syz0, szx0, sxy0 = self.atoms.get_stress()
cell = self.atoms.get_cell()
T = np.diag( [ self.eps, -self.eps, 0.0 ] )
self.atoms.set_cell( np.dot(np.eye(3)+T, cell), scale_atoms=True )
sxx12, syy12, szz12, syz12, szx12, sxy12 = self.atoms.get_stress()
self.atoms.set_cell(cell, scale_atoms=True)
return ((sxx12-sxx0)-(syy12-syy0))/(4*self.eps)
def get_C44(self):
sxx0, syy0, szz0, syz0, szx0, sxy0 = self.atoms.get_stress()
cell = self.atoms.get_cell()
T = np.array( [ [ 0.0, 0.5*self.eps, 0.5*self.eps ],
[ 0.5*self.eps, 0.0, 0.5*self.eps ],
[ 0.5*self.eps, 0.5*self.eps, 0.0 ] ] )
self.atoms.set_cell( np.dot(np.eye(3)+T, cell), scale_atoms=True )
sxx44, syy44, szz44, syz44, szx44, sxy44 = self.atoms.get_stress()
self.atoms.set_cell(cell, scale_atoms=True)
return (syz44+szx44+sxy44-syz0-szx0-sxy0)/(3*self.eps)
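    # The three probes above estimate moduli from finite differences of the
    # stress, e.g. for get_C11 with a strain eps along x:
    #
    #   C11 ~ (s_xx(eps) - s_xx(0)) / eps
    #
    # get_Cp returns (C11 - C12)/2 and get_C44 averages the three shear
    # responses; get_residuals below then recovers B = (3*C11 - 4*Cp)/3 and
    # C12 = C11 - 2*Cp from these combinations.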
def get_residuals(self, log=None):
Ec = self.get_potential_energy()
a0 = self.get_lattice_constant()
if self.ecoh_ref is not None:
syms = np.array(self.atoms.get_chemical_symbols())
for el in set(syms):
Ec -= (syms==el).sum()*self.ecoh_ref[el]
Ec /= len(self.atoms)
r_Ec = self.w_Ec*( Ec - self.Ec )
r_a0 = self.w_a0*( a0 - self.a0 )
        if log is not None:
            print('# %20s Ec = %20.10f eV/at. (%20.10f eV/at.) - %20.10f' \
                % ( '%s (%s)' % (self.unitcell.get_chemical_formula(),
                                 self.crystalstr), Ec, self.Ec, r_Ec ),
                file=log)
            print('# %20s a0 = %20.10f A (%20.10f A) - %20.10f' \
                % ( '', a0, self.a0, r_a0 ), file=log)
r = [ r_Ec, r_a0 ]
if self.SFE is not None:
SFE = self.get_SFE()
if self.B is not None or self.C11 is not None or self.C12 is not None:
C11 = self.get_C11()
if self.B is not None or self.Cp is not None or self.C12 is not None:
Cp = self.get_Cp()
if self.C44 is not None:
C44 = self.get_C44()
if self.SFE is not None:
if SFE < 0:
r_SFE = self.w_SFE*( SFE - self.SFE )*1000
else:
r_SFE = self.w_SFE*( SFE - self.SFE )
r += [ r_SFE ]
            if log is not None:
                print('# %20s SFE = %20.10f mJ/m**2 (%20.10f mJ/m**2) - %20.10f' \
                    % ( '', SFE, self.SFE, r_SFE ), file=log)
if self.B is not None:
r_B = self.w_B*( (3*C11-4*Cp)/3 - self.B )
r += [ r_B ]
            if log is not None:
                print('# %20s B = %20.10f GPa (%20.10f GPa) - %20.10f' \
                    % ( '', (3*C11-4*Cp)/3/GPa, self.B/GPa, r_B ), file=log)
if self.C11 is not None:
r_C11 = self.w_C11*( C11 - self.C11 )
r += [ r_C11 ]
            if log is not None:
                print('# %20s C11 = %20.10f GPa (%20.10f GPa) - %20.10f' \
                    % ( '', C11/GPa, self.C11/GPa, r_C11 ), file=log)
if self.C12 is not None:
r_C12 = self.w_C12*( C11-2*Cp - self.C12 )
r += [ r_C12 ]
            if log is not None:
                print('# %20s C12 = %20.10f GPa (%20.10f GPa) - %20.10f' \
                    % ( '', (C11-2*Cp)/GPa, self.C12/GPa, r_C12 ), file=log)
if self.C44 is not None:
r_C44 = self.w_C44*( C44 - self.C44 )
r += [ r_C44 ]
            if log is not None:
                print('# %20s C44 = %20.10f GPa (%20.10f GPa) - %20.10f' \
                    % ( '', C44/GPa, self.C44/GPa, r_C44 ), file=log)
if self.Cp is not None:
r_Cp = self.w_Cp*( Cp - self.Cp )
r += [ r_Cp ]
            if log is not None:
                print('# %20s Cp = %20.10f GPa (%20.10f GPa) - %20.10f' \
                    % ( '', Cp/GPa, self.Cp/GPa, r_Cp ), file=log)
return r
class FitTetragonalCrystal(Fit):
    __slots__ = [ 'a0', 'B', 'c0', 'c_a', 'C11', 'C12', 'C13', 'C33', 'C44',
                  'C66', 'Ec', 'ecoh_ref', 'els', 'eps', 'fmax', 'SFE', 'size',
                  'supercell', 'unitcell', 'w_a0', 'w_B', 'w_c0', 'w_c_a',
                  'w_C11', 'w_C12', 'w_C13', 'w_C33', 'w_C44', 'w_C66', 'w_Ec',
                  'w_SFE' ]
def __init__(self, calc, par, els,
Ec, a0, c0, c_a=None,
                 B=None, C11=None, C12=None, C13=None, C33=None, C44=None,
                 C66=None, SFE=None,
                 w_Ec=1.0, w_a0=1.0, w_c0=1.0, w_c_a=1.0,
                 w_B=1.0, w_C11=1.0, w_C12=1.0, w_C13=1.0, w_C33=1.0,
                 w_C44=1.0, w_C66=1.0, w_Cp=1.0, w_SFE=1.0,
fmax=1e-6, eps=0.001,
ecoh_ref=None,
size=[1,1,1]):
Fit.__init__(self, calc, par)
self.els = els
self.a0 = a0
self.c0 = c0
self.Ec = Ec
self.c_a = c_a
self.SFE = SFE
self.B = B
self.C11 = C11
self.C12 = C12
self.C13 = C13
self.C44 = C44
self.C33 = C33
self.C66 = C66
self.ecoh_ref = ecoh_ref
self.w_a0 = sqrt(w_a0)/self.a0
self.w_c0 = sqrt(w_c0)/self.c0
if self.Ec is not None:
self.w_Ec = sqrt(w_Ec)/self.Ec
if self.c_a is not None:
self.w_c_a = sqrt(w_c_a)/self.c_a
if self.SFE is not None:
self.w_SFE = sqrt(w_SFE)/self.SFE
if self.B is not None:
self.w_B = sqrt(w_B)/self.B
if self.C11 is not None:
self.w_C11 = sqrt(w_C11)/self.C11
if self.C12 is not None:
self.w_C12 = sqrt(w_C12)/self.C12
if self.C13 is not None:
self.w_C13 = sqrt(w_C13)/self.C13
if self.C33 is not None:
self.w_C33 = sqrt(w_C33)/self.C33
if self.C44 is not None:
self.w_C44 = sqrt(w_C44)/self.C44
if self.C66 is not None:
self.w_C66 = sqrt(w_C66)/self.C66
self.size = size
self.fmax = fmax
self.eps = eps
self.atoms = None
def new_bulk(self):
self.unitcell = self.crystal(
self.els,
latticeconstant = [self.a0,self.c0],
size = [1, 1, 1]
)
self.atoms = self.unitcell.copy()
self.atoms *= self.size
self.atoms.translate([0.1, 0.1, 0.1])
def set_calculator(self, calc):
self.new_bulk()
self.atoms.set_calculator(calc)
ase.optimize.FIRE(
ase.constraints.StrainFilter(self.atoms, mask=[1,1,1,1,1,1]),
logfile=_logfile).run(fmax=self.fmax,steps=10000)
a0,c0 = self.get_lattice_constant()
self.supercell = self.crystal(
self.els,
latticeconstant = [a0,c0],
size = [3, 3, 6],
directions=[[1,1,2],[-1,1,0],[-1,-1,1]],
pbc=(1,1,0)
)
self.supercell.translate([0.1, 0.1, 0.1])
self.supercell.set_calculator(calc)
def get_SFE(self):
E0 = self.supercell.get_potential_energy()/J*1e3
S0 = self.supercell.get_cell()[0,0]*self.supercell.get_cell()[1,1]/m/m
pos = self.supercell.get_positions()
x,y=[],[]
        for i in range(10):
            pos1 = np.copy(pos)
            shift = (self.supercell.get_cell()[0, 0]/3)*(0.05 + i/50)
            pos1[:, 0][pos[:, 2] > self.supercell.get_cell()[2, 2]/2 - 2] -= shift
            self.supercell.set_positions(pos1)
            Es = self.supercell.get_potential_energy()/J*1e3
            x.append(0.05 + i/50)
            y.append((Es/S0) - (E0/S0))
GSF_fit = scipy.interpolate.InterpolatedUnivariateSpline(x,y)
x_fit = np.linspace(0.05,0.05+(9./50),50)
mins = argrelextrema(GSF_fit(x_fit),np.less)
x_mins = x_fit[mins[0]]
y_mins = GSF_fit(x_fit)[mins[0]]
return y_mins[0]
    def get_lattice_constant(self):
        cell_diag = self.atoms.get_cell().diagonal()
        return (np.sum(cell_diag[:2])/np.sum(self.size[:2]),
                cell_diag[2]/self.size[2])
def get_C33(self):
e0 = self.atoms.get_potential_energy()
v0 = self.atoms.get_volume()
cell = self.atoms.get_cell()
T = np.array( [ [ 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, self.eps ] ] )
self.atoms.set_cell( np.dot(np.eye(3)+T, cell), scale_atoms=True )
e = self.atoms.get_potential_energy()
self.atoms.set_cell(cell, scale_atoms=True)
return (2*(e-e0))/(v0*self.eps**2)
def get_C44(self):
e0 = self.atoms.get_potential_energy()
v0 = self.atoms.get_volume()
cell = self.atoms.get_cell()
T = np.array( [ [ 0.0, 0.0, self.eps ],
[ 0.0, 0.0, self.eps ],
[ self.eps, self.eps, self.eps**2 ] ] )
self.atoms.set_cell( np.dot(np.eye(3)+T, cell), scale_atoms=True )
e = self.atoms.get_potential_energy()
self.atoms.set_cell(cell, scale_atoms=True)
return (e-e0)/(4*v0*self.eps**2)
def get_C66(self):
e0 = self.atoms.get_potential_energy()
v0 = self.atoms.get_volume()
cell = self.atoms.get_cell()
T = np.array( [ [ (1.0+self.eps**2)**0.5, 0.0, 0.0 ],
[ 0.0, (1.0-self.eps**2)**0.5, 0.0 ],
[ 0.0, 0.0, 1.0 ] ] )
self.atoms.set_cell( np.dot(np.eye(3)+T, cell), scale_atoms=True )
e = self.atoms.get_potential_energy()
self.atoms.set_cell(cell, scale_atoms=True)
return (e-e0)/(2*v0*self.eps**2)
def get_D1(self):
e0 = self.atoms.get_potential_energy()
v0 = self.atoms.get_volume()
cell = self.atoms.get_cell()
T = np.array( [ [ self.eps, 0.0, 0.0 ],
[ 0.0, self.eps, 0.0 ],
[ 0.0, 0.0, 0.0 ] ] )
self.atoms.set_cell( np.dot(np.eye(3)+T, cell), scale_atoms=True )
e = self.atoms.get_potential_energy()
self.atoms.set_cell(cell, scale_atoms=True)
return (e-e0)/(v0*self.eps**2)
def get_D2(self):
e0 = self.atoms.get_potential_energy()
v0 = self.atoms.get_volume()
cell = self.atoms.get_cell()
T = np.array( [ [ 1.0+self.eps, 0.0, 0.0 ],
[ 0.0, 1.0+self.eps, 0.0 ],
[ 0.0, 0.0, 1.0/(1+self.eps)**2 ] ] )
self.atoms.set_cell( np.dot(T, cell), scale_atoms=True )
e = self.atoms.get_potential_energy()
self.atoms.set_cell(cell, scale_atoms=True)
return (e-e0)/(v0*self.eps**2)
def get_D4(self):
e0 = self.atoms.get_potential_energy()
v0 = self.atoms.get_volume()
cell = self.atoms.get_cell()
T = np.array( [ [ ((1.0+self.eps)/(1.0-self.eps))**0.5, 0.0, 0.0 ],
[ 0.0, ((1.0-self.eps)/(1.0+self.eps))**0.5, 0.0 ],
[ 0.0, 0.0, 1.0 ] ] )
self.atoms.set_cell( np.dot(T, cell), scale_atoms=True )
e = self.atoms.get_potential_energy()
self.atoms.set_cell(cell, scale_atoms=True)
return (e-e0)/(v0*self.eps**2)
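    # Unlike the cubic fit, these probes use energy (not stress) finite
    # differences: to harmonic order, E(eps) - E(0) = 0.5*V0*C_eff*eps**2
    # for an effective modulus C_eff set by the strain matrix T. Explicitly:
    #
    #   get_C33 -> C33
    #   get_D1  -> C11 + C12
    #   get_D4  -> C11 - C12
    #   get_D2  -> C11 + C12 + 2*C33 - 4*C13
    #
    # get_residuals below combines these into C11, C12, C13 and the
    # tetragonal bulk modulus B = (C33*(C11+C12) - 2*C13**2)/D2.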
def get_residuals(self, log=None):
Ec = self.get_potential_energy()
a0,c0 = self.get_lattice_constant()
if self.ecoh_ref is not None:
syms = np.array(self.atoms.get_chemical_symbols())
for el in set(syms):
Ec -= (syms==el).sum()*self.ecoh_ref[el]
Ec /= len(self.atoms)
r_a0 = self.w_a0*( a0 - self.a0 )
r_c0 = self.w_c0*( c0 - self.c0 )
r_Ec = self.w_Ec*( Ec - self.Ec )
        if log is not None:
            print('# %20s Ec = %20.10f eV/at. (%20.10f eV/at.) - %20.10f' \
                % ( '%s (%s)' % (self.unitcell.get_chemical_formula(),
                                 self.crystalstr), Ec, self.Ec, r_Ec ),
                file=log)
            print('# %20s a0 = %20.10f A (%20.10f A) - %20.10f' \
                % ( '', a0, self.a0, r_a0 ), file=log)
            print('# %20s c0 = %20.10f A (%20.10f A) - %20.10f' \
                % ( '', c0, self.c0, r_c0 ), file=log)
r = [ r_a0 ,r_c0, r_Ec]
        if self.SFE is not None:
            SFE = self.get_SFE()
        if self.B is not None or self.C11 is not None or \
           self.C12 is not None or self.C13 is not None:
            Cp = self.get_D1()
        if self.C11 is not None or self.C12 is not None:
            Cm = self.get_D4()
        if self.B is not None or self.C13 is not None or self.C33 is not None:
            C33 = self.get_C33()
        if self.B is not None or self.C13 is not None:
            Czz = self.get_D2()
            # C13 follows from the D1, D2 and C33 probes:
            # D2 = C11 + C12 + 2*C33 - 4*C13
            C13 = -(Czz - Cp - 2*C33)/4
        if self.C44 is not None:
            C44 = self.get_C44()
        if self.C66 is not None:
            C66 = self.get_C66()
if self.SFE is not None:
if SFE < 0:
r_SFE = self.w_SFE*( SFE - self.SFE )*1000
else:
r_SFE = self.w_SFE*( SFE - self.SFE )
r += [ r_SFE ]
            if log is not None:
                print('# %20s SFE = %20.10f mJ/m**2 (%20.10f mJ/m**2) - %20.10f' \
                    % ( '', SFE, self.SFE, r_SFE ), file=log)
if self.c_a is not None:
if (self.c_a < 1 and c0/a0 < 1) or (self.c_a > 1 and c0/a0 > 1) :
r_c_a = self.w_c_a*( c0/a0 - self.c_a )
else:
r_c_a = self.w_c_a*( c0/a0 - self.c_a )*1000
r += [ r_c_a ]
            if log is not None:
                print('# %20s c/a = %20.10f (%20.10f ) - %20.10f' \
                    % ( '', c0/a0 , self.c_a, r_c_a ), file=log)
if self.B is not None:
r_B = self.w_B*( (C33*Cp-2*C13**2)/Czz - self.B )
r += [ r_B ]
            if log is not None:
                print('# %20s B = %20.10f GPa (%20.10f GPa) - %20.10f' \
                    % ( '', (C33*Cp-2*C13**2)/Czz/GPa, self.B/GPa, r_B ),
                    file=log)
if self.C11 is not None:
r_C11 = self.w_C11*( (Cp+Cm)/2 - self.C11 )
r += [ r_C11 ]
            if log is not None:
                print('# %20s C11 = %20.10f GPa (%20.10f GPa) - %20.10f' \
                    % ( '', (Cp+Cm)/2/GPa, self.C11/GPa, r_C11 ), file=log)
if self.C12 is not None:
r_C12 = self.w_C12*( (Cp-Cm)/2 - self.C12 )
r += [ r_C12 ]
            if log is not None:
                print('# %20s C12 = %20.10f GPa (%20.10f GPa) - %20.10f' \
                    % ( '', (Cp-Cm)/2/GPa, self.C12/GPa, r_C12 ), file=log)
        if self.C13 is not None:
            r_C13 = self.w_C13*( C13 - self.C13 )
            r += [ r_C13 ]
            if log is not None:
                print('# %20s C13 = %20.10f GPa (%20.10f GPa) - %20.10f' \
                    % ( '', C13/GPa, self.C13/GPa, r_C13 ), file=log)
if self.C33 is not None:
r_C33 = self.w_C33*( C33 - self.C33 )
r += [ r_C33 ]
            if log is not None:
                print('# %20s C33 = %20.10f GPa (%20.10f GPa) - %20.10f' \
                    % ( '', C33/GPa, self.C33/GPa, r_C33 ), file=log)
if self.C44 is not None:
r_C44 = self.w_C44*( C44 - self.C44 )
r += [ r_C44 ]
            if log is not None:
                print('# %20s C44 = %20.10f GPa (%20.10f GPa) - %20.10f' \
                    % ( '', C44/GPa, self.C44/GPa, r_C44 ), file=log)
if self.C66 is not None:
r_C66 = self.w_C66*( C66 - self.C66 )
r += [ r_C66 ]
            if log is not None:
                print('# %20s C66 = %20.10f GPa (%20.10f GPa) - %20.10f' \
                    % ( '', C66/GPa, self.C66/GPa, r_C66 ), file=log)
return r
class FitHexagonalCrystal(Fit):
    __slots__ = [ 'a0', 'c0', 'Ec', 'els', 'fmax', 'w_a0', 'w_Ec' ]
def __init__(self, calc, par, els,
Ec, a0, c0,
w_Ec = 1.0, w_a0 = 1.0,
fmax = 0.01):
Fit.__init__(self, calc, par)
self.els = els
self.Ec = Ec
self.a0 = a0
self.c0 = c0
self.w_Ec = sqrt(w_Ec)/self.Ec
self.w_a0 = sqrt(w_a0)/self.a0
self.fmax = fmax
self.atoms = None
def new_bulk(self):
self.atoms = self.crystal(
self.els,
latticeconstant = [self.a0, self.c0],
size = [1, 1, 1]
)
self.atoms.translate([0.1, 0.1, 0.1])
def set_calculator(self, calc):
self.new_bulk()
self.atoms.set_calculator(calc)
ase.optimize.FIRE(
ase.constraints.StrainFilter(self.atoms, mask=[1,1,0,0,0,0]),
logfile=_logfile).run(fmax=self.fmax,steps=10000)
def get_lattice_constant(self):
cx, cy, cz = self.atoms.get_cell()
return ( sqrt(np.dot(cx, cx)) + sqrt(np.dot(cy, cy)) )/2
def get_residuals(self, log=None):
Ec = self.get_potential_energy()/len(self.atoms)
a0 = self.get_lattice_constant()
r_Ec = self.w_Ec*( Ec + self.Ec )
r_a0 = self.w_a0*( a0 - self.a0 )
        if log is not None:
            print('# %20s Ec = %20.10f eV/at. (%20.10f eV/at.) - %20.10f' \
                % ( 'Crystal (%s)' % self.crystalstr, Ec, -self.Ec, r_Ec ),
                file=log)
            print('# %20s a0 = %20.10f A (%20.10f A) - %20.10f' \
                % ( '', a0, self.a0, r_a0 ), file=log)
r = [ r_Ec, r_a0 ]
return r
class FitSurface(Fit):
    __slots__ = [ 'crystal', 'els', 'Esurf', 'ncells', 'w_Esurf' ]
    def __init__(self, calc, par, els, crystal,
                 Esurf,
                 w_Esurf = 1.0):
        Fit.__init__(self, calc, par)
        self.Esurf = Esurf
        self.w_Esurf = sqrt(w_Esurf)
        self.els = els
        self.crystal = crystal
def set_calculator(self, calc):
self.atoms, self.ncells = \
self.new_surface(self.crystal.get_lattice_constant())
ase.io.write('%s.cfg' % self.surfstr, self.atoms)
self.atoms.set_calculator(calc)
def get_surface_energy(self):
return ( self.atoms.get_potential_energy() -
self.crystal.get_cohesive_energy()*len(self.atoms) ) \
/(2*self.ncells)
def get_residuals(self, log=None):
Esurf = self.get_surface_energy()
sx, sy, sz = self.atoms.get_cell().diagonal()
tar_Esurf = self.Esurf*(sx*sy)/self.ncells
r_Esurf = self.w_Esurf*( Esurf - tar_Esurf )/tar_Esurf
        if log is not None:
            print('# %20s Es = %20.10f eV (%20.10f eV) - %20.10f' \
                % ( 'Surface(%s)' % self.surfstr, Esurf,
                    tar_Esurf, r_Esurf ), file=log)
            print('# %20s %20.10f J/m^2 (%20.10f J/m^2)' \
                % ( '',
                    Esurf*self.ncells*Jm2/(sx*sy),
                    self.Esurf*Jm2 ), file=log)
return [ r_Esurf ]
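# Surface energies are measured against the bulk reference passed in as
# `crystal`; a hedged sketch (calculator class and target value are
# placeholders, Esurf in eV/A**2):
#
#   bulk = FitDiamond(MyCalculator, par, 'C', Ec=7.37, a0=3.57)
#   surf = FitDiamond111(MyCalculator, par, 'C', bulk, Esurf=0.35)
#   fit = CombinedFit(MyCalculator, par, [bulk, surf])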
###
class FitSC(FitCubicCrystal):
crystal = cubic.SimpleCubic
crystalstr = 'sc'
class FitBCC(FitCubicCrystal):
crystal = cubic.BodyCenteredCubic
crystalstr = 'bcc'
class FitFCC(FitCubicCrystal):
crystal = cubic.FaceCenteredCubic
crystalstr = 'fcc'
class FitB2(FitCubicCrystal):
crystal = compounds.B2
crystalstr = 'B2'
class FitL1_0(FitTetragonalCrystal):
crystal = compounds.L1_0
crystalstr = 'L1_0'
class FitL1_2(FitCubicCrystal):
crystal = compounds.L1_2
crystalstr = 'L1_2'
class FitDiamond(FitCubicCrystal):
crystal = cubic.Diamond
crystalstr = 'dia'
class FitGraphite(FitHexagonalCrystal):
crystal = hexagonal.Graphite
crystalstr = 'gra'
class FitGraphene(FitHexagonalCrystal):
crystal = hexagonal.Graphene
crystalstr = 'grp'
class FitDiamond100(FitSurface):
surfstr = 'dia-100'
def new_surface(self, a0):
a = cubic.Diamond(
self.els,
latticeconstant = a0,
directions = [ [ 1,0,0 ],
[ 0,1,0 ],
[ 0,0,1 ] ],
size = [ 1, 1, 5 ])
a.translate([0.1,0.1,0.1])
sx, sy, sz = a.get_cell().diagonal()
a.set_cell([sx,sy,2*sz])
return a, 2
class FitDiamond110(FitSurface):
surfstr = 'dia-110'
def new_surface(self, a0):
a = cubic.Diamond(
self.els,
latticeconstant = a0,
directions = [ [ 1, 0,0 ],
[ 0, 1,1 ],
[ 0,-1,1 ] ],
size = [ 1, 1, 5 ])
a.translate([0.1,0.1,0.1])
sx, sy, sz = a.get_cell().diagonal()
a.set_cell([sx,sy,2*sz])
return a, 1
class FitDiamond111(FitSurface):
surfstr = 'dia-111'
def new_surface(self, a0):
a = cubic.Diamond(
self.els,
latticeconstant = a0,
directions = [ [ 2,-1,-1 ],
[ 0, 1,-1 ],
[ 1, 1, 1 ] ],
size = [ 1, 1, 5 ])
a.translate([0.1,0.1,0.1+a0/4])
a.set_scaled_positions(a.get_scaled_positions())
sx, sy, sz = a.get_cell().diagonal()
a.set_cell([sx,sy,2*sz])
return a, 2
| 43,316 | 31.915653 | 152 | py |
matscipy | matscipy-master/matscipy/calculators/__init__.py | #
# Copyright 2014-2015, 2017, 2021 Lars Pastewka (U. Freiburg)
# 2018-2021 Jan Griesser (U. Freiburg)
# 2020 Jonas Oldenstaedt (U. Freiburg)
# 2015 Adrien Gola (KIT)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import warnings
from .eam import EAM
from .pair_potential import PairPotential
from .supercell_calculator import SupercellCalculator
from .polydisperse import Polydisperse
from .manybody import Manybody
from .ewald import Ewald
try:
    import scipy.sparse as sp
except ImportError:
    warnings.warn('scipy is not installed; sparse matrix functionality '
                  'will not be available')
| 1,308 | 33.447368 | 71 | py |
matscipy | matscipy-master/matscipy/calculators/calculator.py | #
# Copyright 2021 Jan Griesser (U. Freiburg)
# 2021 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from scipy.sparse.linalg import cg, spilu, LinearOperator
from ase.calculators.calculator import Calculator
from numpy import deprecate
from ..elasticity import (
Voigt_6_to_full_3x3_stress,
nonaffine_elastic_contribution,
)
from ..numerical import numerical_nonaffine_forces
from ..numpy_tricks import mabincount
class MatscipyCalculator(Calculator):
"""Base class for calculators in Matscipy.
This class defines an interface for higher-order derivatives of the
potential, to be implemented by daughter calculators.
The extra properties defined by this calculator are:
- 'hessian'
- 'dynamical_matrix'
- 'nonaffine_forces'
- 'born_constants'
- 'stress_elastic_contribution'
- 'birch_coefficients'
- 'nonaffine_elastic_contribution'
- 'elastic_constants'
From the user's perspective, these can be accessed with e.g.:
>>> calc.get_property('born_constants', atoms)
Accessing properties this way makes it possible to mix different
calculators, e.g. with ``ase.calculators.mixing.SumCalculator``.
"""
def calculate(self, atoms, properties, system_changes):
super().calculate(atoms, properties, system_changes)
# Dispatching calls to special properties
properties_map = {
'hessian': self.get_hessian,
'dynamical_matrix': self.get_dynamical_matrix,
'nonaffine_forces': self.get_nonaffine_forces,
'born_constants': self.get_born_elastic_constants,
'stress_elastic_contribution':
self.get_stress_contribution_to_elastic_constants,
'birch_coefficients': self.get_birch_coefficients,
'nonaffine_elastic_contribution':
self.get_non_affine_contribution_to_elastic_constants,
'elastic_constants':
self.get_elastic_constants
}
for prop in filter(lambda p: p in properties, properties_map):
self.results[prop] = properties_map[prop](atoms)
@staticmethod
def _virial(pair_distance_vectors, pair_forces):
r_pc = pair_distance_vectors
f_pc = pair_forces
return np.concatenate([
# diagonal components (xx, yy, zz)
np.einsum('pi,pi->i', r_pc, f_pc, optimize=True),
# off-diagonal (yz, xz, xy)
np.einsum('pi,pi->i', r_pc[:, (1, 0, 0)], f_pc[:, (2, 2, 1)],
optimize=True)
])
def get_dynamical_matrix(self, atoms):
"""
Compute dynamical matrix (=mass weighted Hessian).
"""
return self.get_hessian(atoms, format="sparse", divide_by_masses=True)
def get_hessian(self, atoms, format='sparse', divide_by_masses=False):
"""
Calculate the Hessian matrix for a pair potential. For an atomic
configuration with N atoms in d dimensions the hessian matrix is a
symmetric, hermitian matrix with a shape of (d*N,d*N). The matrix is
in general a sparse matrix, which consists of dense blocks of shape
(d,d), which are the mixed second derivatives.
Parameters
----------
atoms: ase.Atoms
Atomic configuration in a local or global minima.
        format: str, optional
            Output format of the Hessian matrix, either 'sparse' or
            'neighbour-list'. The format 'sparse' returns a sparse matrix
            representation of scipy. The format 'neighbour-list' returns
            a representation within matscipy's and ASE's neighbour list
            format, i.e. the Hessian is returned per neighbour.
            (Default: 'sparse')
        divide_by_masses : bool, optional
            Divide each block entry in the Hessian matrix by sqrt(m_i m_j),
            where m_i and m_j are the masses of the two atoms the block
            couples.
Returns
-------
If format=='sparse':
hessian : scipy.sparse.bsr_matrix
Hessian matrix in sparse matrix representation
If format=='neighbor-list'
hessian_ncc : np.ndarray
Array containing the Hessian blocks per atom pair
distances_nc : np.ndarray
Distance vectors between atom pairs
"""
raise NotImplementedError
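    # Hedged usage sketch for implementations of this interface:
    #
    #   H = calc.get_hessian(atoms)                      # sparse, (3N, 3N)
    #   D = calc.get_hessian(atoms, divide_by_masses=True)
    #   from scipy.sparse.linalg import eigsh
    #   w2, modes = eigsh(D, k=6, which='SM')            # softest modes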
def get_born_elastic_constants(self, atoms):
"""
Compute the Born elastic constants.
Parameters
----------
atoms: ase.Atoms
Atomic configuration in a local or global minima.
"""
H_pcc, i_p, j_p, dr_pc, abs_dr_p = self.get_hessian(atoms, 'neighbour-list')
# Second derivative with respect to displacement gradient
C_pabab = H_pcc.reshape(-1, 3, 1, 3, 1) * dr_pc.reshape(-1, 1, 3, 1, 1) * dr_pc.reshape(-1, 1, 1, 1, 3)
C_abab = -C_pabab.sum(axis=0) / (2 * atoms.get_volume())
# This contribution is necessary in order to obtain second derivative with respect to Green-Lagrange
stress_ab = self.get_property('stress', atoms)
delta_ab = np.identity(3)
if stress_ab.shape != (3, 3):
stress_ab = Voigt_6_to_full_3x3_stress(stress_ab)
C_abab -= stress_ab.reshape(1, 3, 1, 3) * delta_ab.reshape(3, 1, 3, 1)
return C_abab
def get_stress_contribution_to_elastic_constants(self, atoms):
"""
Compute the correction to the elastic constants due to non-zero stress in the configuration.
Stress term results from working with the Cauchy stress.
Parameters
----------
atoms: ase.Atoms
Atomic configuration in a local or global minima.
"""
stress_ab = self.get_property('stress', atoms)
if stress_ab.shape != (3, 3):
stress_ab = Voigt_6_to_full_3x3_stress(stress_ab)
delta_ab = np.identity(3)
stress_contribution = 0.5 * sum(
np.einsum(einsum, stress_ab, delta_ab)
for einsum in (
'am,bn',
'an,bm',
'bm,an',
'bn,am',
)
)
stress_contribution -= np.einsum('ab,mn', stress_ab, delta_ab)
return stress_contribution
def get_birch_coefficients(self, atoms):
"""
Compute the Birch coefficients (Effective elastic constants at non-zero stress).
Parameters
----------
atoms: ase.Atoms
Atomic configuration in a local or global minima.
"""
if self.atoms is None:
self.atoms = atoms
# Born (affine) elastic constants
calculator = self
bornC_abab = calculator.get_born_elastic_constants(atoms)
# Stress contribution to elastic constants
stressC_abab = calculator.get_stress_contribution_to_elastic_constants(atoms)
return bornC_abab + stressC_abab
def get_nonaffine_forces(self, atoms):
"""
Compute the non-affine forces which result from an affine deformation of atoms.
Parameters
----------
atoms: ase.Atoms
Atomic configuration in a local or global minima.
"""
nat = len(atoms)
H_pcc, i_p, j_p, dr_pc, abs_dr_p = self.get_hessian(atoms, 'neighbour-list')
naF_pcab = -0.5 * H_pcc.reshape(-1, 3, 3, 1) * dr_pc.reshape(-1, 1, 1, 3)
naforces_icab = mabincount(i_p, naF_pcab, nat) - mabincount(j_p, naF_pcab, nat)
return naforces_icab
def get_elastic_constants(self,
atoms,
cg_parameters={
"x0": None,
"tol": 1e-5,
"maxiter": None,
"M": None,
"callback": None,
"atol": 1e-5}):
"""
Compute the elastic constants at zero temperature.
These are sum of the born, the non-affine and the stress contribution.
Parameters
----------
atoms: ase.Atoms
Atomic configuration in a local or global minima.
        cg_parameters: dict
            Dictionary for the conjugate-gradient solver.
x0: {array, matrix}
Starting guess for the solution.
tol/atol: float, optional
Tolerances for convergence, norm(residual) <= max(tol*norm(b), atol).
maxiter: int
Maximum number of iterations. Iteration will stop after maxiter steps even if the specified tolerance has not been achieved.
M: {sparse matrix, dense matrix, LinearOperator}
Preconditioner for A.
callback: function
User-supplied function to call after each iteration.
"""
if self.atoms is None:
self.atoms = atoms
# Born (affine) elastic constants
calculator = self
C = calculator.get_born_elastic_constants(atoms)
# Stress contribution to elastic constants
C += calculator.get_stress_contribution_to_elastic_constants(atoms)
# Non-affine contribution
C += nonaffine_elastic_contribution(atoms, cg_parameters=cg_parameters)
return C
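    # Hedged usage sketch (assumes a relaxed configuration; the Voigt
    # conversion helper is assumed to live in matscipy.elasticity):
    #
    #   C_abab = calc.get_elastic_constants(atoms)   # 3x3x3x3 tensor
    #   from matscipy.elasticity import full_3x3x3x3_to_Voigt_6x6
    #   C_voigt = full_3x3x3x3_to_Voigt_6x6(C_abab)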
@deprecate(new_name="elasticity.nonaffine_elastic_contribution")
def get_non_affine_contribution_to_elastic_constants(self, atoms, eigenvalues=None, eigenvectors=None, pc_parameters=None, cg_parameters={"x0": None, "tol": 1e-5, "maxiter": None, "M": None, "callback": None, "atol": 1e-5}):
"""
        Compute the correction of non-affine displacements to the elasticity tensor.
        The computation of the occurring inverse of the Hessian matrix is bypassed by using a cg solver.
        If eigenvalues and eigenvectors are given, the inverse of the Hessian can be computed directly.
Parameters
----------
atoms: ase.Atoms
Atomic configuration in a local or global minima.
eigenvalues: array
Eigenvalues in ascending order obtained by diagonalization of Hessian matrix.
If given, use eigenvalues and eigenvectors to compute non-affine contribution.
eigenvectors: array
Eigenvectors corresponding to eigenvalues.
        cg_parameters: dict
            Dictionary for the conjugate-gradient solver.
x0: {array, matrix}
Starting guess for the solution.
tol/atol: float, optional
Tolerances for convergence, norm(residual) <= max(tol*norm(b), atol).
maxiter: int
Maximum number of iterations. Iteration will stop after maxiter steps even if the specified tolerance has not been achieved.
M: {sparse matrix, dense matrix, LinearOperator}
Preconditioner for A.
callback: function
User-supplied function to call after each iteration.
        pc_parameters: dict
            Dictionary for the incomplete LU decomposition of the Hessian.
A: array_like
Sparse matrix to factorize.
drop_tol: float
Drop tolerance for an incomplete LU decomposition.
fill_factor: float
Specifies the fill ratio upper bound.
drop_rule: str
Comma-separated string of drop rules to use.
permc_spec: str
How to permute the columns of the matrix for sparsity.
diag_pivot_thresh: float
Threshold used for a diagonal entry to be an acceptable pivot.
relax: int
Expert option for customizing the degree of relaxing supernodes.
panel_size: int
Expert option for customizing the panel size.
options: dict
Dictionary containing additional expert options to SuperLU.
"""
nat = len(atoms)
calc = self
if (eigenvalues is not None) and (eigenvectors is not None):
naforces_icab = calc.get_nonaffine_forces(atoms)
G_incc = (eigenvectors.T).reshape(-1, 3*nat, 1, 1) * naforces_icab.reshape(1, 3*nat, 3, 3)
G_incc = (G_incc.T/np.sqrt(eigenvalues)).T
G_icc = np.sum(G_incc, axis=1)
C_abab = np.sum(G_icc.reshape(-1,3,3,1,1) * G_icc.reshape(-1,1,1,3,3), axis=0)
else:
H_nn = calc.get_hessian(atoms)
naforces_icab = calc.get_nonaffine_forces(atoms)
            if pc_parameters is not None:
# Transform H to csc
H_nn = H_nn.tocsc()
# Compute incomplete LU
approx_Hinv = spilu(H_nn, **pc_parameters)
operator_Hinv = LinearOperator(H_nn.shape, approx_Hinv.solve)
cg_parameters["M"] = operator_Hinv
D_iab = np.zeros((3*nat, 3, 3))
for i in range(3):
for j in range(3):
x, info = cg(H_nn, naforces_icab[:, :, i, j].flatten(), **cg_parameters)
if info != 0:
print("info: ", info)
raise RuntimeError(" info > 0: CG tolerance not achieved, info < 0: Exceeded number of iterations.")
D_iab[:,i,j] = x
C_abab = np.sum(naforces_icab.reshape(3*nat, 3, 3, 1, 1) * D_iab.reshape(3*nat, 1, 1, 3, 3), axis=0)
# Symmetrize
C_abab = (C_abab + C_abab.swapaxes(0, 1) + C_abab.swapaxes(2, 3) + C_abab.swapaxes(0, 1).swapaxes(2, 3)) / 4
return -C_abab/atoms.get_volume()
@deprecate(new_name='numerical.numerical_nonaffine_forces')
def get_numerical_non_affine_forces(self, atoms, d=1e-6):
"""
Calculate numerical non-affine forces using central finite differences.
This is done by deforming the box, rescaling atoms and measure the force.
Parameters
----------
atoms: ase.Atoms
Atomic configuration in a local or global minima.
"""
return numerical_nonaffine_forces(atoms, d=d)
| 14,926 | 34.796163 | 228 | py |
matscipy | matscipy-master/matscipy/calculators/pair_potential/__init__.py | #
# Copyright 2014-2015, 2017, 2021 Lars Pastewka (U. Freiburg)
# 2018-2019 Jan Griesser (U. Freiburg)
# 2015 Adrien Gola (KIT)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from .calculator import LennardJonesCut, LennardJonesQuadratic, LennardJonesLinear, FeneLJCut, LennardJones84, PairPotential
| 1,060 | 41.44 | 124 | py |
matscipy | matscipy-master/matscipy/calculators/pair_potential/calculator.py | #
# Copyright 2018-2019, 2021 Jan Griesser (U. Freiburg)
# 2021 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Simple pair potential.
"""
#
# Coding convention
# * All numpy arrays are suffixed with the array dimensions
# * The suffix stands for a certain type of dimension:
# - n: Atomic index, i.e. array dimension of length nb_atoms
# - p: Pair index, i.e. array dimension of length nb_pairs
# - c: Cartesian index, array dimension of length 3
#
from abc import ABC, abstractmethod
import numpy as np
from scipy.sparse import bsr_matrix
from ...neighbours import neighbour_list, first_neighbours
from ..calculator import MatscipyCalculator
from ...numpy_tricks import mabincount
class CutoffInteraction(ABC):
"""Pair interaction potential with cutoff."""
def __init__(self, cutoff):
"""Initialize with cutoff."""
self._cutoff = cutoff
@property
def cutoff(self):
"""Physical cutoff distance for pair interaction."""
return self._cutoff
@cutoff.setter
def cutoff(self, v):
self._cutoff = np.clip(v, 0, None)
def get_cutoff(self):
"""Get cutoff. Deprecated."""
return self.cutoff
@abstractmethod
def __call__(self, r, qi, qj):
"""Compute interaction energy."""
@abstractmethod
def first_derivative(self, r, qi, qj):
"""Compute derivative w/r to distance."""
@abstractmethod
def second_derivative(self, r, qi, qj):
"""Compute second derivative w/r to distance."""
def derivative(self, n=1):
"""Return specified derivative."""
if n == 1:
return self.first_derivative
elif n == 2:
return self.second_derivative
else:
raise ValueError(
"Don't know how to compute {}-th derivative.".format(n)
)
class LennardJonesCut(CutoffInteraction):
"""
Functional form for a 12-6 Lennard-Jones potential with a hard cutoff.
Energy is shifted to zero at cutoff.
"""
def __init__(self, epsilon, sigma, cutoff):
super().__init__(cutoff)
self.epsilon = epsilon
self.sigma = sigma
self.offset = (sigma / cutoff) ** 12 - (sigma / cutoff) ** 6
def __call__(self, r, *args):
r6 = (self.sigma / r) ** 6
return 4 * self.epsilon * ((r6 - 1) * r6 - self.offset)
def first_derivative(self, r, *args):
r = self.sigma / r
r6 = r**6
return -24 * self.epsilon / self.sigma * (2 * r6 - 1) * r6 * r
def second_derivative(self, r, *args):
r2 = (self.sigma / r) ** 2
r6 = r2**3
return 24 * self.epsilon / self.sigma**2 * (26 * r6 - 7) * r6 * r2
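# Hedged usage sketch: PairPotential (defined below) keys interactions by
# pairs of atomic numbers; the parameter values here are illustrative only.
#
#   from ase.lattice.cubic import FaceCenteredCubic
#   atoms = FaceCenteredCubic('Cu', latticeconstant=3.6)
#   atoms.calc = PairPotential({(29, 29): LennardJonesCut(epsilon=0.4,
#                                                         sigma=2.3,
#                                                         cutoff=5.8)})
#   atoms.get_potential_energy()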
###
class LennardJonesQuadratic(CutoffInteraction):
"""
Functional form for a 12-6 Lennard-Jones potential with a soft cutoff.
Energy, its first and second derivative are shifted to zero at cutoff.
"""
def __init__(self, epsilon, sigma, cutoff):
super().__init__(cutoff)
self.epsilon = epsilon
self.sigma = sigma
self.offset_energy = (sigma / cutoff) ** 12 - (sigma / cutoff) ** 6
self.offset_force = (
6 / cutoff * (-2 * (sigma / cutoff) ** 12 + (sigma / cutoff) ** 6)
)
self.offset_dforce = (1 / cutoff**2) * (
156 * (sigma / cutoff) ** 12 - 42 * (sigma / cutoff) ** 6
)
def __call__(self, r, *args):
"""
Return function value (potential energy).
"""
r6 = (self.sigma / r) ** 6
return (
4
* self.epsilon
* (
(r6 - 1) * r6
- self.offset_energy
- (r - self.cutoff) * self.offset_force
- ((r - self.cutoff) ** 2 / 2) * self.offset_dforce
)
)
def first_derivative(self, r, *args):
r6 = (self.sigma / r) ** 6
return (
4
* self.epsilon
* (
(6 / r) * (-2 * r6 + 1) * r6
- self.offset_force
- (r - self.cutoff) * self.offset_dforce
)
)
def second_derivative(self, r, *args):
r6 = (self.sigma / r) ** 6
return (
4
* self.epsilon
* ((1 / r**2) * (156 * r6 - 42) * r6 - self.offset_dforce)
)
###
class LennardJonesLinear(CutoffInteraction):
"""
    Functional form of a 12-6 Lennard-Jones potential with a soft cutoff.
    The energy and the force are shifted to zero at the cutoff.
"""
def __init__(self, epsilon, sigma, cutoff):
super().__init__(cutoff)
self.epsilon = epsilon
self.sigma = sigma
self.offset_energy = (sigma / cutoff) ** 12 - (sigma / cutoff) ** 6
self.offset_force = (
6 / cutoff * (-2 * (sigma / cutoff) ** 12 + (sigma / cutoff) ** 6)
)
def __call__(self, r, *args):
"""
Return function value (potential energy).
"""
r6 = (self.sigma / r) ** 6
return (
4
* self.epsilon
* (
(r6 - 1) * r6
- self.offset_energy
- (r - self.cutoff) * self.offset_force
)
)
def first_derivative(self, r, *args):
r6 = (self.sigma / r) ** 6
return (
4
* self.epsilon
* ((6 / r) * (-2 * r6 + 1) * r6 - self.offset_force)
)
def second_derivative(self, r, *args):
r6 = (self.sigma / r) ** 6
return 4 * self.epsilon * ((1 / r**2) * (156 * r6 - 42) * r6)
###
class FeneLJCut(LennardJonesCut):
"""
    Finite extensible nonlinear elastic (FENE) potential for a bead-spring polymer model.
    For the Lennard-Jones interaction a LJ-cut potential is used. Due to the choice of the
    cutoff (rc=2^(1/6) sigma) the potential and the force are continuous at the cutoff.
"""
def __init__(self, K, R0, epsilon, sigma):
super().__init__(2 ** (1 / 6) * sigma)
self.K = K
self.R0 = R0
self.epsilon = epsilon
self.sigma = sigma
def __call__(self, r, *args):
return -0.5 * self.K * self.R0**2 * np.log(
1 - (r / self.R0) ** 2
) + super().__call__(r)
def first_derivative(self, r, *args):
return self.K * r / (
1 - (r / self.R0) ** 2
) + super().first_derivative(r)
def second_derivative(self, r, *args):
invLength = 1 / (1 - (r / self.R0) ** 2)
return (
self.K * invLength
+ 2 * self.K * r**2 * invLength**2 / self.R0**2
+ super().second_derivative(r)
)
###
class LennardJones84(CutoffInteraction):
"""
    Functional form of an 8-4 Lennard-Jones potential, used to model the structure of CuZr.
Kobayashi, Shinji et. al. "Computer simulation of atomic structure of Cu57Zr43 amorphous alloy."
Journal of the Physical Society of Japan 48.4 (1980): 1147-1152.
"""
def __init__(self, C1, C2, C3, C4, cutoff):
super().__init__(cutoff)
self.C1 = C1
self.C2 = C2
self.C3 = C3
self.C4 = C4
def __call__(self, r, *args):
r4 = (1 / r) ** 4
return (self.C2 * r4 - self.C1) * r4 + self.C3 * r + self.C4
def first_derivative(self, r, *args):
r4 = (1 / r) ** 4
return (-8 * self.C2 * r4 / r + 4 * self.C1 / r) * r4 + self.C3
def second_derivative(self, r, *args):
r4 = (1 / r) ** 4
return (72 * self.C2 * r4 / r**2 - 20 * self.C1 / r**2) * r4
class BeestKramerSanten(CutoffInteraction):
"""
Beest, Kramer, van Santen (BKS) potential.
    The Buckingham part of the energy is shifted to zero at the cutoff.
References
----------
B. W. Van Beest, G. J. Kramer and R. A. Van Santen, Phys. Rev. Lett. 64.16 (1990)
"""
def __init__(self, A, B, C, cutoff):
super().__init__(cutoff)
self.A, self.B, self.C = A, B, C
self.buck_offset_energy = A * np.exp(-B * cutoff) - C / cutoff**6
def __call__(self, r, *args):
return (
self.A * np.exp(-self.B * r)
- self.C / r**6
- self.buck_offset_energy
)
def first_derivative(self, r, *args):
return -self.A * self.B * np.exp(-self.B * r) + 6 * self.C / r**7
def second_derivative(self, r, *args):
return (
self.A * self.B**2 * np.exp(-self.B * r) - 42 * self.C / r**8
)
# Broadcast slices
_c, _cc = np.s_[..., np.newaxis], np.s_[..., np.newaxis, np.newaxis]
class PairPotential(MatscipyCalculator):
implemented_properties = [
"energy",
"free_energy",
"stress",
"forces",
"hessian",
"dynamical_matrix",
"nonaffine_forces",
"birch_coefficients",
"nonaffine_elastic_contribution",
"stress_elastic_contribution",
"born_constants",
'elastic_constants',
]
default_parameters = {}
name = "PairPotential"
class _dummy_charge:
"""Dummy object for when system has no charge."""
def __getitem__(self, x):
return None
def __init__(self, f, cutoff=None):
"""Construct calculator."""
MatscipyCalculator.__init__(self)
self.f = f
self.reset()
def reset(self):
super().reset()
self.dict = {x: obj.cutoff for x, obj in self.f.items()}
self.df = {x: obj.derivative(1) for x, obj in self.f.items()}
self.df2 = {x: obj.derivative(2) for x, obj in self.f.items()}
def _mask_pairs(self, i_p, j_p):
"""Iterate over pair masks."""
numi_p, numj_p = self.atoms.numbers[i_p], self.atoms.numbers[j_p]
for pair in self.dict:
mask = (numi_p == pair[0]) & (numj_p == pair[1])
if pair[0] != pair[1]:
mask |= (numi_p == pair[1]) & (numj_p == pair[0])
yield mask, pair
def _get_charges(self, i_p, j_p):
"""Return charges if available."""
if self.atoms.has("charge"):
return [self.atoms.get_array("charge")[i] for i in (i_p, j_p)]
return [self._dummy_charge(), self._dummy_charge()]
def calculate(self, atoms, properties, system_changes):
"""Calculate system properties."""
super().calculate(atoms, properties, system_changes)
nb_atoms = len(self.atoms)
i_p, j_p, r_p, r_pc = neighbour_list("ijdD", atoms, self.dict)
qi_p, qj_p = self._get_charges(i_p, j_p)
e_p = np.zeros_like(r_p)
de_p = np.zeros_like(r_p)
for mask, pair in self._mask_pairs(i_p, j_p):
e_p[mask] = self.f[pair](r_p[mask], qi_p[mask], qj_p[mask])
de_p[mask] = self.df[pair](r_p[mask], qi_p[mask], qj_p[mask])
epot = 0.5 * np.sum(e_p)
# Forces
df_pc = -0.5 * de_p[_c] * r_pc / r_p[_c]
f_nc = mabincount(j_p, df_pc, nb_atoms) - mabincount(
i_p, df_pc, nb_atoms
)
# Virial
virial_v = -np.array(
[
r_pc[:, 0] * df_pc[:, 0], # xx
r_pc[:, 1] * df_pc[:, 1], # yy
r_pc[:, 2] * df_pc[:, 2], # zz
r_pc[:, 1] * df_pc[:, 2], # yz
r_pc[:, 0] * df_pc[:, 2], # xz
r_pc[:, 0] * df_pc[:, 1],
]
).sum(
axis=1
) # xy
self.results.update(
{
"energy": epot,
"free_energy": epot,
"stress": virial_v / atoms.get_volume(),
"forces": f_nc,
}
)
###
def get_hessian(self, atoms, format="sparse", divide_by_masses=False):
"""
Calculate the Hessian matrix for a pair potential.
For an atomic configuration with N atoms in d dimensions the hessian matrix is a symmetric, hermitian matrix
with a shape of (d*N,d*N). The matrix is in general a sparse matrix, which consists of dense blocks of
shape (d,d), which are the mixed second derivatives. The result of the derivation for a pair potential can be
found e.g. in:
L. Pastewka et. al. "Seamless elastic boundaries for atomistic calculations", Phys. Rev. B 86, 075459 (2012).
Parameters
----------
atoms: ase.Atoms
Atomic configuration in a local or global minima.
format: "sparse" or "neighbour-list"
Output format of the hessian matrix.
divide_by_masses: bool
if true return the dynamic matrix else hessian matrix
Restrictions
----------
This method is currently only implemented for three dimensional systems
"""
if self.atoms is None:
self.atoms = atoms
f = self.f
df = self.df
df2 = self.df2
nb_atoms = len(atoms)
i_p, j_p, r_p, r_pc = neighbour_list("ijdD", atoms, self.dict)
first_i = first_neighbours(nb_atoms, i_p)
qi_p, qj_p = self._get_charges(i_p, j_p)
e_p = np.zeros_like(r_p)
de_p = np.zeros_like(r_p)
dde_p = np.zeros_like(r_p)
for mask, pair in self._mask_pairs(i_p, j_p):
e_p[mask] = f[pair](r_p[mask], qi_p[mask], qj_p[mask])
de_p[mask] = df[pair](r_p[mask], qi_p[mask], qj_p[mask])
dde_p[mask] = df2[pair](r_p[mask], qi_p[mask], qj_p[mask])
n_pc = r_pc / r_p[_c]
nn_pcc = n_pc[..., :, np.newaxis] * n_pc[..., np.newaxis, :]
H_pcc = -(dde_p[_cc] * nn_pcc)
H_pcc += -((de_p / r_p)[_cc] * (np.eye(3, dtype=n_pc.dtype) - nn_pcc))
# Sparse BSR-matrix
if format == "sparse":
if divide_by_masses:
masses_n = atoms.get_masses()
geom_mean_mass_p = np.sqrt(masses_n[i_p] * masses_n[j_p])
H = bsr_matrix(
((H_pcc.T / geom_mean_mass_p).T, j_p, first_i),
shape=(3 * nb_atoms, 3 * nb_atoms),
)
else:
H = bsr_matrix(
(H_pcc, j_p, first_i), shape=(3 * nb_atoms, 3 * nb_atoms)
)
Hdiag_icc = np.empty((nb_atoms, 3, 3))
for x in range(3):
for y in range(3):
Hdiag_icc[:, x, y] = -np.bincount(
i_p, weights=H_pcc[:, x, y], minlength=nb_atoms
)
if divide_by_masses:
H += bsr_matrix(
(
(Hdiag_icc.T / masses_n).T,
np.arange(nb_atoms),
np.arange(nb_atoms + 1),
),
shape=(3 * nb_atoms, 3 * nb_atoms),
)
else:
H += bsr_matrix(
(Hdiag_icc, np.arange(nb_atoms), np.arange(nb_atoms + 1)),
shape=(3 * nb_atoms, 3 * nb_atoms),
)
return H
# Neighbour list format
elif format == "neighbour-list":
return H_pcc, i_p, j_p, r_pc, r_p
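# End-of-module note (sketch): the 'neighbour-list' format returns the raw
# per-pair blocks that the sparse assembly above is built from,
#
#   H_pcc, i_p, j_p, r_pc, r_p = calc.get_hessian(atoms,
#                                                 format='neighbour-list')
#
# and is the representation consumed by
# MatscipyCalculator.get_born_elastic_constants.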
| 15,872 | 29.349904 | 117 | py |
matscipy | matscipy-master/matscipy/calculators/ewald/__init__.py | #
# Copyright 2014-2015, 2017, 2021 Lars Pastewka (U. Freiburg)
# 2018-2019 Jan Griesser (U. Freiburg)
# 2015 Adrien Gola (KIT)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from .calculator import Ewald
| 965 | 37.64 | 71 | py |
matscipy | matscipy-master/matscipy/calculators/ewald/calculator.py | #
# Copyright 2021 Jan Griesser (U. Freiburg)
# 2021 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Pair potential + Ewald summation
"""
#
# Coding convention
# * All numpy arrays are suffixed with the array dimensions
# * The suffix stands for a certain type of dimension:
# - n: Atomic index, i.e. array dimension of length nb_atoms
# - p: Pair index, i.e. array dimension of length nb_pairs
# - c: Cartesian index, array dimension of length 3
# - l: Wave vector index, i.e. array of dimension length of k_lc
from collections import defaultdict
import numpy as np
from scipy.linalg import block_diag
from scipy.special import erfc
from ...calculators.pair_potential.calculator import (
PairPotential,
CutoffInteraction,
)
# Charges q are expressed as multiples of the elementary charge e: q = x*e
# e^2/(4*pi*epsilon0) = 14.399645 eV * Angström
conversion_prefactor = 14.399645
class EwaldShortRange(CutoffInteraction):
"""Short range term of Ewald summation."""
def __init__(self, alpha, cutoff):
super().__init__(cutoff)
self.alpha = alpha
def __call__(self, r, qi, qj):
return conversion_prefactor * qi * qj * erfc(self.alpha * r) / r
def first_derivative(self, r, qi, qj):
a = self.alpha
return (
-conversion_prefactor
* qi
* qj
* (
erfc(a * r) / r**2
+ 2 * a * np.exp(-((a * r) ** 2)) / (np.sqrt(np.pi) * r)
)
)
def second_derivative(self, r, qi, qj):
a = self.alpha
return (
conversion_prefactor
* qi
* qj
* (
2 * erfc(a * r) / r**3
+ 4
* a
* np.exp((-((a * r) ** 2)))
/ np.sqrt(np.pi)
* (1 / r**2 + a**2)
)
)
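    # Consistency sketch (illustrative, not part of the class): the analytic
    # first derivative above can be checked against a central finite
    # difference, e.g.
    #
    #   sr = EwaldShortRange(alpha=0.35, cutoff=10.0)
    #   r, h = 3.0, 1e-6
    #   fd = (sr(r + h, 1.0, -1.0) - sr(r - h, 1.0, -1.0)) / (2 * h)
    #   assert abs(fd - sr.first_derivative(r, 1.0, -1.0)) < 1e-6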
class Ewald(PairPotential):
"""Ewal summation calculator."""
name = "Ewald"
default_parameters = {
"accuracy": 1e-6,
"cutoff": 3,
"verbose": True,
"kspace": {},
}
def __init__(self):
super().__init__(defaultdict(lambda: self.short_range))
self.set(**self.parameters)
self.kvectors = None
self.initial_I = None
self.initial_alpha = None
@property
def short_range(self):
return EwaldShortRange(
self.alpha,
self.parameters["cutoff"],
)
@property
def alpha(self):
"""Get alpha."""
return self._alpha
@alpha.setter
def alpha(self, v):
"""Set alpha."""
self._alpha = v
def set(self, **kwargs):
super().set(**kwargs)
if "accuracy" in kwargs:
self.reset()
def reset(self):
super().reset()
self.dict = defaultdict(lambda: self.short_range.cutoff)
self.df = defaultdict(lambda: self.short_range.derivative(1))
self.df2 = defaultdict(lambda: self.short_range.derivative(2))
def _mask_pairs(self, i_p, j_p):
"""Match all atom types to the (1, 1) object for pair interactions."""
yield np.s_[:], (1, 1)
@staticmethod
def determine_alpha(charge, acc, cutoff, cell):
"""
Determine an estimate for alpha on the basis of the cell, cutoff and desired accuracy
        (Adapted from LAMMPS)
"""
# The kspace rms error is computed relative to the force that two unit point
# charges exert on each other at a distance of 1 Angström
accuracy_relative = acc * conversion_prefactor
qsqsum = conversion_prefactor * np.sum(charge**2)
a = (
accuracy_relative
* np.sqrt(
len(charge) * cutoff * cell[0, 0] * cell[1, 1] * cell[2, 2]
)
/ (2 * qsqsum)
)
if a >= 1.0:
return (1.35 - 0.15 * np.log(accuracy_relative)) / cutoff
else:
return np.sqrt(-np.log(a)) / cutoff
@staticmethod
def determine_nk(charge, cell, acc, a, natoms):
"""
Determine the maximal number of points in reciprocal space for each direction,
and the cutoff in reciprocal space
"""
# The kspace rms error is computed relative to the force that two unit point
# charges exert on each other at a distance of 1 Angström
accuracy_relative = acc * conversion_prefactor
nxmax = 1
nymax = 1
nzmax = 1
qsqsum = conversion_prefactor * np.sum(charge**2)
error = Ewald.rms_kspace(nxmax, cell[0, 0], natoms, a, qsqsum)
while error > (accuracy_relative):
nxmax += 1
error = Ewald.rms_kspace(nxmax, cell[0, 0], natoms, a, qsqsum)
error = Ewald.rms_kspace(nymax, cell[1, 1], natoms, a, qsqsum)
while error > (accuracy_relative):
nymax += 1
error = Ewald.rms_kspace(nymax, cell[1, 1], natoms, a, qsqsum)
error = Ewald.rms_kspace(nzmax, cell[2, 2], natoms, a, qsqsum)
while error > (accuracy_relative):
nzmax += 1
error = Ewald.rms_kspace(nzmax, cell[2, 2], natoms, a, qsqsum)
kxmax = 2 * np.pi / cell[0, 0] * nxmax
kymax = 2 * np.pi / cell[1, 1] * nymax
kzmax = 2 * np.pi / cell[2, 2] * nzmax
kmax = max(kxmax, kymax, kzmax)
# Check if box is triclinic --> Scale lattice vectors for triclinic skew
        if np.count_nonzero(cell - np.diag(np.diagonal(cell))) != 0:
vector = np.array(
[nxmax / cell[0, 0], nymax / cell[1, 1], nzmax / cell[2, 2]]
)
scaled_nbk = np.dot(np.array(np.abs(cell)), vector)
            nxmax = max(1, int(scaled_nbk[0]))
            nymax = max(1, int(scaled_nbk[1]))
            nzmax = max(1, int(scaled_nbk[2]))
return kmax, np.array([nxmax, nymax, nzmax])
@staticmethod
def determine_kc(cell, nk):
"""
        Determine maximal wave vector based on a given integer triplet
"""
kxmax = 2 * np.pi / cell[0, 0] * nk[0]
kymax = 2 * np.pi / cell[1, 1] * nk[1]
kzmax = 2 * np.pi / cell[2, 2] * nk[2]
return max(kxmax, kymax, kzmax)
@staticmethod
def rms_kspace(km, l, n, a, q2):
"""
Compute the root mean square error of the force in reciprocal space
Reference
------------------
Henrik G. Petersen, The Journal of chemical physics 103.9 (1995)
"""
return (
2
* q2
* a
/ l
* np.sqrt(1 / (np.pi * km * n))
* np.exp(-((np.pi * km / (a * l)) ** 2))
)
@staticmethod
def rms_rspace(charge, cell, a, rc):
"""
Compute the root mean square error of the force in real space
Reference
------------------
Henrik G. Petersen, The Journal of chemical physics 103.9 (1995)
"""
return (
2
* np.sum(charge**2)
* np.exp(-((a * rc) ** 2))
/ np.sqrt(rc * len(charge) * cell[0, 0] * cell[1, 1] * cell[2, 2])
)
@staticmethod
def allowed_wave_vectors(cell, km, a, nk):
"""
Compute allowed wave vectors and the prefactor I
"""
nx = np.arange(-nk[0], nk[0] + 1, 1)
ny = np.arange(-nk[1], nk[1] + 1, 1)
nz = np.arange(-nk[2], nk[2] + 1, 1)
n_lc = np.array(np.meshgrid(nx, ny, nz)).T.reshape(-1, 3)
k_lc = 2 * np.pi * np.dot(np.linalg.inv(np.array(cell)), n_lc.T).T
k = np.linalg.norm(k_lc, axis=1)
mask = np.logical_and(k <= km, k != 0)
return np.exp(-((k[mask] / (2 * a)) ** 2)) / k[mask] ** 2, k_lc[mask]
@staticmethod
def self_energy(charge, a):
"""
Return the self energy
"""
return -conversion_prefactor * a * np.sum(charge**2) / np.sqrt(np.pi)
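    # Back-of-envelope check (illustrative): for a two-ion cell with charges
    # +1 and -1 and alpha = 0.3 / Angström, the self energy is
    #   -14.399645 * 0.3 * 2 / sqrt(pi) ~ -4.87 eV.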
@staticmethod
def kspace_energy(charge, pos, vol, I, k):
"""
Return the energy from the reciprocal space contribution
"""
structure_factor_l = np.sum(
charge * np.exp(1j * np.tensordot(k, pos, axes=((1), (1)))), axis=1
)
return (
conversion_prefactor
* 2
* np.pi
* np.sum(I * np.absolute(structure_factor_l) ** 2)
/ vol
)
@staticmethod
def first_derivative_kspace(charge, natoms, vol, pos, I, k):
"""Return the kspace part of the force."""
n = len(pos)
phase_ln = np.tensordot(k, pos, axes=((1), (1)))
cos_ln = np.cos(phase_ln)
sin_ln = np.sin(phase_ln)
cos_sin_ln = (cos_ln.T * np.sum(charge * sin_ln, axis=1)).T
sin_cos_ln = (sin_ln.T * np.sum(charge * cos_ln, axis=1)).T
prefactor_ln = (I * (cos_sin_ln - sin_cos_ln).T).T
f_nc = np.sum(
k.reshape(-1, 1, 3) * prefactor_ln.reshape(-1, n, 1), axis=0
)
return -conversion_prefactor * 4 * np.pi * (charge * f_nc.T).T / vol
@staticmethod
def stress_kspace(charge, pos, vol, a, I, k):
"""Return the stress contribution of the long-range Coulomb part."""
sqk_l = np.sum(k * k, axis=1)
structure_factor_l = np.sum(
charge * np.exp(1j * np.tensordot(k, pos, axes=((1), (1)))), axis=1
)
wave_vectors_lcc = (k.reshape(-1, 3, 1) * k.reshape(-1, 1, 3)) * (
1 / (2 * a**2) + 2 / sqk_l
).reshape(-1, 1, 1) - np.identity(3)
stress_lcc = (I * np.absolute(structure_factor_l) ** 2).reshape(
len(I), 1, 1
) * wave_vectors_lcc
stress_cc = np.sum(stress_lcc, axis=0)
stress_cc *= conversion_prefactor * 2 * np.pi / vol
        return np.array(
            [
                stress_cc[0, 0],  # xx
                stress_cc[1, 1],  # yy
                stress_cc[2, 2],  # zz
                stress_cc[1, 2],  # yz
                stress_cc[0, 2],  # xz
                stress_cc[0, 1],  # xy
            ]
        )
def reset_kspace(self, atoms):
"""Reset kspace setup."""
if not atoms.has("charge"):
raise AttributeError(
"Unable to load atom charges from atoms object!"
)
charge_n = atoms.get_array("charge")
if np.abs(charge_n.sum()) > 1e-3:
print("Net charge: ", np.sum(charge_n))
raise AttributeError("System is not charge neutral!")
if not all(atoms.get_pbc()):
raise AttributeError(
"This code only works for 3D systems with periodic boundaries!"
)
accuracy = self.parameters["accuracy"]
rc = self.parameters["cutoff"]
kspace_params = self.parameters["kspace"]
self.alpha = kspace_params.get(
"alpha",
self.determine_alpha(charge_n, accuracy, rc, atoms.get_cell()),
)
alpha = self.alpha
nb_atoms = len(atoms)
if "nbk_c" in kspace_params:
nbk_c = kspace_params["nbk_c"]
kc = kspace_params.get(
"cutoff", self.determine_kc(atoms.get_cell(), nbk_c)
)
else:
kc, nbk_c = self.determine_nk(
charge_n, atoms.get_cell(), accuracy, alpha, nb_atoms
)
self.set(cutoff_kspace=kc)
self.initial_alpha = alpha
I_l, k_lc = self.allowed_wave_vectors(
atoms.get_cell(), kc, alpha, nbk_c
)
self.kvectors = k_lc
self.initial_I = I_l
        # Printing info
if self.parameters.get("verbose"):
rms_rspace = self.rms_rspace(charge_n, atoms.get_cell(), alpha, rc)
rms_kspace = [
self.rms_kspace(
nbk_c[i],
atoms.get_cell()[i, i],
nb_atoms,
alpha,
conversion_prefactor * np.sum(charge_n**2),
)
for i in range(3)
]
print("Estimated alpha: ", alpha)
print("Number of wave vectors: ", k_lc.shape[0])
print("Cutoff for kspace vectors: ", kc)
print(
"Estimated kspace triplets nx/ny/nx: ",
nbk_c[0],
"/",
nbk_c[1],
"/",
nbk_c[2],
)
print(
"Estimated absolute RMS force accuracy (Real space): ",
np.absolute(rms_rspace),
)
print(
"Estimated absolute RMS force accuracy (Kspace): ",
np.linalg.norm(rms_kspace),
)
def calculate(self, atoms, properties, system_changes):
"""Compute Coulomb interactions with Ewald summation."""
if "cell" in system_changes or getattr(self, 'alpha', None) is None:
self.reset_kspace(atoms)
super().calculate(atoms, properties, system_changes)
nb_atoms = len(atoms)
charge_n = atoms.get_array("charge")
k_lc = self.kvectors
I_l = self.initial_I
alpha = self.alpha
# Energy
e_self = self.self_energy(charge_n, alpha)
e_long = self.kspace_energy(
charge_n, atoms.get_positions(), atoms.get_volume(), I_l, k_lc
)
self.results["energy"] += e_self + e_long
self.results["free_energy"] += e_self + e_long
# Forces
self.results["forces"] += self.first_derivative_kspace(
charge_n,
nb_atoms,
atoms.get_volume(),
atoms.get_positions(),
I_l,
k_lc,
)
# Virial
self.results["stress"] += (
self.stress_kspace(
charge_n,
atoms.get_positions(),
atoms.get_volume(),
alpha,
I_l,
k_lc,
)
/ atoms.get_volume()
)
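    # Usage sketch (illustrative; assumes a charge-neutral, fully periodic
    # configuration carrying a per-atom "charge" array):
    #
    #   from ase.build import bulk
    #   atoms = bulk('NaCl', 'rocksalt', a=5.64, cubic=True)
    #   symbols = np.array(atoms.get_chemical_symbols())
    #   atoms.set_array('charge', np.where(symbols == 'Na', 1.0, -1.0))
    #   atoms.calc = Ewald()
    #   atoms.calc.set(accuracy=1e-6, cutoff=8.0)
    #   energy = atoms.get_potential_energy()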
def kspace_properties(self, atoms, prop="Hessian", divide_by_masses=False):
"""
        Calculate the reciprocal-space contribution to the Hessian, the
        non-affine forces and the Born elastic constants
Parameters
----------
        atoms: ase.Atoms
            Atomic configuration in a local or global minimum.
        prop: "Hessian", "Born" or "Naforces"
            Compute either the Hessian/dynamical matrix, the Born constants
            or the non-affine forces.
        divide_by_masses: bool
            If True, return the dynamical matrix instead of the Hessian matrix.
Restrictions
----------
This method is currently only implemented for three dimensional systems
"""
nb_atoms = len(atoms)
alpha = self.alpha
k_lc = self.kvectors
I_l = self.initial_I
charge_n = atoms.get_array("charge")
if prop == "Hessian":
H = np.zeros((3 * nb_atoms, 3 * nb_atoms))
pos = atoms.get_positions()
for i, k in enumerate(k_lc):
phase_l = np.sum(k * pos, axis=1)
I_sqcos_sqsin = I_l[i] * (
np.cos(phase_l).reshape(-1, 1)
* np.cos(phase_l).reshape(1, -1)
+ np.sin(phase_l).reshape(-1, 1)
* np.sin(phase_l).reshape(1, -1)
)
I_sqcos_sqsin[range(nb_atoms), range(nb_atoms)] = 0.0
H += np.concatenate(
np.concatenate(
k.reshape(1, 1, 3, 1)
* k.reshape(1, 1, 1, 3)
* I_sqcos_sqsin.reshape(nb_atoms, nb_atoms, 1, 1),
axis=2,
),
axis=0,
)
H *= (
(conversion_prefactor * 4 * np.pi / atoms.get_volume())
* charge_n.repeat(3).reshape(-1, 1)
* charge_n.repeat(3).reshape(1, -1)
)
Hdiag = np.zeros((3 * nb_atoms, 3))
for x in range(3):
Hdiag[:, x] = -np.sum(H[:, x::3], axis=1)
Hdiag = block_diag(*Hdiag.reshape(nb_atoms, 3, 3))
H += Hdiag
if divide_by_masses:
masses_p = (atoms.get_masses()).repeat(3)
H /= np.sqrt(masses_p.reshape(-1, 1) * masses_p.reshape(1, -1))
return H
elif prop == "Born":
delta_ab = np.identity(3)
sqk_l = np.sum(k_lc * k_lc, axis=1)
structure_factor_l = np.sum(
charge_n
* np.exp(
1j
* np.tensordot(
k_lc, atoms.get_positions(), axes=((1), (1))
)
),
axis=1,
)
prefactor_l = (I_l * np.absolute(structure_factor_l) ** 2).reshape(
-1, 1, 1, 1, 1
)
# First expression
first_abab = delta_ab.reshape(1, 3, 3, 1, 1) * delta_ab.reshape(
1, 1, 1, 3, 3
) + delta_ab.reshape(1, 1, 3, 3, 1) * delta_ab.reshape(
1, 3, 1, 1, 3
)
# Second expression
prefactor_second_l = -(1 / (2 * alpha**2) + 2 / sqk_l).reshape(
-1, 1, 1, 1, 1
)
second_labab = (
k_lc.reshape(-1, 1, 1, 3, 1)
* k_lc.reshape(-1, 1, 1, 1, 3)
* delta_ab.reshape(1, 3, 3, 1, 1)
+ k_lc.reshape(-1, 3, 1, 1, 1)
* k_lc.reshape(-1, 1, 1, 3, 1)
* delta_ab.reshape(1, 1, 3, 1, 3)
+ k_lc.reshape(-1, 3, 1, 1, 1)
* k_lc.reshape(-1, 1, 3, 1, 1)
* delta_ab.reshape(1, 1, 1, 3, 3)
+ k_lc.reshape(-1, 1, 3, 1, 1)
* k_lc.reshape(-1, 1, 1, 3, 1)
* delta_ab.reshape(1, 3, 1, 1, 3)
+ k_lc.reshape(-1, 3, 1, 1, 1)
* k_lc.reshape(-1, 1, 1, 1, 3)
* delta_ab.reshape(1, 1, 3, 3, 1)
)
# Third expression
prefactor_third_l = (
1 / (4 * alpha**4)
+ 2 / (alpha**2 * sqk_l)
+ 8 / sqk_l**2
).reshape(-1, 1, 1, 1, 1)
third_labab = (
k_lc.reshape(-1, 3, 1, 1, 1)
* k_lc.reshape(-1, 1, 3, 1, 1)
* k_lc.reshape(-1, 1, 1, 3, 1)
* k_lc.reshape(-1, 1, 1, 1, 3)
)
C_labab = prefactor_l * (
first_abab
+ prefactor_second_l * second_labab
+ prefactor_third_l * third_labab
)
return (
conversion_prefactor
* 2
* np.pi
* np.sum(C_labab, axis=0)
/ atoms.get_volume() ** 2
)
elif prop == "Naforces":
delta_ab = np.identity(3)
sqk_l = np.sum(k_lc * k_lc, axis=1)
phase_ln = np.tensordot(
k_lc, atoms.get_positions(), axes=((1), (1))
)
cos_ln = np.cos(phase_ln)
sin_ln = np.sin(phase_ln)
cos_sin_ln = (cos_ln.T * np.sum(charge_n * sin_ln, axis=1)).T
sin_cos_ln = (sin_ln.T * np.sum(charge_n * cos_ln, axis=1)).T
prefactor_ln = (I_l * (cos_sin_ln - sin_cos_ln).T).T
# First expression
first_lccc = (
(1 / (2 * alpha**2) + 2 / sqk_l).reshape(-1, 1, 1, 1)
* k_lc.reshape(-1, 1, 1, 3)
* k_lc.reshape(-1, 3, 1, 1)
* k_lc.reshape(-1, 1, 3, 1)
)
# Second expression
second_lccc = -(
k_lc.reshape(-1, 3, 1, 1) * delta_ab.reshape(-1, 1, 3, 3)
+ k_lc.reshape(-1, 1, 3, 1) * delta_ab.reshape(-1, 3, 1, 3)
)
naforces_nccc = np.sum(
prefactor_ln.reshape(-1, nb_atoms, 1, 1, 1)
* (first_lccc + second_lccc).reshape(-1, 1, 3, 3, 3),
axis=0,
)
return (
-conversion_prefactor
* 4
* np.pi
* (charge_n * naforces_nccc.T).T
/ atoms.get_volume()
)
def get_hessian(self, atoms, format=""):
"""
Compute the real space + kspace Hessian
"""
# Ignore kspace here
if format == "neighbour-list":
return super().get_hessian(atoms, format=format)
return super().get_hessian(
atoms, format="sparse"
).todense() + self.kspace_properties(atoms, prop="Hessian")
def get_nonaffine_forces(self, atoms):
"""
Compute the non-affine forces which result from an affine deformation of atoms.
Parameters
----------
atoms: ase.Atoms
            Atomic configuration in a local or global minimum.
"""
return super().get_nonaffine_forces(atoms) + self.kspace_properties(
atoms, prop="Naforces"
)
def get_born_elastic_constants(self, atoms):
"""
Compute the Born elastic constants.
Parameters
----------
atoms: ase.Atoms
            Atomic configuration in a local or global minimum.
"""
C_abab = super().get_born_elastic_constants(
atoms
) + self.kspace_properties(atoms, prop="Born")
# Symmetrize elastic constant tensor
C_abab = (
C_abab
+ C_abab.swapaxes(0, 1)
+ C_abab.swapaxes(2, 3)
+ C_abab.swapaxes(0, 1).swapaxes(2, 3)
) / 4
return C_abab | 22,564 | 29.742507 | 93 | py |
matscipy | matscipy-master/matscipy/calculators/polydisperse/__init__.py | #
# Copyright 2014-2015, 2017, 2021 Lars Pastewka (U. Freiburg)
# 2018-2020 Jan Griesser (U. Freiburg)
# 2015 Adrien Gola (KIT)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from .calculator import InversePowerLawPotential, Polydisperse
| 998 | 38.96 | 71 | py |
matscipy | matscipy-master/matscipy/calculators/polydisperse/calculator.py | #
# Copyright 2020-2021 Jan Griesser (U. Freiburg)
# 2020 [email protected]
# 2020 Arnaud Allera (U. Lyon 1)
# 2014 Lars Pastewka (U. Freiburg)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from scipy.special import factorial2
from scipy.sparse import bsr_matrix
import ase
from ...neighbours import neighbour_list, first_neighbours
from ..calculator import MatscipyCalculator
from ...numpy_tricks import mabincount
###
class InversePowerLawPotential():
"""
    Functional form for a smoothed inverse-power-law potential (IPL)
    with a repulsive exponent of 10.
Parameters
----------
epsilon : float
Energy scale
cutoff : float
Cutoff for the pair-interaction
    minSize : float
        Minimal size of a particle, lower bound of distribution
    maxSize : float
        Maximal size of a particle, upper bound of distribution
na : float
        Non-additivity parameter for pairwise sizes
q : int
Smooth the potential up to the q-th derivative.
        For q=0 the potential is zero at the cutoff, for q=1 the potential
        and its first derivative are zero at the cutoff, ...
Reference:
----------
    E. Lerner, Journal of Non-Crystalline Solids, 522, 119570 (2019).
"""
def __init__(self, epsilon, cutoff, na, q, minSize, maxSize):
self.epsilon = epsilon
self.cutoff = cutoff
self.minSize = minSize
self.maxSize = maxSize
self.na = na
self.q = q
self.coeffs = []
for index in range(0, q+1):
first_expr = np.power(-1, index+1) / (factorial2(
2*q - 2*index, exact=True) * factorial2(2*index, exact=True))
second_expr = factorial2(10+2*q, exact=True) / (factorial2(
8, exact=True) * (10+2*index))
third_expr = np.power(cutoff, -(10+2*index))
self.coeffs.append(first_expr * second_expr * third_expr)
def __call__(self, r, ijsize):
"""
Return function value (potential energy)
"""
ipl = self.epsilon * np.power(ijsize, 10) / np.power(r, 10)
for l in range(0, self.q+1):
ipl += self.epsilon * self.coeffs[l] * np.power(r/ijsize, 2*l)
return ipl
def mix_sizes(self, isize, jsize):
"""
Nonadditive interaction rule for the cross size of particles i and j.
"""
return 0.5 * (isize+jsize) * (1 - self.na * np.absolute(isize-jsize))
def get_cutoff(self):
"""
Return the cutoff.
"""
return self.cutoff
def get_coeffs(self):
"""
Return the smoothing coefficients of the potential.
"""
return self.coeffs
def get_maxSize(self):
"""
Return the maximal size of a particle (=Upper boundary of distribution)
"""
return self.maxSize
def get_minSize(self):
"""
Return the minimal size of a particle (=Lower boundary of distribution)
"""
return self.minSize
def first_derivative(self, r, ijsize):
"""
Return first derivative
"""
dipl = -10 * self.epsilon * np.power(ijsize, 10) / np.power(r, 11)
for l in range(0, self.q+1):
dipl += 2*self.epsilon*l * \
self.coeffs[l] * np.power(r, 2*l-1) / np.power(ijsize, 2*l)
return dipl
def second_derivative(self, r, ijsize):
"""
Return second derivative
"""
ddipl = 110 * self.epsilon * np.power(ijsize, 10) / np.power(r, 12)
for l in range(0, self.q+1):
ddipl += self.epsilon * \
(4*np.power(l, 2)-2*l) * \
self.coeffs[l] * np.power(r, 2*l-2) / np.power(ijsize, 2*l)
return ddipl
def derivative(self, n=1):
if n == 1:
return self.first_derivative
elif n == 2:
return self.second_derivative
else:
raise ValueError(
"Don't know how to compute {}-th derivative.".format(n))
###
class Polydisperse(MatscipyCalculator):
implemented_properties = [
"energy",
"free_energy",
"stress",
"forces",
"hessian",
"dynamical_matrix",
"nonaffine_forces",
"birch_coefficients",
"nonaffine_elastic_contribution",
"stress_elastic_contribution",
"born_constants",
'elastic_constants',
]
default_parameters = {}
name = "Polydisperse"
def __init__(self, f, cutoff=None):
MatscipyCalculator.__init__(self)
self.f = f
self.reset()
def calculate(self, atoms, properties, system_changes):
super().calculate(atoms, properties, system_changes)
f = self.f
nb_atoms = len(self.atoms)
if atoms.has("size"):
size = self.atoms.get_array("size")
else:
raise AttributeError(
"Attribute error: Unable to load atom sizes from atoms object!")
i_p, j_p, r_pc, r_p = neighbour_list("ijDd", self.atoms, f.get_maxSize()*f.get_cutoff())
ijsize = f.mix_sizes(size[i_p], size[j_p])
# Mask neighbour list to consider only true neighbors
mask = r_p <= f.get_cutoff() * ijsize
i_p = i_p[mask]
j_p = j_p[mask]
r_pc = r_pc[mask]
r_p = r_p[mask]
ijsize = ijsize[mask]
e_p = f(r_p, ijsize)
de_p = f.first_derivative(r_p, ijsize)
# Energy
epot = 0.5*np.sum(e_p)
# Forces
df_pc = -0.5*de_p.reshape(-1, 1)*r_pc/r_p.reshape(-1, 1)
f_nc = mabincount(j_p, df_pc, nb_atoms) - mabincount(i_p, df_pc, nb_atoms)
# Virial
virial_v = -np.array([r_pc[:, 0] * df_pc[:, 0], # xx
r_pc[:, 1] * df_pc[:, 1], # yy
r_pc[:, 2] * df_pc[:, 2], # zz
r_pc[:, 1] * df_pc[:, 2], # yz
r_pc[:, 0] * df_pc[:, 2], # xz
r_pc[:, 0] * df_pc[:, 1]]).sum(axis=1) # xy
self.results.update(
{
'energy': epot,
'free_energy': epot,
'stress': virial_v / self.atoms.get_volume(),
'forces': f_nc,
}
)
###
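    # Usage sketch (illustrative; the per-atom "size" array is mandatory):
    #
    #   from ase import Atoms
    #   ipl = InversePowerLawPotential(1.0, 1.4, 0.2, 2, 0.8, 1.2)
    #   atoms = Atoms('X2', positions=[[0, 0, 0], [1.1, 0, 0]],
    #                 cell=[10, 10, 10], pbc=True)
    #   atoms.new_array('size', np.array([1.0, 1.0]))
    #   atoms.calc = Polydisperse(ipl)
    #   energy = atoms.get_potential_energy()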
def get_hessian(self, atoms, format='sparse', divide_by_masses=False):
"""
        Calculate the Hessian matrix for a polydisperse system where atoms interact via a pair potential.
        For an atomic configuration with N atoms in d dimensions the Hessian matrix is a symmetric, hermitian matrix
        of shape (d*N, d*N). Due to the cutoff function, the matrix is sparse and consists of dense blocks of shape (d, d),
        which are the mixed second derivatives. The result of the derivation for a pair potential can be found in:
        L. Pastewka et al., "Seamless elastic boundaries for atomistic calculations", Phys. Rev. B 86, 075459 (2012).
Parameters
----------
atoms: ase.Atoms
            Atomic configuration in a local or global minimum.
format: "sparse" or "neighbour-list"
Output format of the hessian matrix.
divide_by_masses: bool
            Divide the block "l,m" by the corresponding atomic masses "sqrt(m_l * m_m)" to obtain the dynamical matrix.
Restrictions
----------
This method is currently only implemented for three dimensional systems
"""
if self.atoms is None:
self.atoms = atoms
f = self.f
nb_atoms = len(self.atoms)
if atoms.has("size"):
size = atoms.get_array("size")
else:
raise AttributeError(
"Attribute error: Unable to load atom sizes from atoms object! Probably missing size array.")
i_p, j_p, r_pc, r_p = neighbour_list("ijDd", self.atoms, f.get_maxSize()*f.get_cutoff())
ijsize = f.mix_sizes(size[i_p], size[j_p])
# Mask neighbour list to consider only true neighbors
mask = r_p <= f.get_cutoff()*ijsize
i_p = i_p[mask]
j_p = j_p[mask]
r_pc = r_pc[mask]
r_p = r_p[mask]
ijsize = ijsize[mask]
first_i = first_neighbours(nb_atoms, i_p)
if divide_by_masses:
mass_n = atoms.get_masses()
geom_mean_mass_p = np.sqrt(mass_n[i_p]*mass_n[j_p])
# Hessian
de_p = f.first_derivative(r_p, ijsize)
dde_p = f.second_derivative(r_p, ijsize)
n_pc = (r_pc.T/r_p).T
H_pcc = -(dde_p * (n_pc.reshape(-1, 3, 1)
* n_pc.reshape(-1, 1, 3)).T).T
H_pcc += -(de_p/r_p * (np.eye(3, dtype=n_pc.dtype)
- (n_pc.reshape(-1, 3, 1) * n_pc.reshape(-1, 1, 3))).T).T
if format == "sparse":
if divide_by_masses:
H = bsr_matrix(((H_pcc.T/geom_mean_mass_p).T,
j_p, first_i), shape=(3*nb_atoms, 3*nb_atoms))
else:
H = bsr_matrix((H_pcc, j_p, first_i), shape=(3*nb_atoms, 3*nb_atoms))
Hdiag_icc = np.empty((nb_atoms, 3, 3))
for x in range(3):
for y in range(3):
                    Hdiag_icc[:, x, y] = -np.bincount(
                        i_p, weights=H_pcc[:, x, y], minlength=nb_atoms
                    )
if divide_by_masses:
H += bsr_matrix(((Hdiag_icc.T/mass_n).T, np.arange(nb_atoms),
np.arange(nb_atoms+1)), shape=(3*nb_atoms, 3*nb_atoms))
else:
H += bsr_matrix((Hdiag_icc, np.arange(nb_atoms),
np.arange(nb_atoms+1)), shape=(3*nb_atoms, 3*nb_atoms))
return H
# Neighbour list format
elif format == "neighbour-list":
return H_pcc, i_p, j_p, r_pc, r_p
| 10,746 | 32.689655 | 145 | py |
matscipy | matscipy-master/matscipy/calculators/committee/utils.py | import numpy as np
import ase.io
def subsample(samples, output_files, num_subsets, num_subset_samples, keep_isolated_atoms=True):
"""
Draw sub-samples (without replacement) from given configurations and write to files.
Parameter:
----------
samples: list(Atoms)
List of configurations to draw sub-samples from (e.g. full training set).
output_files: list(str / Path)
Target locations for sub-sampled sets of configurations.
num_subsets: int
Number of sub-sets to be drawn.
num_subset_samples: int
Number of configurations per sub-sets.
keep_isolated_atoms: bool, default True
Make isolated atoms (if present) be part of each sub-set.
Returns:
--------
subsamples: list(list(Atoms))
Contains the lists of sub-sampled configurations.
"""
# keep track of position in original set of configurations
sample_atoms = []
isolated_atoms = [] # keep isolated atoms for each sub-sample
for idx_i, atoms_i in enumerate(samples):
atoms_i.info['_Index_FullTrainingSet'] = idx_i
if keep_isolated_atoms and len(atoms_i) == 1:
isolated_atoms.append(atoms_i)
else:
sample_atoms.append(atoms_i)
num_subset_samples -= len(isolated_atoms)
    assert 1 < num_subset_samples <= len(sample_atoms), 'Invalid `num_subset_samples` (after reduction by the number of isolated atoms)'
    assert len(output_files) == num_subsets, '`output_files` requires `num_subsets` files to be specified.'
subsample_indices = _get_subsample_indices(len(sample_atoms), num_subsets, num_subset_samples)
subsamples = [isolated_atoms + [sample_atoms[idx_i] for idx_i in idxs] for idxs in subsample_indices]
for output_file_i, subsample_i in zip(output_files, subsamples):
ase.io.write(output_file_i, subsample_i)
return subsamples
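# Usage sketch (illustrative; all file names are hypothetical):
#
#   import ase.io
#   full_set = ase.io.read('full_training_set.xyz', ':')
#   subsets = subsample(full_set,
#                       output_files=[f'subset_{i}.xyz' for i in range(10)],
#                       num_subsets=10, num_subset_samples=500)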
def _get_subsample_indices(num_samples, num_subsets, num_subset_samples):
"""
Draw indices for sub-samples (without replacement).
Parameter:
----------
num_samples: int
Number of configurations to draw sub-samples from (e.g. size of full training set).
num_subsets: int
Number of sub-sets to be drawn.
num_subset_samples: int
Number of configurations per sub-sets.
Returns:
--------
subsample_indices: list(list(int))
Contains the lists of indices representing sub-sampled configurations.
"""
indice_pool = np.arange(num_samples)
subsample_indices = []
for _ in range(num_subsets):
if num_subset_samples <= len(indice_pool):
selected_indices = np.random.choice(indice_pool, num_subset_samples, False)
indice_pool = indice_pool[~np.isin(indice_pool, selected_indices)]
subsample_indices.append(selected_indices)
else:
selected_indices_part_1 = indice_pool
# re-fill pool with indices, taking account of already selected ones,
# in order to avoid duplicate selections
indice_pool = np.arange(num_samples)
indice_pool = indice_pool[~np.isin(indice_pool, selected_indices_part_1)]
selected_indices_part_2 = np.random.choice(indice_pool, num_subset_samples - len(selected_indices_part_1), False)
indice_pool = indice_pool[~np.isin(indice_pool, selected_indices_part_2)]
selected_indices = np.concatenate((selected_indices_part_1, selected_indices_part_2))
subsample_indices.append(selected_indices)
return subsample_indices
| 3,554 | 39.862069 | 133 | py |
matscipy | matscipy-master/matscipy/calculators/committee/log.py | import os
import logging
def create_logger(name, level='warning', log_file=None):
"""
Create a logger with defined level, and some nice formatter and handler.
Parameter:
----------
name: str
Name of the logger.
level: str, optional default='warning'
The sensitivity level of both the logger and the handler.
Allowed options are 'debug', 'info', 'warning', 'error' and 'critical'.
log_file: str, optional default=None
Path to the file, in case you want to log to a file rather than standard out.
Returns:
--------
logger: logging.Logger
The respective logger.
"""
levels = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
logger = logging.getLogger(name)
logger.setLevel(levels[level])
# create console handler and set level to debug
if not log_file:
ch = logging.StreamHandler()
else:
ch = logging.FileHandler(os.path.abspath(log_file))
ch.setLevel(levels[level])
# create formatter
formatter = logging.Formatter('%(asctime)s: [%(levelname)s] %(name)s : %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
def set_logging(level='warning', log_file=None):
"""
Set the global logging level (and corresponding file location).
Parameter:
----------
level: str, optional default='warning'
The sensitivity level of both the logger and the handler.
Allowed options are 'debug', 'info', 'warning', 'error' and 'critical'.
log_file: str, optional default=None
Path to the file, in case you want to log to a file rather than standard out.
"""
logger = create_logger('matscipy.calculators.committee', level=level, log_file=log_file)
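# Usage sketch (illustrative; the log file name is arbitrary):
#
#   set_logging(level='info', log_file='committee.log')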
| 1,924 | 28.166667 | 92 | py |
matscipy | matscipy-master/matscipy/calculators/committee/__init__.py | """Implements the Committee (of Models) approach."""
from .committee import CommitteeUncertainty, Committee, CommitteeMember
from .log import set_logging
from .utils import subsample
| 184 | 29.833333 | 71 | py |
matscipy | matscipy-master/matscipy/calculators/committee/committee.py | import warnings
import logging
from pathlib import Path
from collections import Counter
import numpy as np
import ase.io
from ase.calculators.calculator import Calculator, all_changes
logger = logging.getLogger('matscipy.calculators.committee')
class CommitteeUncertainty(Calculator):
"""
Calculator for a committee of machine learned interatomic potentials (MLIP).
The class assumes individual members of the committee already exist (i.e. their
training is performed externally). Instances of this class are initialized with
these committee members and results (energy, forces) are calculated as average
over these members. In addition to these values, also the uncertainty (standard
deviation) is calculated.
The idea for this Calculator class is based on the following publication:
Musil et al., J. Chem. Theory Comput. 15, 906−915 (2019)
https://pubs.acs.org/doi/full/10.1021/acs.jctc.8b00959
Parameter:
----------
committee: Committee-instance
Representation for a collection of Calculators.
atoms : ase-Atoms, optional default=None
Optional object to which the calculator will be attached.
"""
def __init__(self, committee, atoms=None):
self.implemented_properties = ['energy', 'forces', 'stress']
self.committee = committee
super().__init__(atoms=atoms)
logger.info('Initialized committee uncertainty calculator')
for line_i in self.committee.__repr__().splitlines():
logger.debug(line_i)
def calculate(self, atoms=None, properties=['energy', 'forces', 'stress'], system_changes=all_changes):
"""Calculates committee (mean) values and variances."""
logger.info(f'Calculating properties {properties} with committee')
super().calculate(atoms, properties, system_changes)
property_committee = {k_i: [] for k_i in properties}
for cm_i in self.committee.members:
cm_i.calculator.calculate(atoms=atoms, properties=properties, system_changes=system_changes)
for p_i in properties:
property_committee[p_i].append(cm_i.calculator.results[p_i])
for p_i in properties:
self.results[p_i] = np.mean(property_committee[p_i], axis=0)
self.results[f'{p_i}_uncertainty'] = np.sqrt(np.var(property_committee[p_i], ddof=1, axis=0))
if self.committee.is_calibrated_for(p_i):
self.results[f'{p_i}_uncertainty'] = self.committee.scale_uncertainty(self.results[f'{p_i}_uncertainty'], p_i)
else:
warnings.warn(f'Uncertainty estimation has not been calibrated for {p_i}.')
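# Usage sketch (illustrative; `SomeMLIPCalculator` and the model/subset file
# names are hypothetical placeholders, as is the reference key 'E_ref'):
#
#   members = [CommitteeMember(SomeMLIPCalculator(f'model_{i}.xml'),
#                              training_data=f'subset_{i}.xyz')
#              for i in range(10)]
#   committee = Committee(members)
#   committee.set_internal_validation_set(appearance_threshold=3)
#   committee.calibrate(prop='energy', key='E_ref', location='info')
#   atoms.calc = CommitteeUncertainty(committee)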
class Committee:
"""
Instances of this class represent a committee of models.
    Its purpose is to store the ```CommitteeMember```s representing the committee model
and to calibrate the obtained uncertainties (required when sub-sampling is used
to create the training data of the committee members).
Parameter:
----------
members: list(M)
List of ```CommitteeMember``` instances representing the committee of (here `M`) models.
"""
def __init__(self, members=None):
self.members = [] if members is None else members
logger.info('Initialized committee')
for line_i in self.__repr__().splitlines():
logger.debug(line_i)
@property
def members(self):
"""List with committee members."""
return self._members
@members.setter
def members(self, members):
"""Set list with committee members."""
for member_i in members:
self._check_member_type(member_i)
self._members = members
logger.info(f'Set {len(self.members)} members to represent the committee')
self._update()
@property
def number(self):
"""Number of committee members."""
return self._number
@property
def atoms(self):
"""Combined Atoms/samples in the committee."""
return self._atoms
@property
def ids(self):
"""Identifiers of atoms/samples in the committee."""
return self._ids
@property
def id_to_atoms(self):
"""Dictionary to translate identifiers to Atoms-objects."""
return self._id_to_atoms
@property
def id_counter(self):
"""Counter-object for identifier appearances in the committee."""
return self._id_counter
@property
def alphas(self):
"""(Linear) scaling factors for committee uncertainties."""
return self._alphas
@property
def calibrated_for(self):
"""Set of properties the committee has been calibrated for."""
return self.alphas.keys()
@property
def validation_set(self):
"""List of Atoms-objects."""
if not self._validation_set:
msg = '`Committee.set_internal_validation_set()` has not been called or ' + \
'`Committee`-instance has been altered since last call.'
logger.warning(msg)
warnings.warn(msg)
return self._validation_set
def _update(self):
"""Update status when ```Committee```-instance has been altered."""
self._number = len(self.members)
self._atoms = [atoms_ij for cm_i in self.members for atoms_ij in cm_i.atoms]
self._ids = [id_ij for cm_i in self.members for id_ij in cm_i.ids]
self._id_to_atoms = {id_i: atoms_i for id_i, atoms_i in zip(self.ids, self.atoms)}
self._id_counter = Counter(self.ids)
self._validation_set = []
self._alphas = {}
logger.info('Updated committee status')
def add_member(self, member):
"""Extend committee by new ```member``` (i.e. ```CommitteeMember```-instance)."""
self._check_member_type(member)
self.members.append(member)
logger.info('Added +1 member to the committee')
self._update()
def __add__(self, member):
"""Extend committee by new ```member``` (i.e. ```CommitteeMember```-instance)."""
self._check_member_type(member)
self.add_member(member)
return self
@staticmethod
def _check_member_type(member):
"""Make sure ```member``` is of type ```CommitteeMember```."""
assert isinstance(member, CommitteeMember), \
f'Members of `Committee` need to be of type `CommitteeMember`. Found {type(member)}'
def set_internal_validation_set(self, appearance_threshold):
"""
Define a validation set based on the Atoms-objects of sub-sampled committee training sets.
Parameter:
----------
appearance_threshold: int
Number of times a sample for the validation set
is maximally allowed to appear across the training sets
of committee members.
"""
if self._alphas:
msg = '`alphas` will be reset to avoid inconsistencies with new validation set.'
logger.warning(msg)
warnings.warn(msg)
self._reset_calibration_parameters()
assert 0 < appearance_threshold <= self.number - 2
self._validation_set = []
for id_i, appearance_i in self.id_counter.most_common()[::-1]:
if appearance_i > appearance_threshold:
break
self._validation_set.append(self.id_to_atoms[id_i])
logger.info(f'Set internal validation set with {len(self.validation_set)} entries')
def _reset_calibration_parameters(self):
"""Reset parameters obtained from calling ```self.calibrate()```."""
self._alphas = {}
logger.info('Reset calibration parameters')
def calibrate(self, prop, key, location, system_changes=all_changes):
"""
Obtain parameters to properly scale committee uncertainties and make
them available as an attribute (```alphas```) with another associated
attribute (```calibrated_for```) providing information about the property
for which the uncertainty will be scaled by it.
Parameter:
----------
properties: list(str)
Properties for which the calibration will determine scaling factors.
key: str
Key under which the reference values in the validation set are stored
(i.e. under Atoms.info[```key```] / Atoms.arrays[```key```]).
location: str
Either 'info' or 'arrays'.
"""
assert location in ['info', 'arrays'], f'`location` must be \'info\' or \'arrays\', not \'{location}\'.'
validation_ref = [np.asarray(getattr(sample_i, location)[key]).flatten() for sample_i in self.validation_set]
validation_pred, validation_pred_var = [], []
for idx_i, sample_i in enumerate(self.validation_set):
sample_committee_pred = []
for cm_j in self.members:
if cm_j.is_sample_in_atoms(sample_i):
continue
cm_j.calculator.calculate(atoms=sample_i, properties=[prop], system_changes=system_changes)
sample_committee_pred.append(cm_j.calculator.results[prop])
validation_pred.append(np.mean(sample_committee_pred, axis=0).flatten())
validation_pred_var.append(np.var(sample_committee_pred, ddof=1, axis=0).flatten())
# For symmetry-reasons it can happen that e.g. all values for a force component of an atom are equal.
# This would lead to a division-by-zero error in self._calculate_alpha() due to zero-variances.
validation_ref = np.concatenate(validation_ref)
validation_pred = np.concatenate(validation_pred)
validation_pred_var = np.concatenate(validation_pred_var)
ignore_indices = np.where(validation_pred_var == 0)[0]
validation_ref = np.delete(validation_ref, ignore_indices)
validation_pred = np.delete(validation_pred, ignore_indices)
validation_pred_var = np.delete(validation_pred_var, ignore_indices)
self._alphas.update(
{prop: self._calculate_alpha(
vals_ref=validation_ref,
vals_pred=validation_pred,
vars_pred=validation_pred_var,
)
})
logger.info(f'Calibrated committee for property \'{prop}\'')
logger.debug(f'\talpha = {self.alphas[prop]}')
def is_calibrated_for(self, prop):
"""Check whether committee has been calibrated for ```prop```."""
return prop in self.calibrated_for
def _calculate_alpha(self, vals_ref, vals_pred, vars_pred):
"""
Get (linear) uncertainty scaling factor alpha.
This implementation is based on:
Imbalzano et al., J. Chem. Phys. 154, 074102 (2021)
https://doi.org/10.1063/5.0036522
Parameter:
----------
vals_ref: ndarray(N)
Reference values for validation set samples.
vals_pred: ndarray(N)
Values predicted by the committee for validation set samples.
vars_pred: ndarray(N)
Variance predicted by the committee for validation set samples.
Returns:
--------
(Linear) uncertainty scaling factor alpha.
"""
N_val = len(vals_ref)
M = self.number
alpha_squared = -1/M + (M - 3)/(M - 1) * 1/N_val * np.sum(np.power(vals_ref-vals_pred, 2) / vars_pred)
logger.info(f'Calculated alpha')
logger.debug(f'\tN_val = {N_val}')
logger.debug(f'\tM = {M}')
logger.debug(f'\talpha_squared = {alpha_squared}')
assert alpha_squared > 0, f'Obtained negative value for `alpha_squared`: {alpha_squared}'
return np.sqrt(alpha_squared)
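    # Worked example of the formula above (illustrative): for M = 10 committee
    # members and a single validation value (N_val = 1) whose squared residual
    # equals the predicted variance,
    #   alpha^2 = -1/10 + (7/9) * 1 ~ 0.678, i.e. alpha ~ 0.82,
    # so the raw committee spread would be scaled down slightly.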
def scale_uncertainty(self, value, prop):
"""
Scale uncertainty ```value``` obtained with the committee according to the calibration
for the corresponding property (```prop```).
Parameter:
----------
value: float / ndarray
Represents the uncertainty values (e.g. energy, forces) to be scaled.
prop: str
The property associated with ```value``` (for which the committee needs to be calibrated).
Returns:
--------
Scaled input ```value```.
"""
return self.alphas[prop] * value
def __repr__(self):
s = ''
s_i = f'Committee Status\n'
s += s_i
s += '='*len(s_i) + '\n\n'
s += f'# members: {self.number:>10d}\n'
s += f'# atoms: {len(self.atoms):>10d}\n'
s += f'# ids: {len(self.ids):>10d}\n'
s += f'# atoms validation set: {len(self._validation_set):>10d}\n'
if not self.calibrated_for:
s += f'calibrated for: {"-":>10}\n'
else:
s += f'calibrated for:\n'
for p_i in sorted(self.calibrated_for):
s += f'{"":>4s}{p_i:<18}{self.alphas[p_i]:>18}\n'
for idx_i, cm_i in enumerate(self.members):
s += '\n\n'
s_i = f'Committee Member {idx_i}:\n'
s += s_i
s += '-'*len(s_i) + '\n'
s += cm_i.__repr__()
return s
class CommitteeMember:
"""
Lightweight class defining a member (i.e. a sub-model) of a committee model.
Parameter:
----------
calculator: Calculator
Instance of a Calculator-class (or heirs e.g. quippy.potential.Potential)
representing a machine-learned model.
training_data: str / Path / list(Atoms), optional default=None
Path to or Atoms of (sub-sampled) training set used to create the machine-learned model
defined by the ```calculator```.
"""
def __init__(self, calculator, training_data=None):
self._calculator = calculator
self._filename = 'no filename'
self._atoms = []
self._ids = []
if training_data is not None:
self.set_training_data(training_data)
logger.info('Created committee member')
for line_i in self.__repr__().splitlines():
logger.debug(line_i)
@property
def calculator(self):
"""Model of the committee member."""
return self._calculator
@property
def filename(self):
"""Path to the atoms/samples in the committee member."""
return self._filename
@filename.setter
def filename(self, filename):
"""Set path to the atoms/samples in the committee member."""
msg = 'Use `set_training_data()` to modify the committee member'
logger.error(msg)
raise RuntimeError(msg)
@property
def atoms(self):
"""Atoms/samples in the committee member."""
return self._atoms
@atoms.setter
def atoms(self, atoms):
"""Set Atoms/samples in the committee member."""
msg = 'Use `set_training_data()` to modify the committee member'
logger.error(msg)
raise RuntimeError(msg)
@property
def ids(self):
"""Identifiers of atoms/samples in the committee member."""
return self._ids
@ids.setter
def ids(self, ids):
"""Set identifiers of atoms/samples in the committee member."""
msg = 'Use `set_training_data()` to modify the committee member'
logger.error(msg)
raise RuntimeError(msg)
def set_training_data(self, training_data):
"""
Read in and store the training data of this committee members from the passed ```filename```.
Parameter:
----------
training_data: str / Path / list(Atoms), optional default=None
Path to or Atoms of (sub-sampled) training set used to create the machine-learned model
            defined by the ```calculator```. Individual Atoms need an Atoms.info['_Index_FullTrainingSet']
for unique identification.
"""
if len(self.atoms) > 0:
msg = 'Overwriting current training data.'
logger.warning(msg)
warnings.warn(msg)
if isinstance(training_data, (str, Path)):
self._filename = Path(training_data)
self._atoms = ase.io.read(self.filename, ':')
elif isinstance(training_data, list):
            self._filename = 'no filename'
self._atoms = training_data
self._ids = [atoms_i.info['_Index_FullTrainingSet'] for atoms_i in self.atoms]
def is_sample_in_atoms(self, sample):
"""Check if passed Atoms-object is part of this committee member (by comparing identifiers)."""
if '_Index_FullTrainingSet' not in sample.info:
msg = 'Can\'t test if `sample` is in `atoms`. `sample` has no Atoms.info[\'_Index_FullTrainingSet\']'
logger.error(msg)
raise RuntimeError(msg)
else:
return sample.info['_Index_FullTrainingSet'] in self.ids
def __repr__(self):
s = ''
s += f'calculator: {str(self.calculator.__class__):>60s}\n'
s += f'filename: {str(self.filename):>60s}\n'
s += f'# Atoms: {len(self.atoms):>60d}\n'
s += f'# IDs: {len(self.ids):>60d}'
return s
| 17,234 | 36.386117 | 126 | py |
matscipy | matscipy-master/matscipy/calculators/manybody/newmb.py | """Manybody calculator definition."""
import numpy as np
from abc import ABC, abstractmethod
from collections import defaultdict
from itertools import combinations_with_replacement
from typing import Mapping
from scipy.sparse import bsr_matrix
from ...calculators.calculator import MatscipyCalculator
from ...neighbours import (
Neighbourhood, first_neighbours, find_indices_of_reversed_pairs,
MolecularNeighbourhood
)
from ...numpy_tricks import mabincount
from ...elasticity import full_3x3_to_Voigt_6_stress
from copy import deepcopy
__all__ = ["Manybody"]
# Broadcast slices
_c = np.s_[..., np.newaxis]
_cc = np.s_[..., np.newaxis, np.newaxis]
_ccc = np.s_[..., np.newaxis, np.newaxis, np.newaxis]
_cccc = np.s_[..., np.newaxis, np.newaxis, np.newaxis, np.newaxis]
def ein(*args):
"""Optimized einsum."""
return np.einsum(*args, optimize=True)
class Manybody(MatscipyCalculator):
"""Generic two- and three- body interaction calculator."""
implemented_properties = [
'free_energy',
'energy',
'stress',
'forces',
'hessian',
'dynamical_matrix',
'born_constants',
'nonaffine_forces',
'birch_coefficients',
'elastic_constants',
]
_voigt_seq = [0, 5, 4, 5, 1, 3, 4, 3, 2]
class Phi(ABC):
"""Define the manybody interaction with pair term ɸ(rᵢⱼ², ξᵢⱼ)."""
@abstractmethod
def __call__(self, rsq_p, xi_p):
"""Return ɸ(rᵢⱼ², ξᵢⱼ)."""
@abstractmethod
def gradient(self, rsq_p, xi_p):
"""Return [∂₁ɸ(rᵢⱼ², ξᵢⱼ), ∂₂ɸ(rᵢⱼ², ξᵢⱼ)]."""
@abstractmethod
def hessian(self, rsq_p, xi_p):
"""Return [∂₁₁ɸ(rᵢⱼ², ξᵢⱼ), ∂₂₂ɸ(rᵢⱼ², ξᵢⱼ), ∂₁₂ɸ(rᵢⱼ², ξᵢⱼ)]."""
class Theta(ABC):
"""Define the three-body term Θ(rᵢⱼ², rᵢₖ², rⱼₖ²)."""
@abstractmethod
def __call__(self, R1_p, R2_p, R3_p):
"""Return Θ(rᵢⱼ², rᵢₖ², rⱼₖ²)."""
@abstractmethod
def gradient(self, R1_p, R2_p, R3_p):
"""
Return [∂₁Θ(rᵢⱼ², rᵢₖ², rⱼₖ²),
∂₂Θ(rᵢⱼ², rᵢₖ², rⱼₖ²),
∂₃Θ(rᵢⱼ², rᵢₖ², rⱼₖ²)].
"""
@abstractmethod
def hessian(self, R1_p, R2_p, R3_p):
"""
Return [∂₁₁Θ(rᵢⱼ², rᵢₖ², rⱼₖ²),
∂₂₂Θ(rᵢⱼ², rᵢₖ², rⱼₖ²),
∂₃₃Θ(rᵢⱼ², rᵢₖ², rⱼₖ²),
∂₂₃Θ(rᵢⱼ², rᵢₖ², rⱼₖ²),
∂₁₃Θ(rᵢⱼ², rᵢₖ², rⱼₖ²),
∂₁₂Θ(rᵢⱼ², rᵢₖ², rⱼₖ²)].
"""
class _idx:
"""Helper class for index algebra."""
def __init__(self, idx, sign=1):
self.idx = idx
self.sign = sign
def __eq__(self, other):
return self.idx == other.idx and self.sign == other.sign
def __str__(self):
return ("-" if self.sign < 0 else "") + self.idx
def __repr__(self):
return str(self)
def __mul__(self, other):
return type(self)(self.idx + other.idx, self.sign * other.sign)
def __neg__(self):
return type(self)(self.idx, -self.sign)
def offdiagonal(self):
for c in "ijk":
if self.idx.count(c) > 1:
return False
return True
def __init__(self, phi: Mapping[int, Phi], theta: Mapping[int, Theta],
neighbourhood: Neighbourhood):
"""Construct with potentials ɸ(rᵢⱼ², ξᵢⱼ) and Θ(rᵢⱼ², rᵢₖ², rⱼₖ²)."""
super().__init__()
if isinstance(phi, defaultdict):
self.phi = phi
else:
from .potentials import ZeroPair # noqa
self.phi = defaultdict(lambda: ZeroPair())
self.phi.update(phi)
self.theta = theta
self.neighbourhood = neighbourhood
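    # Construction sketch (illustrative; `MyPhi`/`MyTheta` stand for concrete
    # implementations of the abstract classes above, keyed by pair/triplet
    # type, and `CutoffNeighbourhood` is assumed to be importable from
    # matscipy.neighbours):
    #
    #   from matscipy.neighbours import CutoffNeighbourhood
    #   calc = Manybody(phi={1: MyPhi()}, theta={1: MyTheta()},
    #                   neighbourhood=CutoffNeighbourhood(cutoff=3.0))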
@staticmethod
def _assemble_triplet_to_pair(ij_t, values_t, nb_pairs):
return mabincount(ij_t, values_t, minlength=nb_pairs)
@staticmethod
def _assemble_pair_to_atom(i_p, values_p, nb_atoms):
return mabincount(i_p, values_p, minlength=nb_atoms)
@staticmethod
def _assemble_triplet_to_atom(i_t, values_t, nb_atoms):
return mabincount(i_t, values_t, minlength=nb_atoms)
@classmethod
def sum_ij_pi_ij_n(cls, n, pairs, values_p):
r"""Compute :math:`\sum_{ij}\pi_{ij|n}\Chi_{ij}`."""
i_p, j_p = pairs
return (
+ cls._assemble_pair_to_atom(i_p, values_p, n)
- cls._assemble_pair_to_atom(j_p, values_p, n)
) # yapf: disable
@classmethod
def sum_ij_sum_X_pi_X_n(cls, n, pairs, triplets, values_tq):
r"""Compute :math:`\sum_{ij}\sum_{k\neq i,j}\sum_{X}\pi_{X|n}\Chi_X`."""
i_p, j_p = pairs
ij_t, ik_t = triplets
return sum(
+ cls._assemble_triplet_to_atom(i, values_tq[:, q], n)
- cls._assemble_triplet_to_atom(j, values_tq[:, q], n)
            # Loop over pairs in the ijk triplet
for q, (i, j) in enumerate([(i_p[ij_t], j_p[ij_t]), # ij pair
(i_p[ik_t], j_p[ik_t]), # ik pair
(j_p[ij_t], j_p[ik_t])]) # jk pair
) # yapf: disable
@classmethod
def sum_ijk_tau_XY_mn(cls, n, triplets, tr_p, X, Y, values_t):
triplets = {
k: v for k, v in zip(["ij", "ik", "jk", "ji", "ki", "kj"],
list(triplets) + [tr_p[t] for t in triplets])
}
# All indices in τ_XY|mn
indices = X[np.newaxis] * Y[np.newaxis].T
# Avoid double counting symmetric indices
if np.all(X == Y):
indices = indices[~np.tri(2, 2, -1, dtype=bool)]
return sum(
idx.sign
* cls._assemble_triplet_to_pair(triplets[idx.idx], values_t, n)
# Indices relevant for off-diagonal terms
for idx in np.ravel(indices) if idx.offdiagonal()
)
@classmethod
def _X_indices(cls):
i, j, k = map(cls._idx, 'ijk')
return np.array([[i, -j],
[i, -k],
[j, -k]])
@classmethod
def sum_XY_sum_ijk_tau_XY_mn(cls, n, triplets, tr_p, values_tXY):
X_indices = cls._X_indices()
return sum(
cls.sum_ijk_tau_XY_mn(n, triplets, tr_p, X, Y, values_tXY[:, x, y])
for (x, X), (y, Y) in combinations_with_replacement(
enumerate(X_indices), r=2
)
)
@classmethod
def sum_XX_sum_ijk_tau_XX_mn(cls, n, triplets, tr_p, values_tX):
X_indices = cls._X_indices()
return sum(
cls.sum_ijk_tau_XY_mn(n, triplets, tr_p, X, X, values_tX[:, x])
for x, X in enumerate(X_indices)
)
@classmethod
def sum_X_sum_ijk_tau_ijX_mn(cls, n, triplets, tr_p, values_tX):
X_indices = cls._X_indices()
return sum(
cls.sum_ijk_tau_XY_mn(n, triplets, tr_p,
X_indices[0], X, values_tX[:, x])
for x, X in enumerate(X_indices)
)
@classmethod
def sum_X_sum_ijk_tau_ij_XOR_X_mn(cls, n, triplets, tr_p, values_tX):
X_indices = cls._X_indices()
return sum(
cls.sum_ijk_tau_XY_mn(n, triplets, tr_p,
X_indices[0], X, values_tX[:, x + 1])
for x, X in enumerate(X_indices[1:])
)
def _masked_compute(self, atoms, order, list_ij=None, list_ijk=None,
neighbourhood=None):
"""Compute requested derivatives of phi and theta."""
if not isinstance(order, list):
order = [order]
if neighbourhood is None:
neighbourhood = self.neighbourhood
if list_ijk is None and list_ij is None:
i_p, j_p, r_pc = neighbourhood.get_pairs(atoms, 'ijD')
ij_t, ik_t, r_tqc = neighbourhood.get_triplets(atoms, 'ijD')
else:
i_p, j_p, r_pc = list_ij
ij_t, ik_t, r_tqc = list_ijk
# Pair and triplet types
t_p = neighbourhood.pair_type(*(atoms.numbers[i]
for i in (i_p, j_p)))
t_t = neighbourhood.triplet_type(*(atoms.numbers[i]
for i in (i_p[ij_t], j_p[ij_t],
j_p[ik_t])))
derivatives = np.array([
('__call__', 1, 1),
('gradient', 2, 3),
('hessian', 3, 6),
], dtype=object)
phi_res = {
d[0]: np.zeros([d[1], len(r_pc)])
for d in derivatives[order]
}
theta_res = {
d[0]: np.zeros([d[2], len(r_tqc)])
for d in derivatives[order]
}
        # Do not allocate array for theta_t if energy is explicitly requested
if '__call__' in theta_res:
theta_t = theta_res['__call__']
extra_compute_theta = False
else:
theta_t = np.zeros([1, len(r_tqc)])
extra_compute_theta = True
# Squared distances
rsq_p = np.sum(r_pc**2, axis=-1)
rsq_tq = np.sum(r_tqc**2, axis=-1)
for t in np.unique(t_t):
m = t_t == t # type mask
R = rsq_tq[m].T # distances squared
# Required derivative order
for attr, res in theta_res.items():
res[:, m] = getattr(self.theta[t], attr)(*R)
# We need value of theta to compute xi
if extra_compute_theta:
theta_t[:, m] = self.theta[t](*R)
# Aggregating xi
xi_p = self._assemble_triplet_to_pair(ij_t, theta_t.squeeze(),
len(r_pc))
for t in np.unique(t_p):
m = t_p == t # type mask
# Required derivative order
for attr, res in phi_res.items():
res[:, m] = getattr(self.phi[t], attr)(rsq_p[m], xi_p[m])
return phi_res.values(), theta_res.values()
def calculate(self, atoms, properties, system_changes):
"""Calculate properties on atoms."""
super().calculate(atoms, properties, system_changes)
# Topology information
i_p, j_p, r_pc = self.neighbourhood.get_pairs(atoms, 'ijD')
ij_t, ik_t, r_tqc = self.neighbourhood.get_triplets(atoms, 'ijD')
n = len(atoms)
# Request energy and gradient
(phi_p, dphi_cp), (theta_t, dtheta_qt) = \
self._masked_compute(atoms, order=[0, 1],
list_ij=[i_p, j_p, r_pc],
list_ijk=[ij_t, ik_t, r_tqc])
# Energy
epot = 0.5 * phi_p.sum()
# Forces
dpdxi = dphi_cp[1]
# compute dɸ/dxi * dΘ/dRX * rX
dpdxi_dtdRX_rX = ein('t,qt,tqc->tqc', dpdxi[ij_t], dtheta_qt, r_tqc)
dpdR_r = dphi_cp[0][_c] * r_pc # compute dɸ/dR * r
# Assembling triplet force contribution for each pair in triplet
f_nc = self.sum_ij_sum_X_pi_X_n(n, (i_p, j_p), (ij_t, ik_t),
dpdxi_dtdRX_rX)
# Assembling the pair force contributions
f_nc += self.sum_ij_pi_ij_n(n, (i_p, j_p), dpdR_r)
# Stresses
s_cc = ein('tXi,tXj->ij', dpdxi_dtdRX_rX,
r_tqc) # outer + sum triplets
s_cc += ein('pi,pj->ij', dpdR_r, r_pc) # outer + sum pairs
s_cc *= 1 / atoms.get_volume()
# Update results
self.results.update({
"energy": epot,
"free_energy": epot,
"stress": full_3x3_to_Voigt_6_stress(s_cc),
"forces": f_nc,
})
def get_born_elastic_constants(self, atoms):
"""Compute the Born (affine) elastic constants."""
if self.atoms is None:
self.atoms = atoms
# Topology information
r_pc = self.neighbourhood.get_pairs(atoms, 'D')
ij_t, r_tqc = self.neighbourhood.get_triplets(atoms, 'iD')
(dphi_cp, ddphi_cp), (dtheta_qt, ddtheta_qt) = \
self._masked_compute(atoms, order=[1, 2])
# Term 1 vanishes
C_cccc = np.zeros([3] * 4)
# Term 2
ddpddR = ddphi_cp[0]
C_cccc += ein('p,pa,pb,pm,pn->abmn', ddpddR, r_pc, r_pc, r_pc, r_pc)
# Term 3
dpdxi = dphi_cp[1][ij_t]
# Combination indices involved in term 3
X = [0, 1, 2, 2, 1, 0, 2, 0, 1]
Y = [0, 1, 2, 1, 2, 2, 0, 1, 0]
XY = [0, 1, 2, 3, 3, 4, 4, 5, 5]
C_cccc += ein('t,qt,tqa,tqb,tqm,tqn->abmn', dpdxi, ddtheta_qt[XY],
r_tqc[:, X], r_tqc[:, X], r_tqc[:, Y], r_tqc[:, Y])
# Term 4
ddpdRdxi = ddphi_cp[2][ij_t]
# Combination indices involved in term 4
        # also implicitly symmetrizes?
X = [0, 0, 0, 1, 0, 2]
Y = [0, 0, 1, 0, 2, 0]
XY = [0, 0, 1, 1, 2, 2]
C_cccc += ein('t,qt,tqa,tqb,tqm,tqn->abmn', ddpdRdxi, dtheta_qt[XY],
r_tqc[:, X], r_tqc[:, X], r_tqc[:, Y], r_tqc[:, Y])
# Term 5
ddpddxi = ddphi_cp[1]
dtdRx_rXrX = self._assemble_triplet_to_pair(
ij_t,
ein('qt,tqa,tqb->tab', dtheta_qt, r_tqc, r_tqc),
len(r_pc),
)
C_cccc += ein('p,pab,pmn->abmn', ddpddxi, dtdRx_rXrX, dtdRx_rXrX)
return 2 * C_cccc / atoms.get_volume()
def get_nonaffine_forces(self, atoms):
"""Compute non-affine forces (derivatives w/r reference positions)."""
n = len(atoms)
i_p, j_p, r_pc = self.neighbourhood.get_pairs(atoms, 'ijD')
ij_t, ik_t, r_tqc = self.neighbourhood.get_triplets(atoms, 'ijD')
(dphi_cp, ddphi_cp), (dtheta_qt, ddtheta_qt) = \
self._masked_compute(atoms, order=[1, 2])
# Term 1 and 2 have the same structure, we assemble @ same time
e = np.eye(3)
dpdR, ddpddR = dphi_cp[0], ddphi_cp[0]
term_12_pcab = (
(ein('p,pa,bg->pgab', dpdR, r_pc, e) # term 1
+ ein('p,pb,ag->pgab', dpdR, r_pc, e)) # term 1
+ 2 * ein('p,pa,pb,pg->pgab', ddpddR, r_pc, r_pc, r_pc) # term 2
)
# Assemble pair terms
naf_ncab = self.sum_ij_pi_ij_n(n, (i_p, j_p), term_12_pcab)
# Term 3
# Here we sum over Y in the inner loop, over X in the assembly
# because there is a pi_{X|n} in the sum
# terms 3 and 5 actually have the same structure
# maybe group up?
dpdxi = dphi_cp[1][ij_t]
# turn voigt dtdRxdRy to 3x3
ddtdRXdRY = ddtheta_qt[self._voigt_seq].reshape(3, 3, -1)
term_3_tXcab = 2 * ein('XYt,tYa,tYb,tXc->tXcab', ddtdRXdRY, r_tqc,
r_tqc, r_tqc)
term_3_tXcab += (
ein('Xt,tXb,ag->tXgab', dtheta_qt, r_tqc, e)
+ ein('Xt,tXa,bg->tXgab', dtheta_qt, r_tqc, e)
)
term_3_tXcab *= dpdxi[_cccc]
naf_ncab += self.sum_ij_sum_X_pi_X_n(n, (i_p, j_p), (ij_t, ik_t),
term_3_tXcab)
# Term 4
# Here we have two sub-terms:
# - one sums over X in the inner loop and has pi_{ij|n}
# => sub-term 1 (defined on pairs)
# - one has pi_{X|n}
# => sub-term 2 (define on triplets)
ddpdRdxi = ddphi_cp[2][ij_t]
dtdRX = dtheta_qt
term_4_1_pab = self._assemble_triplet_to_pair(
ij_t,
ein(
't,Xt,tXa,tXb,tc->tcab', # note: sum over X
ddpdRdxi,
dtdRX,
r_tqc,
r_tqc,
r_tqc[:, 0]),
len(i_p),
)
term_4_2_tXcab = ein('t,Xt,ta,tb,tXc->tXcab', ddpdRdxi, dtdRX,
r_tqc[:, 0], r_tqc[:, 0], r_tqc)
# assembling sub-terms
naf_ncab += 2 * self.sum_ij_pi_ij_n(n, (i_p, j_p), term_4_1_pab)
naf_ncab += 2 * self.sum_ij_sum_X_pi_X_n(n, (i_p, j_p),
(ij_t, ik_t), term_4_2_tXcab)
# Term 5
ddpddxi = ddphi_cp[1][ij_t]
dtdRY = dtdRX # just for clarity
term_5_1_pab = self._assemble_triplet_to_pair(
ij_t, ein('qt,tqa,tqb->tab', dtdRX, r_tqc, r_tqc), len(i_p))
term_5_2_tYgab = ein('t,Yt,tab,tYg->tYgab', ddpddxi, dtdRY,
term_5_1_pab[ij_t], r_tqc)
naf_ncab += 2 * self.sum_ij_sum_X_pi_X_n(n, (i_p, j_p),
(ij_t, ik_t), term_5_2_tYgab)
return naf_ncab
def get_hessian(self, atoms, format='sparse', divide_by_masses=False):
"""Compute hessian."""
double_cutoff, pairwise_cutoff, neigh = \
self.neighbourhood.double_neighbourhood()
# We need twice the cutoff to get jk
i_p, j_p, r_p, r_pc = neigh.get_pairs(
atoms, 'ijdD', cutoff=double_cutoff
)
mask = neigh.mask(r_p, pairwise_cutoff)
tr_p = neigh.reverse_pair_indices(i_p, j_p, r_p)
ij_t, ik_t, jk_t, r_tq, r_tqc = neigh.get_triplets(
atoms, 'ijkdD', neighbours=[i_p, j_p, r_p, r_pc]
)
n = len(atoms)
nb_pairs = len(i_p)
nb_triplets = len(ij_t)
first_n = first_neighbours(n, i_p)
first_p = first_neighbours(nb_pairs, ij_t) \
if nb_triplets != 0 else [0, 0]
(dphi_cp, ddphi_cp), (dtheta_qt, ddtheta_qt) = \
self._masked_compute(atoms, order=[1, 2],
list_ij=[i_p, j_p, r_pc],
list_ijk=[ij_t, ik_t, r_tqc],
neighbourhood=neigh)
# Masking extraneous pair contributions
dphi_cp[:, mask] = 0
ddphi_cp[:, mask] = 0
# Term 1, merge with T2 in the end
e = np.identity(3)
dpdR = dphi_cp[0]
H_pcc = ein('p,ab->pab', dpdR, -e)
# Term 2, merge with T1 in the end
ddpddR = ddphi_cp[0]
H_pcc -= ein('p,pa,pb->pab', 2 * ddpddR, r_pc, r_pc)
# Term 3
dpdxi = dphi_cp[1]
dpdxi = dpdxi[ij_t]
dtdRX = dtheta_qt
ddtdRXdRY = ddtheta_qt[self._voigt_seq].reshape(3, 3, -1)
dp_dt_e = ein('t,Xt,ab->tXab', dpdxi, dtdRX, e)
dp_ddt_rX_rY = ein('t,XYt,tXa,tYb->tXYab', 2 * dpdxi, ddtdRXdRY,
r_tqc, r_tqc)
H_pcc += self.sum_XY_sum_ijk_tau_XY_mn(nb_pairs, (ij_t, ik_t, jk_t),
tr_p, dp_ddt_rX_rY)
H_pcc += self.sum_XX_sum_ijk_tau_XX_mn(nb_pairs, (ij_t, ik_t, jk_t),
tr_p, dp_dt_e)
# Term 4
ddpdRdxi = ddphi_cp[2]
ddpdRdxi = ddpdRdxi[ij_t]
dtdRX = dtheta_qt
ddp_dt_rij_rX = ein('t,Xt,ta,tXb->tXab', 2 * ddpdRdxi, dtdRX,
r_tqc[:, 0], r_tqc)
H_pcc += self.sum_X_sum_ijk_tau_ijX_mn(nb_pairs, (ij_t, ik_t, jk_t),
tr_p, ddp_dt_rij_rX)
H_pcc -= self._assemble_triplet_to_pair(tr_p[ij_t], ddp_dt_rij_rX[:, 0],
nb_pairs)
# Term 5
ddpddxi = ddphi_cp[1]
ddpddxi = ddpddxi[ij_t]
dtdRX = dtheta_qt
# Pair
dtdRp = self._assemble_triplet_to_pair(ij_t, dtdRX[0], nb_pairs)
H_pcc += ein('p,p,p,pa,pb->pab',
-2 * ddphi_cp[1], dtdRp, dtdRp, r_pc, r_pc)
# Triplet
dtdRx_rx = ein('Xt,tXa->tXa', dtdRX, r_tqc)
ddp_dtdRx_rx_dtdRy_ry = ein(
't,tXa,tYb->tXYab',
2 * ddpddxi,
self._assemble_triplet_to_pair(ij_t, dtdRx_rx, nb_pairs)[ij_t],
dtdRx_rx
)
H_pcc += self.sum_X_sum_ijk_tau_ij_XOR_X_mn(
nb_pairs, (ij_t, ik_t, jk_t),
tr_p, ddp_dtdRx_rx_dtdRy_ry[:, 0]
)
# Quadruplets
H_pcc -= self._assemble_triplet_to_pair(ik_t, ddp_dtdRx_rx_dtdRy_ry[:, 1, 1], nb_pairs)
H_pcc -= self._assemble_triplet_to_pair(jk_t, ddp_dtdRx_rx_dtdRy_ry[:, 2, 2], nb_pairs)
H_pcc -= self._assemble_triplet_to_pair(ik_t, ddp_dtdRx_rx_dtdRy_ry[:, 1, 2], nb_pairs)
H_pcc -= self._assemble_triplet_to_pair(tr_p[jk_t], ddp_dtdRx_rx_dtdRy_ry[:, 2, 1], nb_pairs)
H_pcc += ein(
'p,pa,pb->pab',
2 * ddphi_cp[1],
self._assemble_triplet_to_pair(ij_t, dtdRx_rx[:, 1], nb_pairs),
self._assemble_triplet_to_pair(ij_t, dtdRx_rx[:, 2], nb_pairs)
)
# Deal with ij_im / ij_in expression
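        # (a clarifying note, inferred from the code below: for each triplet
        # i-m-n we loop over the remaining pairs ij of atom i and accumulate
        # the ddphi/dxi^2 cross terms between triplets (i,j,m) and (i,j,n)
        # onto the Hessian block of pair m-n, i.e. the part of term 5 that
        # couples two different triplets sharing the bond ij)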
for im_in in range(nb_triplets):
pair_im = ij_t[im_in]
pair_in = ik_t[im_in]
pair_mn = jk_t[im_in]
for t in range(first_p[pair_im], first_p[pair_im + 1]):
pair_ij = ik_t[t]
if pair_ij == pair_im or pair_ij == pair_in:
continue
rij_c = r_pc[pair_ij]
rsq_ij = np.sum(rij_c**2)
ddphi_t5 = ddphi_cp[1][pair_ij]
rim_c = r_pc[pair_im]
rin_c = r_pc[pair_in]
rsq_im = np.sum(rim_c**2)
rsq_in = np.sum(rin_c**2)
# Distances jm and jn
rjn_c = rin_c - rij_c
rjm_c = rim_c - rij_c
rsq_jm = np.sum(rjm_c**2)
rsq_jn = np.sum(rjn_c**2)
nati, natj, natm, natn = (
atoms.numbers[i_p[pair_ij]],
atoms.numbers[j_p[pair_ij]],
atoms.numbers[j_p[pair_im]],
atoms.numbers[j_p[pair_in]],
)
# Should return 0-d arrays, convert to int
ijm_type = int(neigh.triplet_type(nati, natj, natm))
ijn_type = int(neigh.triplet_type(nati, natj, natn))
dtheta_t5_mm = self.theta[ijm_type].gradient(rsq_ij,
rsq_im,
rsq_jm)
dtheta_t5_nn = self.theta[ijn_type].gradient(rsq_ij,
rsq_in,
rsq_jn)
H5 = np.outer(dtheta_t5_mm[1] * rim_c, dtheta_t5_nn[1] * rin_c)
H5 += np.outer(dtheta_t5_mm[2] * rjm_c, dtheta_t5_nn[2] * rjn_c)
H5 += 2 * np.outer(dtheta_t5_mm[1] * rim_c,
dtheta_t5_nn[2] * rjn_c)
H5 *= ddphi_t5
H_pcc[pair_mn] += H5
# Symmetrization with H_nm
H_pcc += H_pcc.transpose(0, 2, 1)[tr_p]
if format == 'neighbour-list':
return H_pcc, i_p, j_p, r_pc, r_p
# Compute the diagonal elements by bincount the off-diagonal elements
H_acc = -self._assemble_pair_to_atom(i_p, H_pcc, n)
if divide_by_masses:
mass_p = atoms.get_masses()
H_pcc /= np.sqrt(mass_p[i_p] * mass_p[j_p])[_cc]
H_acc /= mass_p[_cc]
H = (
bsr_matrix((H_pcc, j_p, first_n), shape=(3 * n, 3 * n))
+ bsr_matrix((H_acc, np.arange(n), np.arange(n + 1)),
shape=(3 * n, 3 * n))
)
return H
| 23,461 | 33.101744 | 101 | py |
matscipy | matscipy-master/matscipy/calculators/manybody/potentials.py | #
# Copyright 2022 Lucas Frérot (U. Freiburg)
# 2022 Jan Griesser (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Manybody potential definitions."""
import numpy as np
from functools import wraps
from typing import Iterable
from types import SimpleNamespace
from itertools import combinations_with_replacement
from .newmb import Manybody
from ..pair_potential import LennardJonesCut
def distance_defined(cls):
"""
    Decorate a class to help define a potential from the distance.

    Transforms a potential defined in terms of the distance into one
    defined in terms of the squared distance.
"""
old = SimpleNamespace()
old.__call__ = cls.__call__
old.gradient = cls.gradient
old.hessian = cls.hessian
@wraps(cls.__call__)
def call(self, rsq_p, xi_p):
return old.__call__(self, np.sqrt(rsq_p), xi_p)
@wraps(cls.gradient)
def gradient(self, rsq_p, xi_p):
r_p = np.sqrt(rsq_p)
res = old.gradient(self, r_p, xi_p)
res[0] *= 1 / (2 * r_p)
return res
@wraps(cls.hessian)
def hessian(self, rsq_p, xi_p):
r_p = np.sqrt(rsq_p)
e_p = 1 / (2 * r_p)
grad = old.gradient(self, r_p, xi_p)
hess = old.hessian(self, r_p, xi_p)
# Correcting double R derivative
hess[0] *= e_p**2
hess[0] += grad[0] * (-1 / 4) * rsq_p**(-3 / 2)
# Correcting mixed derivative
hess[2] *= e_p
return hess
cls.__call__ = call
cls.gradient = gradient
cls.hessian = hessian
return cls
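# A worked sketch of the chain rule applied above (not part of the original
# source): writing s = r**2, the decorator implements
#
#   dphi/ds = (1 / (2 r)) dphi/dr
#   d^2 phi/ds^2 = (1 / (4 r^2)) d^2 phi/dr^2 - (1 / (4 r^3)) dphi/dr
#
# which is exactly the e_p**2 scaling and the -(1/4) * rsq_p**(-3/2)
# correction applied to hess[0] in `hessian` above.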
def angle_distance_defined(cls):
"""
    Decorate a class to help define a three-body potential from distances.

    Transforms a potential defined in terms of the three distances
    (r_ij, r_ik, r_jk) into one defined in terms of the squared distances.
"""
old = SimpleNamespace()
old.__call__ = cls.__call__
old.gradient = cls.gradient
old.hessian = cls.hessian
@wraps(cls.__call__)
def call(self, rsq_ij, rsq_ik, rsq_jk):
return old.__call__(self, np.sqrt(rsq_ij), np.sqrt(rsq_ik), np.sqrt(rsq_jk))
@wraps(cls.gradient)
def gradient(self, rsq_ij, rsq_ik, rsq_jk):
rij = np.sqrt(rsq_ij)
rik = np.sqrt(rsq_ik)
rjk = np.sqrt(rsq_jk)
grad = old.gradient(self, rij, rik, rjk)
grad[0] *= 1 / (2 * rij)
grad[1] *= 1 / (2 * rik)
grad[2] *= 1 / (2 * rjk)
return grad
@wraps(cls.hessian)
def hessian(self, rsq_ij, rsq_ik, rsq_jk):
rij = np.sqrt(rsq_ij)
rik = np.sqrt(rsq_ik)
rjk = np.sqrt(rsq_jk)
grad = old.gradient(self, rij, rik, rjk)
hess = old.hessian(self, rij, rik, rjk)
# Correction due to derivatives with respect to rsq
hess[0] = hess[0] * (1 / (4 * rsq_ij)) - grad[0] * (1 / (4 * rij**3))
hess[1] = hess[1] * (1 / (4 * rsq_ik)) - grad[1] * (1 / (4 * rik**3))
hess[2] = hess[2] * (1 / (4 * rsq_jk)) - grad[2] * (1 / (4 * rjk**3))
hess[3] = hess[3] * (1 / (4 * rik * rjk))
hess[4] = hess[4] * (1 / (4 * rij * rjk))
hess[5] = hess[5] * (1 / (4 * rij * rik))
return hess
cls.__call__ = call
cls.gradient = gradient
cls.hessian = hessian
return cls
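# The three-body analogue (a sketch, not part of the original source): with
# s_ij = r_ij**2 etc., each mixed derivative picks up one 1/(2r) factor per
# distance, e.g.
#
#   d^2 theta / (ds_ik ds_jk) = (1 / (4 r_ik r_jk)) d^2 theta / (dr_ik dr_jk)
#
# which is the 1 / (4 * rik * rjk) factor applied to hess[3] above.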
class ZeroPair(Manybody.Phi):
"""Implementation of zero pair interaction."""
def __call__(self, r_p, xi_p):
return xi_p
def gradient(self, r_p, xi_p):
return np.stack([
np.zeros_like(r_p),
np.ones_like(xi_p),
])
def hessian(self, r_p, xi_p):
return np.zeros([3] + list(r_p.shape))
class ZeroAngle(Manybody.Theta):
"""Implementation of a zero three-body interaction."""
def __call__(self, R1, R2, R3):
return np.zeros_like(R1)
def gradient(self, R1, R2, R3):
return np.zeros([3] + list(R1.shape))
def hessian(self, R1, R2, R3):
return np.zeros([6] + list(R1.shape))
@distance_defined
class HarmonicPair(Manybody.Phi):
"""
Implementation of a harmonic pair interaction.
"""
def __init__(self, K=1, r0=0):
self.K = K
self.r0 = r0
def __call__(self, r_p, xi_p):
return 0.5 * self.K * (r_p - self.r0)**2 + xi_p
def gradient(self, r_p, xi_p):
return np.stack([
self.K * (r_p - self.r0),
np.ones_like(xi_p),
])
def hessian(self, r_p, xi_p):
return np.stack([
np.full_like(r_p, self.K),
np.zeros_like(xi_p),
np.zeros_like(xi_p),
])
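# Usage sketch (hypothetical values; note the squared-distance calling
# convention introduced by @distance_defined):
#
# >>> phi = HarmonicPair(K=2.0, r0=1.0)
# >>> phi(np.array([1.21]), np.zeros(1))  # r = 1.1, ~array([0.01])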
@angle_distance_defined
class HarmonicAngle(Manybody.Theta):
"""
Implementation of a harmonic angle interaction.
"""
def __init__(self, k0=1, theta0=np.pi/2):
self.k0 = k0
self.theta0 = theta0
def __call__(self, rij, rik, rjk):
r"""
Angle harmonic energy.
"""
f = np.arccos((rij**2 + rik**2 - rjk**2) / (2 * rij * rik))
return 0.5 * self.k0 * (f - self.theta0)**2
def gradient(self, rij, rik, rjk):
r"""First order derivatives of :math:`\Theta` w/r to
:math:`r_{ij}, r_{ik}, r_{jk}`
"""
rsq_ij = rij**2
rsq_ik = rik**2
rsq_jk = rjk**2
# cos of angle
f = (rsq_ij + rsq_ik - rsq_jk) / (2 * rij * rik)
# derivatives with respect to r
df_drij = (rsq_ij - rsq_ik + rsq_jk) / (2 * rsq_ij * rik)
df_drik = (rsq_ik - rsq_ij + rsq_jk) / (2 * rsq_ik * rij)
df_drjk = - rjk / (rij * rik)
# Scalar derivatives
def E(a):
return self.k0 * (a - self.theta0)
def h(f):
with np.errstate(divide="raise"):
d_arccos = -1 / np.sqrt(1 - f**2)
return E(np.arccos(f)) * d_arccos
return h(f) * np.stack([df_drij, df_drik, df_drjk])
def hessian(self, rij, rik, rjk):
r"""Second order derivatives of :math:`\Theta` w/r to
:math:`r_{ij}, r_{ik}, r_{jk}`
"""
rsq_ij = rij**2
rsq_ik = rik**2
rsq_jk = rjk**2
# cos of angle
f = (rsq_ij + rsq_ik - rsq_jk) / (2 * rij * rik)
# first derivatives with respect to r
df_drij = (rsq_ij - rsq_ik + rsq_jk) / (2 * rsq_ij * rik)
df_drik = (rsq_ik - rsq_ij + rsq_jk) / (2 * rsq_ik * rij)
df_drjk = - rjk / (rij * rik)
# second derivatives with respect to r
ddf_drijdrij = (rsq_ik - rsq_jk) / (rij**3 * rik)
ddf_drikdrik = (rsq_ij - rsq_jk) / (rik**3 * rij)
ddf_drjkdrjk = - 1 / (rij * rik)
ddf_drijdrik = - (rsq_ij + rsq_ik + rsq_jk) / (2 * rsq_ij * rsq_ik)
ddf_drijdrjk = rjk / (rik * rsq_ij)
ddf_drikdrjk = rjk / (rij * rsq_ik)
# Scalar functions
dE = lambda a: self.k0 * (a - self.theta0)
ddE = lambda a: self.k0
darcos = lambda x: -1 / np.sqrt(1 - x**2)
ddarcos = lambda x: -x / (1 - x**2)**(3/2)
# Scalar derivative of theta
dtheta_dx = dE(np.arccos(f)) * darcos(f)
ddtheta_dxdx = (
ddE(np.arccos(f)) * darcos(f)**2 + dE(np.arccos(f)) * ddarcos(f)
)
df_prods = np.stack([
df_drij * df_drij,
df_drik * df_drik,
df_drjk * df_drjk,
df_drjk * df_drik,
df_drjk * df_drij,
df_drik * df_drij,
])
ddf = np.stack([
ddf_drijdrij,
ddf_drikdrik,
ddf_drjkdrjk,
ddf_drikdrjk,
ddf_drijdrjk,
ddf_drijdrik,
])
return (
# Derivative of product rule
ddtheta_dxdx[np.newaxis] * df_prods + dtheta_dx[np.newaxis] * ddf
)
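# Self-check sketch (not part of the original source): the gradient, taken
# w.r.t. the squared distances, can be verified by central finite differences:
#
# >>> th = HarmonicAngle(k0=1.0, theta0=np.pi / 2)
# >>> rsq = np.array([1.0]), np.array([1.0]), np.array([1.5])
# >>> eps = 1e-6
# >>> num = (th(rsq[0] + eps, *rsq[1:])
# ...        - th(rsq[0] - eps, *rsq[1:])) / (2 * eps)
# >>> np.allclose(num, th.gradient(*rsq)[0], atol=1e-5)  # -> True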
@distance_defined
class LennardJones(Manybody.Phi):
"""Implementation of LennardJones potential."""
def __init__(self, epsilon=1, sigma=1, cutoff=np.inf):
self.lj = LennardJonesCut(epsilon, sigma, cutoff)
def __call__(self, r, xi):
return self.lj(r) + xi
def gradient(self, r, xi):
return np.stack([
self.lj.first_derivative(r),
np.ones_like(xi),
])
def hessian(self, r, xi):
return np.stack([
self.lj.second_derivative(r),
np.zeros_like(xi),
np.zeros_like(xi),
])
@distance_defined
class BornMayerCut(Manybody.Phi):
"""
Implementation of the Born-Mayer potential.
Energy is shifted to zero at the cutoff
"""
def __init__(self, A=1, C=1, D=1, sigma=1, rho=1, cutoff=np.inf):
self.A, self.C, self.D = A, C, D
self.sigma = sigma
self.rho = rho
self.cutoff = cutoff
self.offset = (
self.A * np.exp((self.sigma - cutoff) / self.rho)
- self.C / cutoff**6
+ self.D / cutoff**8
)
def __call__(self, r, xi):
return (
self.A * np.exp((self.sigma - r) / self.rho)
- self.C / r**6
+ self.D / r**8
- self.offset + xi
)
def gradient(self, r, xi):
return np.stack([
(-self.A / self.rho) * np.exp((self.sigma - r) / self.rho)
+ 6 * self.C / r**7
- 8 * self.D / r**9,
np.ones_like(xi),
])
def hessian(self, r, xi):
return np.stack([
(self.A / self.rho**2) * np.exp((self.sigma - r) / self.rho)
- 42 * self.C / r**8
+ 72 * self.D / r**10,
np.zeros_like(xi),
np.zeros_like(xi),
])
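# Sanity-check sketch (hypothetical parameters): the stored offset makes the
# pair energy vanish exactly at the cutoff distance:
#
# >>> bm = BornMayerCut(A=1.0, C=1.0, D=1.0, sigma=1.0, rho=1.0, cutoff=2.5)
# >>> bm(np.array([2.5**2]), np.zeros(1))  # ~array([0.])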
@distance_defined
class StillingerWeberPair(Manybody.Phi):
"""
Implementation of the Stillinger-Weber Potential
"""
def __init__(self, parameters, cutoff=None):
# Maybe set only parameters needed for \Phi
self.ref = parameters['__ref__']
self.el = parameters['el']
self.epsilon = parameters['epsilon']
self.sigma = parameters['sigma']
self.costheta0 = parameters['costheta0']
self.A = parameters['A']
self.B = parameters['B']
self.p = parameters['p']
self.q = parameters['q']
self.a = parameters['a']
self.lambda1 = parameters['lambda1']
self.gamma = parameters['gamma']
self.cutoff = (
parameters['a'] * parameters['sigma']
if cutoff is None else cutoff
)
def __call__(self, r_p, xi_p):
s = (
self.B * np.power(self.sigma / r_p, self.p)
- np.power(self.sigma / r_p, self.q)
)
h = np.exp(self.sigma / (r_p - self.a * self.sigma))
return np.where(
r_p <= self.cutoff,
self.A * self.epsilon * s * h + self.lambda1 * xi_p,
0.0,
)
def gradient(self, r_p, xi_p):
sigma_r_p = np.power(self.sigma / r_p, self.p)
sigma_r_q = np.power(self.sigma / r_p, self.q)
h = np.exp(self.sigma / (r_p - self.a * self.sigma))
dh = -self.sigma / np.power(r_p - self.a * self.sigma, 2) * h
s = self.B * sigma_r_p - sigma_r_q
ds = -self.p * self.B * sigma_r_p / r_p + self.q * sigma_r_q / r_p
return np.where(
r_p <= self.cutoff,
np.stack([
self.A * self.epsilon * (ds * h + dh * s),
self.lambda1 * np.ones_like(xi_p)
]),
0.0,
)
def hessian(self, r_p, xi_p):
sigma_r_p = np.power(self.sigma / r_p, self.p)
sigma_r_q = np.power(self.sigma / r_p, self.q)
h = np.exp(self.sigma / (r_p - self.a * self.sigma))
dh = -self.sigma / np.power(r_p - self.a * self.sigma, 2) * h
ddh = h * self.sigma**2 / np.power(r_p - self.a * self.sigma, 4)
ddh += h * 2 * self.sigma / np.power(r_p - self.a * self.sigma, 3)
s = self.B * sigma_r_p - sigma_r_q
ds = -self.p * self.B * sigma_r_p / r_p + self.q * sigma_r_q / r_p
dds = self.p * self.B * sigma_r_p / r_p**2 * (1 + self.p)
dds -= self.q * sigma_r_q / r_p**2 * (1 + self.q)
return np.where(
r_p <= self.cutoff,
np.stack([
self.A * self.epsilon * (h * dds + 2 * ds * dh + s * ddh),
np.zeros_like(xi_p),
np.zeros_like(xi_p)
]),
0.0,
)
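# The parameter dictionary follows the convention of matscipy's
# Stillinger-Weber parameter sets; a hypothetical minimal example
# (placeholder values, not a fitted parametrization):
#
# >>> params = {
# ...     '__ref__': 'example', 'el': 'Si',
# ...     'epsilon': 2.17, 'sigma': 2.095, 'costheta0': -1 / 3,
# ...     'A': 7.05, 'B': 0.602, 'p': 4, 'q': 0, 'a': 1.8,
# ...     'lambda1': 21.0, 'gamma': 1.2,
# ... }
# >>> phi = StillingerWeberPair(params)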
@angle_distance_defined
class StillingerWeberAngle(Manybody.Theta):
"""
Implementation of the Stillinger-Weber Potential
"""
def __init__(self, parameters):
        # Maybe set only parameters needed for \Theta
self.ref = parameters['__ref__']
self.el = parameters['el']
self.epsilon = parameters['epsilon']
self.sigma = parameters['sigma']
self.costheta0 = parameters['costheta0']
self.A = parameters['A']
self.B = parameters['B']
self.p = parameters['p']
self.q = parameters['q']
self.a = parameters['a']
self.lambda1 = parameters['lambda1']
self.gamma = parameters['gamma']
self.cutoff = parameters['a'] * parameters['sigma']
def __call__(self, rij, rik, rjk):
# Squared distances
rsq_ij = rij**2
rsq_ik = rik**2
rsq_jk = rjk**2
# cos of angle
cos = (rsq_ij + rsq_ik - rsq_jk) / (2 * rij * rik)
# Functions
m = np.exp(self.gamma * self.sigma / (rij - self.a * self.sigma))
n = np.exp(self.gamma * self.sigma / (rik - self.a * self.sigma))
g = np.power(cos + self.costheta0, 2)
return np.where(rik <= self.cutoff, self.epsilon * g * m * n, 0.0)
def gradient(self, rij, rik, rjk):
# Squared distances
rsq_ij = rij**2
rsq_ik = rik**2
rsq_jk = rjk**2
# cos of angle
cos = (rsq_ij + rsq_ik - rsq_jk) / (2 * rij * rik)
# Functions
m = np.exp(self.gamma * self.sigma / (rij - self.a * self.sigma))
n = np.exp(self.gamma * self.sigma / (rik - self.a * self.sigma))
g = np.power(cos + self.costheta0, 2)
# Derivative of scalar functions
dg_dcos = 2 * (cos + self.costheta0)
dm_drij = - self.gamma * self.sigma / np.power(rij - self.a * self.sigma, 2) * m
dn_drik = - self.gamma * self.sigma / np.power(rik - self.a * self.sigma, 2) * n
# Derivative of cosine
dg_drij = dg_dcos * (rsq_ij - rsq_ik + rsq_jk) / (2 * rsq_ij * rik)
dg_drik = dg_dcos * (rsq_ik - rsq_ij + rsq_jk) / (2 * rsq_ik * rij)
dg_drjk = - dg_dcos * rjk / (rij * rik)
return self.epsilon * np.where(rik <= self.cutoff,
np.stack([
dg_drij * m * n + dm_drij * g * n,
dg_drik * m * n + dn_drik * g * m,
dg_drjk * m * n
]), 0.0)
def hessian(self, rij, rik, rjk):
# Squared distances
rsq_ij = rij**2
rsq_ik = rik**2
rsq_jk = rjk**2
# cos of angle
cos = (rsq_ij + rsq_ik - rsq_jk) / (2 * rij * rik)
# Functions
m = np.exp(self.gamma * self.sigma / (rij - self.a * self.sigma))
n = np.exp(self.gamma * self.sigma / (rik - self.a * self.sigma))
g = np.power(cos + self.costheta0, 2)
# Derivative of scalar functions
dg_dcos = 2 * (cos + self.costheta0)
ddg_ddcos = 2 * np.ones_like(rij)
dm_drij = - self.gamma * self.sigma / np.power(rij - self.a * self.sigma, 2) * m
ddm_ddrij = 2 * self.gamma * self.sigma / np.power(rij - self.a * self.sigma, 3)
ddm_ddrij += np.power(self.gamma * self.sigma, 2) / np.power(rij - self.a * self.sigma, 4)
ddm_ddrij *= m
dn_drik = - self.gamma * self.sigma / np.power(rik - self.a * self.sigma, 2) * n
ddn_ddrik = 2 * self.gamma * self.sigma / np.power(rik - self.a * self.sigma, 3)
ddn_ddrik += np.power(self.gamma * self.sigma, 2) / np.power(rik - self.a * self.sigma, 4)
ddn_ddrik *= n
# First derivative of cos with respect to r
dcos_drij = (rsq_ij - rsq_ik + rsq_jk) / (2 * rsq_ij * rik)
dcos_drik = (rsq_ik - rsq_ij + rsq_jk) / (2 * rsq_ik * rij)
dcos_drjk = - rjk / (rij * rik)
# Second derivatives with respect to r
ddcos_drijdrij = (rsq_ik - rsq_jk) / (rij**3 * rik)
ddcos_drikdrik = (rsq_ij - rsq_jk) / (rik**3 * rij)
ddcos_drjkdrjk = - 1 / (rij * rik)
ddcos_drijdrik = - (rsq_ij + rsq_ik + rsq_jk) / (2 * rsq_ij * rsq_ik)
ddcos_drijdrjk = rjk / (rik * rsq_ij)
ddcos_drikdrjk = rjk / (rij * rsq_ik)
# First and second order derivatives of g
dg_drij = dg_dcos * dcos_drij
dg_drik = dg_dcos * dcos_drik
dg_drjk = dg_dcos * dcos_drjk
ddg_ddrij = ddg_ddcos * dcos_drij * dcos_drij + dg_dcos * ddcos_drijdrij
ddg_ddrik = ddg_ddcos * dcos_drik * dcos_drik + dg_dcos * ddcos_drikdrik
ddg_ddrjk = ddg_ddcos * dcos_drjk * dcos_drjk + dg_dcos * ddcos_drjkdrjk
ddg_drjkdrik = dcos_drik * ddg_ddcos * dcos_drjk + dg_dcos * ddcos_drikdrjk
ddg_drjkdrij = dcos_drij * ddg_ddcos * dcos_drjk + dg_dcos * ddcos_drijdrjk
ddg_drikdrij = dcos_drij * ddg_ddcos * dcos_drik + dg_dcos * ddcos_drijdrik
        return self.epsilon * np.where(
            rik <= self.cutoff,
            np.stack([
                n * (ddg_ddrij * m + 2 * dg_drij * dm_drij + ddm_ddrij * g),
                m * (ddg_ddrik * n + 2 * dn_drik * dg_drik + ddn_ddrik * g),
                ddg_ddrjk * m * n,
                m * (ddg_drjkdrik * n + dn_drik * dg_drjk),
                n * (ddg_drjkdrij * m + dm_drij * dg_drjk),
                ddg_drikdrij * m * n + dg_drij * dn_drik * m
                + dm_drij * dg_drik * n + dm_drij * dn_drik * g,
            ]), 0.0)
@distance_defined
class KumagaiPair(Manybody.Phi):
"""
Implementation of Phi for the Kumagai potential
"""
def __init__(self, parameters):
# Maybe set only parameters needed for \Phi
self.ref = parameters['__ref__']
self.el = parameters['el']
self.A = parameters["A"]
self.B = parameters["B"]
self.lambda_1 = parameters["lambda_1"]
self.lambda_2 = parameters["lambda_2"]
self.eta = parameters["eta"]
self.delta = parameters["delta"]
self.alpha = parameters["alpha"]
self.c_1 = parameters["c_1"]
self.c_2 = parameters["c_2"]
self.c_3 = parameters["c_3"]
self.c_4 = parameters["c_4"]
self.c_5 = parameters["c_5"]
self.h = parameters["h"]
self.R_1 = parameters["R_1"]
self.R_2 = parameters["R_2"]
def __call__(self, r_p, xi_p):
# Cutoff
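        # (note: this polynomial-in-cosine form is C^2 -- fc equals 1 at R_1
        # and 0 at R_2, and both dfc and ddfc vanish at the two endpoints,
        # which keeps the Hessian continuous across the cutoff)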
fc = np.where(r_p <= self.R_1, 1.0,
np.where(r_p >= self.R_2, 0.0,
1/2 + 9 / 16 * np.cos(np.pi * (r_p - self.R_1) / (self.R_2 - self.R_1))
- 1 / 16 * np.cos(3 * np.pi * (r_p - self.R_1) / (self.R_2 - self.R_1))
)
)
fr = self.A * np.exp(-self.lambda_1 * r_p)
fa = -self.B * np.exp(-self.lambda_2 * r_p)
b = 1 / np.power(1 + xi_p**self.eta, self.delta)
return fc * (fr + b * fa)
def gradient(self, r_p, xi_p):
# Cutoff function
fc = np.where(r_p <= self.R_1, 1.0,
np.where(r_p >= self.R_2, 0.0,
1/2 + 9 / 16 * np.cos(np.pi * (r_p - self.R_1) / (self.R_2 - self.R_1))
- 1 / 16 * np.cos(3 * np.pi * (r_p - self.R_1) / (self.R_2 - self.R_1))
)
)
dfc = np.where(r_p <= self.R_1, 0.0,
np.where(r_p >= self.R_2, 0.0,
3 * np.pi / (16 * (self.R_2 - self.R_1)) * (
np.sin(3 * np.pi * (r_p - self.R_1) / (self.R_2 - self.R_1))
- 3 * np.sin(np.pi * (r_p - self.R_1) / (self.R_2 - self.R_1))
)
)
)
# Repulsive and attractive
fr = self.A * np.exp(-self.lambda_1 * r_p)
dfr = - self.lambda_1 * fr
fa = -self.B * np.exp(-self.lambda_2 * r_p)
dfa = - self.lambda_2 * fa
# Bond-order expression
b = 1 / np.power(1 + xi_p**self.eta, self.delta)
db = - self.delta * self.eta * np.power(xi_p, self.eta - 1) * (1 + xi_p**self.eta)**(-self.delta - 1)
return np.stack([
dfc * (fr + b * fa) + fc * (dfr + b * dfa),
fc * fa * db
])
def hessian(self, r_p, xi_p):
# Cutoff function
fc = np.where(r_p <= self.R_1, 1.0,
np.where(r_p >= self.R_2, 0.0,
1/2 + 9 / 16 * np.cos(np.pi * (r_p - self.R_1) / (self.R_2 - self.R_1))
- 1 / 16 * np.cos(3 * np.pi * (r_p - self.R_1) / (self.R_2 - self.R_1))
)
)
dfc = np.where(r_p <= self.R_1, 0.0,
np.where(r_p >= self.R_2, 0.0,
3 * np.pi / (16 * (self.R_2 - self.R_1)) * (
np.sin(3 * np.pi * (r_p - self.R_1) / (self.R_2 - self.R_1))
- 3 * np.sin(np.pi * (r_p - self.R_1) / (self.R_2 - self.R_1))
)
)
)
ddfc = np.where(r_p <= self.R_1, 0.0,
np.where(r_p >= self.R_2, 0.0,
9 * np.pi**2 / (16 * np.power(self.R_2 - self.R_1, 2)) * (
np.cos(3 * np.pi * (r_p - self.R_1) / (self.R_2 - self.R_1))
- np.cos(np.pi * (r_p - self.R_1) / (self.R_2 - self.R_1))
)
)
)
# Repulsive and attractive
fr = self.A * np.exp(-self.lambda_1 * r_p)
dfr = - self.lambda_1 * fr
ddfr = self.lambda_1**2 * fr
fa = -self.B * np.exp(-self.lambda_2 * r_p)
dfa = - self.lambda_2 * fa
ddfa = self.lambda_2**2 * fa
# Bond-order expression
b = 1 / np.power(1 + xi_p**self.eta, self.delta)
db = - self.delta * self.eta * np.power(xi_p, self.eta - 1) * (1 + xi_p**self.eta)**(-self.delta - 1)
ddb = np.power(xi_p, 2 * self.eta - 2) * (self.eta * self.delta + 1)
if self.eta != 1.0:
            ddb -= np.power(xi_p, self.eta - 2) * (self.eta - 1)
ddb *= self.delta * self.eta * np.power(1 + xi_p**self.eta, -self.delta - 2)
return np.stack([
ddfc * (fr + b * fa) + 2 * dfc * (dfr + b * dfa) + fc * (ddfr + b * ddfa),
fc * fa * ddb,
dfc * fa * db + fc * dfa * db
])
@angle_distance_defined
class KumagaiAngle(Manybody.Theta):
"""
Implementation of Theta for the Kumagai potential
"""
def __init__(self, parameters):
        # Maybe set only parameters needed for \Theta
self.ref = parameters['__ref__']
self.el = parameters['el']
self.A = parameters["A"]
self.B = parameters["B"]
self.lambda_1 = parameters["lambda_1"]
self.lambda_2 = parameters["lambda_2"]
self.eta = parameters["eta"]
self.delta = parameters["delta"]
self.alpha = parameters["alpha"]
self.beta = parameters["beta"]
self.c_1 = parameters["c_1"]
self.c_2 = parameters["c_2"]
self.c_3 = parameters["c_3"]
self.c_4 = parameters["c_4"]
self.c_5 = parameters["c_5"]
self.h = parameters["h"]
self.R_1 = parameters["R_1"]
self.R_2 = parameters["R_2"]
def __call__(self, rij, rik, rjk):
# Squared distances
rsq_ij = rij**2
rsq_ik = rik**2
rsq_jk = rjk**2
# cos of angle
cos = (rsq_ij + rsq_ik - rsq_jk) / (2 * rij * rik)
# Cutoff
fc = np.where(rik <= self.R_1, 1.0,
np.where(rik >= self.R_2, 0.0,
(1/2 + (9 / 16) * np.cos(np.pi * (rik - self.R_1) / (self.R_2 - self.R_1))
- (1 / 16) * np.cos(3 * np.pi * (rik - self.R_1) / (self.R_2 - self.R_1)))
)
)
# Functions
m = np.exp(self.alpha * np.power(rij - rik, self.beta))
g0 = (self.c_2 * np.power(self.h - cos, 2)) / (self.c_3 + np.power(self.h - cos, 2))
ga = 1 + self.c_4 * np.exp(-self.c_5 * np.power(self.h - cos, 2))
g = self.c_1 + g0 * ga
return fc * g * m
def gradient(self, rij, rik, rjk):
# Squared distances
rsq_ij = rij**2
rsq_ik = rik**2
rsq_jk = rjk**2
# cos of angle
cos = (rsq_ij + rsq_ik - rsq_jk) / (2 * rij * rik)
# First derivative of cos with respect to r
dcos_drij = (rsq_ij - rsq_ik + rsq_jk) / (2 * rsq_ij * rik)
dcos_drik = (rsq_ik - rsq_ij + rsq_jk) / (2 * rsq_ik * rij)
dcos_drjk = - rjk / (rij * rik)
# Cutoff
fc = np.where(rik <= self.R_1, 1.0,
np.where(rik >= self.R_2, 0.0,
(1/2 + (9 / 16) * np.cos(np.pi * (rik - self.R_1) / (self.R_2 - self.R_1))
- (1 / 16) * np.cos(3 * np.pi * (rik - self.R_1) / (self.R_2 - self.R_1)))
)
)
dfc = np.where(rik <= self.R_1, 0.0,
np.where(rik >= self.R_2, 0.0,
3 * np.pi / (16 * (self.R_2 - self.R_1)) * (
np.sin(3 * np.pi * (rik - self.R_1) / (self.R_2 - self.R_1))
- 3 * np.sin(np.pi * (rik - self.R_1) / (self.R_2 - self.R_1)))
)
)
# Functions
m = np.exp(self.alpha * np.power(rij - rik, self.beta))
dm_drij = self.alpha * self.beta * np.power(rij - rik, self.beta - 1) * m
dm_drik = - dm_drij
g0 = (self.c_2 * np.power(self.h - cos, 2)) / (self.c_3 + np.power(self.h - cos, 2))
dg0_dcos = (-2 * self.c_2 * self.c_3 * (self.h - cos)) / np.power(self.c_3 + np.power(self.h - cos, 2), 2)
ga = 1 + self.c_4 * np.exp(-self.c_5 * np.power(self.h - cos, 2))
dga_dcos = 2 * self.c_4 * self.c_5 * (self.h - cos) * np.exp(-self.c_5 * np.power(self.h - cos, 2))
g = self.c_1 + g0 * ga
dg_dcos = dg0_dcos * ga + g0 * dga_dcos
dg_drij = dg_dcos * dcos_drij
dg_drik = dg_dcos * dcos_drik
dg_drjk = dg_dcos * dcos_drjk
return np.stack([
fc * dg_drij * m + fc * g * dm_drij,
dfc * g * m + fc * dg_drik * m + fc * g * dm_drik,
fc * dg_drjk * m
])
def hessian(self, rij, rik, rjk):
# Squared distances
rsq_ij = rij**2
rsq_ik = rik**2
rsq_jk = rjk**2
# cos of angle
cos = (rsq_ij + rsq_ik - rsq_jk) / (2 * rij * rik)
# First derivative of cos with respect to r
dcos_drij = (rsq_ij - rsq_ik + rsq_jk) / (2 * rsq_ij * rik)
dcos_drik = (rsq_ik - rsq_ij + rsq_jk) / (2 * rsq_ik * rij)
dcos_drjk = - rjk / (rij * rik)
# Second derivatives with respect to r
ddcos_drijdrij = (rsq_ik - rsq_jk) / (rij**3 * rik)
ddcos_drikdrik = (rsq_ij - rsq_jk) / (rik**3 * rij)
ddcos_drjkdrjk = - 1 / (rij * rik)
ddcos_drijdrik = - (rsq_ij + rsq_ik + rsq_jk) / (2 * rsq_ij * rsq_ik)
ddcos_drijdrjk = rjk / (rik * rsq_ij)
ddcos_drikdrjk = rjk / (rij * rsq_ik)
# Cutoff
fc = np.where(rik <= self.R_1, 1.0,
np.where(rik > self.R_2, 0.0,
1/2 + 9 / 16 * np.cos(np.pi * (rik - self.R_1) / (self.R_2 - self.R_1))
- 1 / 16 * np.cos(3 * np.pi * (rik - self.R_1) / (self.R_2 - self.R_1))
)
)
dfc = np.where(rik <= self.R_1, 0.0,
np.where(rik >= self.R_2, 0.0,
3 * np.pi / (16 * (self.R_2 - self.R_1)) * (
np.sin(3 * np.pi * (rik - self.R_1) / (self.R_2 - self.R_1))
- 3 * np.sin(np.pi * (rik - self.R_1) / (self.R_2 - self.R_1))
)
)
)
ddfc = np.where(rik <= self.R_1, 0.0,
np.where(rik > self.R_2, 0.0,
9 * np.pi**2 / (16 * np.power(self.R_2 - self.R_1, 2)) * (
np.cos(3 * np.pi * (rik - self.R_1) / (self.R_2 - self.R_1)) -
np.cos(np.pi * (rik - self.R_1) / (self.R_2 - self.R_1))
)
)
)
# Functions
m = np.exp(self.alpha * np.power(rij - rik, self.beta))
dm_drij = self.alpha * self.beta * np.power(rij - rik, self.beta - 1) * m
dm_drik = - dm_drij
ddm_ddrij = np.power(self.alpha * self.beta * np.power(rij - rik, self.beta - 1), 2)
if self.beta != 1.0:
ddm_ddrij += self.alpha * self.beta * (self.beta - 1) * np.power(rij - rik, self.beta - 2)
ddm_ddrij *= m
ddm_ddrik = ddm_ddrij
ddm_drijdrik = - ddm_ddrij
# New
g0 = (self.c_2 * np.power(self.h - cos, 2)) / (self.c_3 + np.power(self.h - cos, 2))
dg0_dcos = (-2 * self.c_2 * self.c_3 * (self.h - cos)) / np.power(self.c_3 + np.power(self.h - cos, 2), 2)
ddg0_ddcos = 2 * self.c_2 * self.c_3 * (self.c_3 - 3 * np.power(self.h - cos, 2)) / np.power(self.c_3 + np.power(self.h - cos, 2), 3)
ga = 1 + self.c_4 * np.exp(-self.c_5 * np.power(self.h - cos, 2))
dga_dcos = 2 * self.c_4 * self.c_5 * (self.h - cos) * np.exp(-self.c_5 * np.power(self.h - cos, 2))
ddga_ddcos = 2 * self.c_5 * np.power(self.h - cos, 2) - 1
ddga_ddcos *= 2 * self.c_4 * self.c_5 * np.exp(-self.c_5 * np.power(self.h - cos, 2))
g = self.c_1 + g0 * ga
dg_dcos = dg0_dcos * ga + g0 * dga_dcos
ddg_ddcos = ddg0_ddcos * ga + 2 * dg0_dcos * dga_dcos + g0 * ddga_ddcos
dg_drij = dg_dcos * dcos_drij
dg_drik = dg_dcos * dcos_drik
dg_drjk = dg_dcos * dcos_drjk
ddg_drijdrij = ddg_ddcos * dcos_drij * dcos_drij + dg_dcos * ddcos_drijdrij
ddg_drikdrik = ddg_ddcos * dcos_drik * dcos_drik + dg_dcos * ddcos_drikdrik
ddg_drjkdrjk = ddg_ddcos * dcos_drjk * dcos_drjk + dg_dcos * ddcos_drjkdrjk
ddg_drikdrjk = ddg_ddcos * dcos_drik * dcos_drjk + dg_dcos * ddcos_drikdrjk
ddg_drijdrjk = ddg_ddcos * dcos_drij * dcos_drjk + dg_dcos * ddcos_drijdrjk
ddg_drijdrik = ddg_ddcos * dcos_drij * dcos_drik + dg_dcos * ddcos_drijdrik
return np.stack([
fc * (ddg_drijdrij * m + dg_drij * dm_drij + dg_drij * dm_drij + g * ddm_ddrij),
ddfc * g * m + dfc * dg_drik * m + dfc * g * dm_drik + \
dfc * dg_drik * m + fc * ddg_drikdrik * m + fc * dg_drik * dm_drik + \
dfc * g * dm_drik + fc * dg_drik * dm_drik + fc * g * ddm_ddrik,
fc * ddg_drjkdrjk * m,
dfc * dg_drjk * m + fc * ddg_drikdrjk * m + fc * dg_drjk * dm_drik,
fc * ddg_drijdrjk * m + fc * dg_drjk * dm_drij,
dfc * dg_drij * m + fc * ddg_drijdrik * m + fc * dg_drij * dm_drik + \
dfc * g * dm_drij + fc * dg_drik * dm_drij + fc * g * ddm_drijdrik
])
@distance_defined
class TersoffBrennerPair(Manybody.Phi):
"""
Implementation of Phi for Tersoff-Brenner potentials
"""
def __init__(self, parameters):
self.ref = parameters['__ref__']
self.style = parameters['style'].lower()
self.el = parameters['el']
self.c = np.array(parameters['c'])
self.d = np.array(parameters['d'])
self.h = np.array(parameters['h'])
self.R1 = np.array(parameters['R1'])
self.R2 = np.array(parameters['R2'])
if self.style == 'tersoff':
# These are Tersoff-style parameters. The symbols follow the notation in
# Tersoff J., Phys. Rev. B 39, 5566 (1989)
#
# In particular, pair terms are characterized by A, B, lam, mu and parameters for the three body terms ijk
# depend only on the type of atom i
self.A = np.array(parameters['A'])
self.B = np.array(parameters['B'])
self.lambda1 = np.array(parameters['lambda1'])
self.mu = np.array(parameters['mu'])
self.beta = np.array(parameters['beta'])
self.lambda3 = np.array(parameters['lambda3'])
self.chi = np.array(parameters['chi'])
self.n = np.array(parameters['n'])
elif self.style == 'brenner':
# These are Brenner/Erhart-Albe-style parameters. The symbols follow the notation in
# Brenner D., Phys. Rev. B 42, 9458 (1990) and
# Erhart P., Albe K., Phys. Rev. B 71, 035211 (2005)
#
# In particular, pairs terms are characterized by D0, S, beta, r0, the parameters n, chi are always unity and
# parameters for the three body terms ijk depend on the type of the bond ij
_D0 = np.array(parameters['D0'])
_S = np.array(parameters['S'])
_r0 = np.array(parameters['r0'])
_beta = np.array(parameters['beta'])
_mu = np.array(parameters['mu'])
gamma = np.array(parameters['gamma'])
# Convert to Tersoff parameters
self.lambda3 = 2 * _mu
self.lam = _beta * np.sqrt(2 * _S)
self.mu = _beta * np.sqrt(2 / _S)
            self.A = _D0 / (_S - 1) * np.exp(self.lam * _r0)
            self.B = _S * _D0 / (_S - 1) * np.exp(self.mu * _r0)
else:
raise ValueError(f'Unknown parameter style {self.style}')
def __call__(self, r_p, xi_p):
# Cutoff function
fc = np.where(r_p <= self.R1, 1.0,
np.where(r_p > self.R2, 0.0,
(1 + np.cos(np.pi * (r_p - self.R1) / (self.R2 - self.R1))) / 2
)
)
# Attractive interaction
fa = -self.B * np.exp(-self.mu * r_p)
# Repulsive interaction
fr = self.A * np.exp(-self.lambda1 * r_p)
# Bond-order parameter
if self.style == 'tersoff':
b = self.chi / np.power(1 + np.power(self.beta * xi_p, self.n), 1 / (2 * self.n))
else:
            raise ValueError(f'Brenner style not implemented: {self.style}')
return fc * (fr + b * fa)
def gradient(self, r_p, xi_p):
# Cutoff function
fc = np.where(r_p <= self.R1, 1.0,
np.where(r_p > self.R2, 0.0,
(1 + np.cos(np.pi * (r_p - self.R1) / (self.R2 - self.R1))) / 2
)
)
dfc = np.where(r_p <= self.R1, 0.0,
np.where(r_p > self.R2, 0.0,
-np.pi / (2 * (self.R2 - self.R1)) * np.sin(np.pi * (r_p - self.R1) / (self.R2 - self.R1))
)
)
# Attractive interaction
fa = -self.B * np.exp(-self.mu * r_p)
dfa = -self.mu * fa
# Repulsive interaction
fr = self.A * np.exp(-self.lambda1 * r_p)
dfr = -self.lambda1 * fr
# Bond-order parameter
if self.style == 'tersoff':
b = self.chi * np.power(1 + np.power(self.beta * xi_p, self.n), -1 / (2 * self.n))
db = -0.5 * self.beta * self.chi * np.power(self.beta * xi_p, self.n - 1)
db *= 1 / np.power(1 + np.power(self.beta * xi_p, self.n), 1 + 1 / (2 * self.n))
else:
            raise ValueError(f'Brenner style not implemented: {self.style}')
return np.stack([
dfc * (fr + b * fa) + fc * (dfr + b * dfa),
fc * fa * db
])
def hessian(self, r_p, xi_p):
# Cutoff function
fc = np.where(r_p <= self.R1, 1.0,
np.where(r_p > self.R2, 0.0,
(1 + np.cos(np.pi * (r_p - self.R1) / (self.R2 - self.R1))) / 2
)
)
dfc = np.where(r_p <= self.R1, 0.0,
np.where(r_p > self.R2, 0.0,
-np.pi / (2 * (self.R2 - self.R1)) * np.sin(np.pi * (r_p - self.R1) / (self.R2 - self.R1))
)
)
ddfc = np.where(r_p <= self.R1, 0.0,
np.where(r_p > self.R2, 0.0,
-np.pi**2 / (2 * np.power(self.R2 - self.R1, 2)) * np.cos(np.pi * (r_p - self.R1) / (self.R2 - self.R1))
)
)
# Attractive interaction
fa = -self.B * np.exp(-self.mu * r_p)
dfa = -self.mu * fa
ddfa = self.mu**2 * fa
# Repulsive interaction
fr = self.A * np.exp(-self.lambda1 * r_p)
dfr = -self.lambda1 * fr
ddfr = self.lambda1**2 * fr
# Bond-order parameter
if self.style == 'tersoff':
b = self.chi * np.power(1 + np.power(self.beta * xi_p, self.n), -1 / (2 * self.n))
db = -0.5 * self.beta * self.chi * np.power(self.beta * xi_p, self.n - 1)
db *= 1 / np.power(1 + np.power(self.beta * xi_p, self.n), 1 + 1 / (2 * self.n))
ddb = (self.n - 1) * np.power(self.beta * xi_p, self.n - 2) / np.power(1 + np.power(self.beta * xi_p, self.n), 1 + 1 / (2 * self.n))
ddb += (-self.n - 0.5) * np.power(self.beta * xi_p, 2 * self.n - 2) / np.power(1 + np.power(self.beta * xi_p, self.n), 2 + 1 / (2 * self.n))
ddb *= -0.5 * self.chi * self.beta**2
else:
            raise ValueError(f'Brenner style not implemented: {self.style}')
return np.stack([
ddfc * (fr + b * fa) + 2 * dfc * (dfr + b * dfa) + fc * (ddfr + b * ddfa),
fc * fa * ddb,
dfc * fa * db + fc * dfa * db
])
@angle_distance_defined
class TersoffBrennerAngle(Manybody.Theta):
"""
Implementation of Theta for Tersoff-Brenner potentials
"""
def __init__(self, parameters):
self.ref = parameters['__ref__']
self.style = parameters['style'].lower()
self.el = parameters['el']
self.c = np.array(parameters['c'])
self.d = np.array(parameters['d'])
self.h = np.array(parameters['h'])
self.R1 = np.array(parameters['R1'])
self.R2 = np.array(parameters['R2'])
if self.style == 'tersoff':
# These are Tersoff-style parameters. The symbols follow the notation in
# Tersoff J., Phys. Rev. B 39, 5566 (1989)
#
# In particular, pair terms are characterized by A, B, lam, mu and parameters for the three body terms ijk
# depend only on the type of atom i
self.A = np.array(parameters['A'])
self.B = np.array(parameters['B'])
self.lambda1 = np.array(parameters['lambda1'])
self.mu = np.array(parameters['mu'])
self.beta = np.array(parameters['beta'])
self.lambda3 = np.array(parameters['lambda3'])
self.chi = np.array(parameters['chi'])
self.n = np.array(parameters['n'])
elif self.style == 'brenner':
# These are Brenner/Erhart-Albe-style parameters. The symbols follow the notation in
# Brenner D., Phys. Rev. B 42, 9458 (1990) and
# Erhart P., Albe K., Phys. Rev. B 71, 035211 (2005)
#
# In particular, pairs terms are characterized by D0, S, beta, r0, the parameters n, chi are always unity and
# parameters for the three body terms ijk depend on the type of the bond ij
_D0 = np.array(parameters['D0'])
_S = np.array(parameters['S'])
_r0 = np.array(parameters['r0'])
_beta = np.array(parameters['beta'])
_mu = np.array(parameters['mu'])
gamma = np.array(parameters['gamma'])
# Convert to Tersoff parameters
self.lambda3 = 2 * _mu
self.lam = _beta * np.sqrt(2 * _S)
self.mu = _beta * np.sqrt(2 / _S)
            self.A = _D0 / (_S - 1) * np.exp(self.lam * _r0)
            self.B = _S * _D0 / (_S - 1) * np.exp(self.mu * _r0)
else:
raise ValueError(f'Unknown parameter style {self.style}')
def __call__(self, rij, rik, rjk):
# Squared distances
rsq_ij = rij**2
rsq_ik = rik**2
rsq_jk = rjk**2
# cos of angle
cos = (rsq_ij + rsq_ik - rsq_jk) / (2 * rij * rik)
# Cutoff function
fc = np.where(rik <= self.R1, 1.0,
np.where(rik >= self.R2, 0.0,
(1 + np.cos(np.pi * (rik - self.R1) / (self.R2 - self.R1))) / 2
)
)
if self.style == 'tersoff':
g = 1 + np.power(self.c / self.d, 2) - self.c**2 / (self.d**2 + np.power(self.h - cos, 2))
else:
            raise ValueError(f'Brenner style not implemented: {self.style}')
return fc * g
def gradient(self, rij, rik, rjk):
# Squared distances
rsq_ij = rij**2
rsq_ik = rik**2
rsq_jk = rjk**2
# cos of angle
cos = (rsq_ij + rsq_ik - rsq_jk) / (2 * rij * rik)
# First derivative of cos with respect to r
dcos_drij = (rsq_ij - rsq_ik + rsq_jk) / (2 * rsq_ij * rik)
dcos_drik = (rsq_ik - rsq_ij + rsq_jk) / (2 * rsq_ik * rij)
dcos_drjk = - rjk / (rij * rik)
# Cutoff function
fc = np.where(rik <= self.R1, 1.0,
np.where(rik > self.R2, 0.0,
(1 + np.cos(np.pi * (rik - self.R1) / (self.R2 - self.R1))) / 2
)
)
dfc = np.where(rik <= self.R1, 0.0,
np.where(rik > self.R2, 0.0,
-np.pi / (2 * (self.R2 - self.R1)) * np.sin(np.pi * (rik - self.R1) / (self.R2 - self.R1))
)
)
if self.style == 'tersoff':
g = 1 + np.power(self.c / self.d, 2) - self.c**2 / (self.d**2 + np.power(self.h - cos, 2))
dg_dcos = -2 * self.c**2 * (self.h - cos) / np.power(self.d**2 + np.power(self.h - cos, 2) , 2)
dg_drij = dg_dcos * dcos_drij
dg_drik = dg_dcos * dcos_drik
dg_drjk = dg_dcos * dcos_drjk
else:
            raise ValueError(f'Brenner style not implemented: {self.style}')
return np.stack([
fc * dg_drij,
dfc * g + fc * dg_drik,
fc * dg_drjk
])
def hessian(self, rij, rik, rjk):
# Squared distances
rsq_ij = rij**2
rsq_ik = rik**2
rsq_jk = rjk**2
# cos of angle
cos = (rsq_ij + rsq_ik - rsq_jk) / (2 * rij * rik)
# First derivative of cos with respect to r
dcos_drij = (rsq_ij - rsq_ik + rsq_jk) / (2 * rsq_ij * rik)
dcos_drik = (rsq_ik - rsq_ij + rsq_jk) / (2 * rsq_ik * rij)
dcos_drjk = - rjk / (rij * rik)
# Second derivatives with respect to r
ddcos_drijdrij = (rsq_ik - rsq_jk) / (rij**3 * rik)
ddcos_drikdrik = (rsq_ij - rsq_jk) / (rik**3 * rij)
ddcos_drjkdrjk = - 1 / (rij * rik)
ddcos_drijdrik = - (rsq_ij + rsq_ik + rsq_jk) / (2 * rsq_ij * rsq_ik)
ddcos_drijdrjk = rjk / (rik * rsq_ij)
ddcos_drikdrjk = rjk / (rij * rsq_ik)
# Cutoff function
fc = np.where(rik <= self.R1, 1.0,
np.where(rik >= self.R2, 0.0,
(1 + np.cos(np.pi * (rik - self.R1) / (self.R2 - self.R1))) / 2
)
)
dfc = np.where(rik <= self.R1, 0.0,
np.where(rik >= self.R2, 0.0,
-np.pi / (2 * (self.R2 - self.R1)) * np.sin(np.pi * (rik - self.R1) / (self.R2 - self.R1))
)
)
ddfc = np.where(rik <= self.R1, 0.0,
np.where(rik > self.R2, 0.0,
-np.pi**2 / (2 * np.power(self.R2 - self.R1, 2)) * np.cos(np.pi * (rik - self.R1) / (self.R2 - self.R1))
)
)
if self.style == 'tersoff':
g = 1 + np.power(self.c / self.d, 2) - self.c**2 / (self.d**2 + np.power(self.h - cos, 2))
dg_dcos = -2 * self.c**2 * (self.h - cos) / np.power(self.d**2 + np.power(self.h - cos, 2) , 2)
ddg_ddcos = (2 * self.c**2 * (self.d**2 - 3 * np.power(self.h - cos, 2))) / np.power(self.d**2 + np.power(self.h - cos, 2) , 3)
dg_drij = dg_dcos * dcos_drij
dg_drik = dg_dcos * dcos_drik
dg_drjk = dg_dcos * dcos_drjk
ddg_drijdrij = ddg_ddcos * dcos_drij * dcos_drij + dg_dcos * ddcos_drijdrij
ddg_drikdrik = ddg_ddcos * dcos_drik * dcos_drik + dg_dcos * ddcos_drikdrik
ddg_drjkdrjk = ddg_ddcos * dcos_drjk * dcos_drjk + dg_dcos * ddcos_drjkdrjk
ddg_drikdrjk = ddg_ddcos * dcos_drik * dcos_drjk + dg_dcos * ddcos_drikdrjk
ddg_drijdrjk = ddg_ddcos * dcos_drij * dcos_drjk + dg_dcos * ddcos_drijdrjk
ddg_drijdrik = ddg_ddcos * dcos_drij * dcos_drik + dg_dcos * ddcos_drijdrik
else:
            raise ValueError(f'Brenner style not implemented: {self.style}')
return np.stack([
fc * ddg_drijdrij,
ddfc * g + dfc * dg_drik + dfc * dg_drik + fc * ddg_drikdrik,
fc * ddg_drjkdrjk,
dfc * dg_drjk + fc * ddg_drikdrjk,
fc * ddg_drijdrjk,
dfc * dg_drij + fc * ddg_drijdrik
])
try:
from sympy import lambdify, Expr, Symbol
def _l(*args):
return lambdify(*args, 'numpy')
def _extend(symfunc, sample_array):
"""Extend array in case sympy returns litteral."""
def f(*args):
res = symfunc(*args)
if not isinstance(res, np.ndarray):
return np.full_like(sample_array, res, dtype=sample_array.dtype)
return res
return f
class SymPhi(Manybody.Phi):
"""Pair potential from Sympy symbolic expression."""
def __init__(self, energy_expression: Expr, symbols: Iterable[Symbol]):
assert len(symbols) == 2, "Expression should only have 2 symbols"
self.e = energy_expression
# Lambdifying expression for energy and gradient
self.phi = _l(symbols, self.e)
self.dphi = [_l(symbols, self.e.diff(v)) for v in symbols]
# Pairs of symbols for 2nd-order derivatives
dvars = list(combinations_with_replacement(symbols, 2))
            dvars = [dvars[i] for i in (0, 2, 1)]  # order: (RR, xixi, Rxi)
# Lambdifying hessian
self.ddphi = [_l(symbols, self.e.diff(*v)) for v in dvars]
def __call__(self, rsq_p, xi_p):
return _extend(self.phi, rsq_p)(rsq_p, xi_p)
def gradient(self, rsq_p, xi_p):
return np.stack([
_extend(f, rsq_p)(rsq_p, xi_p)
for f in self.dphi
])
def hessian(self, rsq_p, xi_p):
return np.stack([
_extend(f, rsq_p)(rsq_p, xi_p)
for f in self.ddphi
])
class SymTheta(Manybody.Theta):
"""Three-body potential from Sympy symbolic expression."""
def __init__(self, energy_expression: Expr, symbols: Iterable[Symbol]):
assert len(symbols) == 3, "Expression should only have 3 symbols"
self.e = energy_expression
# Lambdifying expression for energy and gradient
self.theta = _l(symbols, self.e)
self.dtheta = [_l(symbols, self.e.diff(v)) for v in symbols]
# Pairs of symbols for 2nd-order derivatives
dvars = list(combinations_with_replacement(symbols, 2))
            dvars = [dvars[i] for i in (0, 3, 5, 4, 2, 1)]  # order: (11, 22, 33, 23, 13, 12)
self.ddtheta = [_l(symbols, self.e.diff(*v)) for v in dvars]
def __call__(self, R1_t, R2_t, R3_t):
return _extend(self.theta, R1_t)(R1_t, R2_t, R3_t)
def gradient(self, R1_t, R2_t, R3_t):
return np.stack([
_extend(f, R1_t)(R1_t, R2_t, R3_t)
for f in self.dtheta
])
def hessian(self, R1_t, R2_t, R3_t):
return np.stack([
_extend(f, R1_t)(R1_t, R2_t, R3_t)
for f in self.ddtheta
])
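    # Usage sketch (hypothetical expression; the symbols are interpreted
    # directly as the squared distance and xi):
    #
    # >>> from sympy import symbols
    # >>> R, xi = symbols('R xi')
    # >>> phi = SymPhi(0.5 * (R - 1)**2 + xi, (R, xi))
    # >>> phi(np.array([1.2]), np.zeros(1))  # ~array([0.02])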
except ImportError:
pass
| 48,728 | 35.015521 | 152 | py |
matscipy | matscipy-master/matscipy/calculators/manybody/__init__.py | #
# Copyright 2014-2015, 2017, 2020-2021 Lars Pastewka (U. Freiburg)
# 2018-2021 Jan Griesser (U. Freiburg)
# 2020 Jonas Oldenstaedt (U. Freiburg)
# 2015 Adrien Gola (KIT)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from .calculator import Manybody
from .explicit_forms import Kumagai, TersoffBrenner, StillingerWeber
| 1,091 | 39.444444 | 71 | py |
matscipy | matscipy-master/matscipy/calculators/manybody/calculator.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2021 Jan Griesser (U. Freiburg)
# 2020-2021 Jonas Oldenstaedt (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Generic many-body potential.
"""
#
# Coding convention
# * All numpy arrays are suffixed with the array dimensions
# * The suffix stands for a certain type of dimension:
# - n: Atomic index, i.e. array dimension of length nb_atoms
# - p: Pair index, i.e. array dimension of length nb_pairs
# - t: Triplet index, i.e. array dimension of length nb_triplets
# - q: Index in n-uplet, i.e. pair or triplet (length # elements in n-uplet)
# - c: Cartesian index, array dimension of length 3
# - a: Cartesian index for the first dimension of the deformation gradient,
# array dimension of length 3
# - b: Cartesian index for the second dimension of the deformation gradient,
# array dimension of length 3
#
from itertools import product, starmap
from abc import ABC, abstractmethod
import numpy as np
from scipy.sparse.linalg import cg
from scipy.sparse import bsr_matrix
from ase.calculators.calculator import Calculator
from ase.geometry import find_mic
from ...calculators.calculator import MatscipyCalculator
from ...elasticity import Voigt_6_to_full_3x3_stress
from ...neighbours import (
find_indices_of_reversed_pairs,
first_neighbours,
Neighbourhood,
CutoffNeighbourhood
)
from ...numpy_tricks import mabincount
def _o(x, y, z=None):
"""Outer product."""
if z is None:
return np.einsum('...i,...j', x, y)
return np.einsum('...i,...j,...k', x, y, z)
# broadcast slices
_c, _cc = np.s_[..., np.newaxis], np.s_[..., np.newaxis, np.newaxis]
class Manybody(MatscipyCalculator):
implemented_properties = [
'free_energy',
'energy',
'stress',
'forces',
'hessian',
'born_constants',
'nonaffine_forces',
'birch_coefficients',
'dynamical_matrix',
'elastic_constants',
]
default_parameters = {}
name = 'Manybody'
def __init__(self, atom_type, pair_type,
F, G,
d1F, d2F,
d11F, d22F, d12F,
d1G, d11G, d2G, d22G, d12G,
cutoff, neighbourhood: Neighbourhood = None):
super().__init__()
self.atom_type = atom_type
self.pair_type = pair_type
self.F = F
self.G = G
self.d1F = d1F
self.d2F = d2F
self.d11F = d11F
self.d22F = d22F
self.d12F = d12F
self.d2G = d2G
self.d1G = d1G
self.d22G = d22G
self.d11G = d11G
self.d12G = d12G
self.cutoff = cutoff
if neighbourhood is None:
self.neighbourhood = CutoffNeighbourhood(atom_types=atom_type,
pair_types=pair_type)
else:
self.neighbourhood = neighbourhood
if not isinstance(self.neighbourhood, Neighbourhood):
raise TypeError(
f"{self.neighbourhood} is not of type Neighbourhood")
def get_cutoff(self, atoms):
"""Return a valid cutoff."""
if np.isscalar(self.cutoff):
return self.cutoff
# Get largest cutoff for all possible pair types
atom_types = map(self.atom_type, np.unique(atoms.numbers))
pair_types = starmap(self.pair_type, product(atom_types, repeat=2))
return max(self.cutoff[p] for p in pair_types)
def calculate(self, atoms, properties, system_changes):
"""Calculate system properties."""
super().calculate(atoms, properties, system_changes)
# Setting up cutoffs for neighbourhood
        self.neighbourhood.cutoff = self.get_cutoff(atoms)  # no-op on molecular neighbourhoods
# get internal atom types from atomic numbers
t_n = self.neighbourhood.atom_type(atoms.numbers)
# construct neighbor list
i_p, j_p, r_p, r_pc = self.neighbourhood.get_pairs(atoms, "ijdD")
nb_atoms = len(self.atoms)
nb_pairs = len(i_p)
# normal vectors
n_pc = r_pc / r_p[_c]
# construct triplet list
ij_t, ik_t, r_tq, r_tqc = self.neighbourhood.get_triplets(atoms,
"ijdD")
# construct lists with atom and pair types
ti_p = t_n[i_p]
tij_p = self.neighbourhood.pair_type(ti_p, t_n[j_p])
ti_t = t_n[i_p[ij_t]]
tij_t = self.neighbourhood.pair_type(ti_t, t_n[j_p[ij_t]])
tik_t = self.neighbourhood.pair_type(ti_t, t_n[j_p[ik_t]])
# potential-dependent functions
G_t = self.G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d1G_tc = self.d1G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d2G_tc = self.d2G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
xi_p = np.bincount(ij_t, weights=G_t, minlength=nb_pairs)
F_p = self.F(r_p, xi_p, ti_p, tij_p)
d1F_p = self.d1F(r_p, xi_p, ti_p, tij_p)
d2F_p = self.d2F(r_p, xi_p, ti_p, tij_p)
d2F_d2G_t = d2F_p[ij_t][_c] * d2G_tc
# calculate energy
epot = 0.5 * np.sum(F_p)
# calculate forces (per pair)
f_pc = (d1F_p[_c] * n_pc
+ d2F_p[_c] * mabincount(ij_t, d1G_tc, nb_pairs)
+ mabincount(ik_t, d2F_d2G_t, nb_pairs))
# collect atomic forces
f_nc = 0.5 * (mabincount(i_p, f_pc, nb_atoms)
- mabincount(j_p, f_pc, nb_atoms))
# Virial
virial_v = np.concatenate([
# diagonal components (xx, yy, zz)
np.einsum('pi,pi->i', r_pc, f_pc),
# off-diagonal (yz, xz, xy)
np.einsum('pi,pi->i', r_pc[:, (1, 0, 0)], f_pc[:, (2, 2, 1)])
])
virial_v *= 0.5
self.results.update({'free_energy': epot,
'energy': epot,
'stress': virial_v / atoms.get_volume(),
'forces': f_nc})
def get_hessian(self, atoms, format='sparse', divide_by_masses=False):
"""Calculate the Hessian matrix for a generic many-body potential.
For an atomic configuration with N atoms in d dimensions the hessian
matrix is a symmetric, hermitian matrix with a shape of (d*N,d*N). The
matrix is in general a sparse matrix, which consists of dense blocks of
shape (d,d), which are the mixed second derivatives.
Parameters
----------
atoms : ase.Atoms
Atomic configuration in a local or global minima.
format : "sparse" or "neighbour-list"
Output format of the hessian matrix.
divide_by_masses : bool
if true return the dynamic matrix else hessian matrix
Returns
-------
bsr_matrix
either hessian or dynamic matrix
Restrictions
----------
This method is currently only implemented for three dimensional systems
"""
if self.atoms is None:
self.atoms = atoms
# get internal atom types from atomic numbers
t_n = self.neighbourhood.atom_type(atoms.numbers)
cutoff = self.get_cutoff(atoms)
self.neighbourhood.cutoff = 2 * cutoff
# construct neighbor list
i_p, j_p, r_p, r_pc = self.neighbourhood.get_pairs(atoms, "ijdD")
mask_p = r_p > cutoff
nb_atoms = len(self.atoms)
nb_pairs = len(i_p)
# reverse pairs
tr_p = find_indices_of_reversed_pairs(i_p, j_p, r_p)
# normal vectors
n_pc = r_pc / r_p[_c]
# construct triplet list (need jk_t here, neighbor must be to 2*cutoff)
self.neighbourhood.cutoff = cutoff
ij_t, ik_t, jk_t, r_tq, r_tqc = \
self.neighbourhood.get_triplets(atoms, "ijkdD",
neighbours=[i_p, j_p, r_p, r_pc])
first_n = first_neighbours(nb_atoms, i_p)
first_p = first_neighbours(len(i_p), ij_t)
nb_triplets = len(ij_t)
# construct lists with atom and pair types
ti_p = t_n[i_p]
tij_p = self.neighbourhood.pair_type(ti_p, t_n[j_p])
ti_t = t_n[i_p[ij_t]]
tij_t = self.neighbourhood.pair_type(ti_t, t_n[j_p[ij_t]])
tik_t = self.neighbourhood.pair_type(ti_t, t_n[j_p[ik_t]])
# potential-dependent functions
G_t = self.G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d1G_tc = self.d1G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d2G_tc = self.d2G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d11G_tcc = self.d11G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d12G_tcc = self.d12G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d22G_tcc = self.d22G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
xi_p = np.bincount(ij_t, weights=G_t, minlength=nb_pairs)
d1F_p = self.d1F(r_p, xi_p, ti_p, tij_p)
d1F_p[mask_p] = 0.0 # explicitly exclude everything with r > cutoff
d2F_p = self.d2F(r_p, xi_p, ti_p, tij_p)
d2F_p[mask_p] = 0.0
d11F_p = self.d11F(r_p, xi_p, ti_p, tij_p)
d11F_p[mask_p] = 0.0
d12F_p = self.d12F(r_p, xi_p, ti_p, tij_p)
d12F_p[mask_p] = 0.0
d22F_p = self.d22F(r_p, xi_p, ti_p, tij_p)
d22F_p[mask_p] = 0.0
# normal vectors for triplets
n_tqc = r_tqc / r_tq[_c]
# Hessian term #4
nn_pcc = _o(n_pc, n_pc)
H_pcc = -(d1F_p[_cc] * (np.eye(3) - nn_pcc) / r_p[_cc])
# Hessian term #1
H_pcc -= d11F_p[_cc] * nn_pcc
# Hessian term #2
H_temp3_t = d12F_p[ij_t][_cc] * _o(d2G_tc, n_tqc[:, 0])
H_temp4_t = d12F_p[ij_t][_cc] * _o(d1G_tc, n_tqc[:, 0])
# Hessian term #5
H_temp2_t = d2F_p[ij_t][_cc] * d22G_tcc
H_temp_t = d2F_p[ij_t][_cc] * d11G_tcc
H_temp1_t = d2F_p[ij_t][_cc] * d12G_tcc
# Hessian term #3
# Terms involving D_1 * D_1
d1G_pc = mabincount(ij_t, d1G_tc, nb_pairs)
H_pcc -= d22F_p[_cc] * _o(d1G_pc, d1G_pc)
# Terms involving D_2 * D_2
d2G_pc = mabincount(ij_t, d2G_tc, nb_pairs)
Q1 = _o((d22F_p[_c] * d2G_pc)[ij_t], d2G_tc)
# Terms involving D_1 * D_2
Q2 = _o((d22F_p[_c] * d1G_pc)[ij_t], d2G_tc)
H_pcc -= d22F_p[_cc] * _o(d2G_pc, d1G_pc)
H_pcc += \
- mabincount(ij_t, weights=H_temp_t, minlength=nb_pairs) \
+ mabincount(jk_t, weights=H_temp1_t, minlength=nb_pairs) \
- mabincount(tr_p[ij_t], weights=H_temp1_t, minlength=nb_pairs) \
- mabincount(ik_t, weights=H_temp1_t, minlength=nb_pairs) \
- mabincount(ik_t, weights=H_temp2_t, minlength=nb_pairs) \
+ mabincount(tr_p[jk_t], weights=H_temp3_t, minlength=nb_pairs) \
- mabincount(ij_t, weights=H_temp3_t, minlength=nb_pairs) \
- mabincount(tr_p[ik_t], weights=H_temp3_t, minlength=nb_pairs) \
- mabincount(ij_t, weights=H_temp4_t, minlength=nb_pairs) \
- mabincount(tr_p[ij_t], weights=H_temp4_t, minlength=nb_pairs) \
- mabincount(ik_t, weights=Q1, minlength=nb_pairs) \
+ mabincount(jk_t, weights=Q2, minlength=nb_pairs) \
- mabincount(ik_t, weights=Q2, minlength=nb_pairs)
for il_im in range(nb_triplets):
il = ij_t[il_im]
im = ik_t[il_im]
lm = jk_t[il_im]
ti = ti_t[il_im]
tij = tij_t[il_im]
tim = tik_t[il_im]
til = tij_t[il_im]
for t in range(first_p[il], first_p[il + 1]):
ij = ik_t[t]
if ij != il and ij != im:
r_p_ij = r_pc[ij][np.newaxis, :]
r_p_il = r_pc[il][np.newaxis, :]
r_p_im = r_pc[im][np.newaxis, :]
H_pcc[lm, :, :] += np.squeeze(np.transpose(
0.5 * d22F_p[ij]
* (_o(self.d2G(r_p_ij, r_p_il, ti, tij, til),
self.d2G(r_p_ij, r_p_im, ti, tij, tim))).T))
# Add the conjugate terms (symmetrize Hessian)
H_pcc += H_pcc.transpose(0, 2, 1)[tr_p]
if format == "sparse":
# Construct full diagonal terms from off-diagonal terms
H_acc = np.zeros([nb_atoms, 3, 3])
for x in range(3):
for y in range(3):
H_acc[:, x, y] = -np.bincount(i_p, weights=H_pcc[:, x, y],
minlength=nb_atoms)
if divide_by_masses:
mass_nat = atoms.get_masses()
geom_mean_mass_n = np.sqrt(mass_nat[i_p] * mass_nat[j_p])
return \
bsr_matrix(((H_pcc.T / (2 * geom_mean_mass_n)).T, j_p, first_n),
shape=(3 * nb_atoms, 3 * nb_atoms)) \
+ bsr_matrix(((H_acc.T / (2 * mass_nat)).T, np.arange(nb_atoms),
np.arange(nb_atoms + 1)),
shape=(3 * nb_atoms, 3 * nb_atoms))
else:
return \
bsr_matrix((H_pcc / 2, j_p, first_n), shape=(3 * nb_atoms, 3 * nb_atoms)) \
+ bsr_matrix((H_acc / 2, np.arange(nb_atoms), np.arange(nb_atoms + 1)),
shape=(3 * nb_atoms, 3 * nb_atoms))
# Neighbour list format
elif format == "neighbour-list":
return H_pcc / 2, i_p, j_p, r_pc, r_p
def get_second_derivative(self, atoms, drda_pc, drdb_pc,
i_p=None, j_p=None, r_p=None, r_pc=None):
"""Calculate the second derivative of the energy with respect to
arbitrary variables a and b.
Parameters
----------
atoms: ase.Atoms
Atomic configuration in a local or global minima.
drda_pc/drdb_pc: array_like
Derivative of atom positions with respect to variable a/b.
i_p: array
First atom index
j_p: array
Second atom index
r_p: array
Absolute distance
r_pc: array
Distance vector
"""
if self.atoms is None:
self.atoms = atoms
# get internal atom types from atomic numbers
t_n = self.neighbourhood.atom_type(atoms.numbers)
cutoff = self.get_cutoff(atoms)
self.neighbourhood.cutoff = cutoff
if i_p is None or j_p is None or r_p is None or r_pc is None:
# We need to construct the neighbor list ourselves
i_p, j_p, r_p, r_pc = self.neighbourhood.get_pairs(atoms, "ijdD")
nb_pairs = len(i_p)
# normal vectors
n_pc = r_pc / r_p[_c]
# derivative of the lengths of distance vectors
drda_p = np.einsum('...i,...i', n_pc, drda_pc)
drdb_p = np.einsum('...i,...i', n_pc, drdb_pc)
# construct triplet list (no jk_t here, hence 1x cutoff suffices)
ij_t, ik_t, r_tq, r_tqc = \
self.neighbourhood.get_triplets(atoms, "ijdD",
neighbours=[i_p, j_p, r_p, r_pc])
# construct lists with atom and pair types
ti_p = t_n[i_p]
tij_p = self.neighbourhood.pair_type(ti_p, t_n[j_p])
ti_t = t_n[i_p[ij_t]]
tij_t = self.neighbourhood.pair_type(ti_t, t_n[j_p[ij_t]])
tik_t = self.neighbourhood.pair_type(ti_t, t_n[j_p[ik_t]])
# potential-dependent functions
G_t = self.G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d1G_tc = self.d1G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d2G_tc = self.d2G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d11G_tcc = self.d11G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d12G_tcc = self.d12G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d22G_tcc = self.d22G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
xi_p = np.bincount(ij_t, weights=G_t, minlength=nb_pairs)
d1F_p = self.d1F(r_p, xi_p, ti_p, tij_p)
d2F_p = self.d2F(r_p, xi_p, ti_p, tij_p)
d11F_p = self.d11F(r_p, xi_p, ti_p, tij_p)
d12F_p = self.d12F(r_p, xi_p, ti_p, tij_p)
d22F_p = self.d22F(r_p, xi_p, ti_p, tij_p)
# Term 1
T1 = np.einsum('i,i,i', d11F_p, drda_p, drdb_p)
# Term 2
T2_path = np.einsum_path('p,pc,pc,p', d12F_p[ij_t], d2G_tc,
drda_pc[ik_t], drdb_p[ij_t])[0]
T2 = np.einsum('p,pc,pc,p', d12F_p[ij_t], d2G_tc,
drda_pc[ik_t], drdb_p[ij_t], optimize=T2_path)
T2 += np.einsum('p,pc,pc,p', d12F_p[ij_t], d2G_tc,
drdb_pc[ik_t], drda_p[ij_t], optimize=T2_path)
T2 += np.einsum('p,pc,pc,p', d12F_p[ij_t], d1G_tc,
drda_pc[ij_t], drdb_p[ij_t], optimize=T2_path)
T2 += np.einsum('p,pc,pc,p', d12F_p[ij_t], d1G_tc,
drdb_pc[ij_t], drda_p[ij_t], optimize=T2_path)
# Term 3
contract = lambda x, y: np.einsum('...j,...j', x, y)
dxida_t = \
contract(d1G_tc, drda_pc[ij_t]) + contract(d2G_tc, drda_pc[ik_t])
dxidb_t = \
contract(d1G_tc, drdb_pc[ij_t]) + contract(d2G_tc, drdb_pc[ik_t])
T3 = np.einsum(
'i,i,i',
d22F_p.flatten(),
np.bincount(ij_t, weights=dxida_t, minlength=nb_pairs).flatten(),
np.bincount(ij_t, weights=dxidb_t, minlength=nb_pairs).flatten())
# Term 4
Q_pcc = (np.eye(3) - _o(n_pc, n_pc)) / r_p[_cc]
T4 = np.einsum('p,pij,pi,pj', d1F_p, Q_pcc, drda_pc, drdb_pc)
# Term 5
T5_t = np.einsum('tij,ti,tj->t', d11G_tcc, drdb_pc[ij_t], drda_pc[ij_t])
T5_t += np.einsum('tij,tj,ti->t', d12G_tcc, drdb_pc[ik_t], drda_pc[ij_t])
T5_t += np.einsum('tij,ti,tj->t', d12G_tcc, drdb_pc[ij_t], drda_pc[ik_t])
T5_t += np.einsum('tij,ti,tj->t', d22G_tcc, drdb_pc[ik_t], drda_pc[ik_t])
T5 = contract(d2F_p.flatten(),
np.bincount(ij_t, weights=T5_t, minlength=nb_pairs)).sum()
return T1 + T2 + T3 + T4 + T5
def get_hessian_from_second_derivative(self, atoms):
"""
Compute the Hessian matrix from second derivatives.
Parameters
----------
atoms: ase.Atoms
Atomic configuration in a local or global minimum.
"""
if self.atoms is None:
self.atoms = atoms
self.neighbourhood.cutoff = 2 * self.get_cutoff(atoms)
i_p, j_p, r_p, r_pc = self.neighbourhood.get_pairs(atoms, "ijdD")
nb_atoms = len(self.atoms)
nb_pairs = len(i_p)
H_ab = np.zeros((3 * nb_atoms, 3 * nb_atoms))
for m in range(0, nb_atoms):
for cm in range(3):
drda_pc = np.zeros((nb_pairs, 3))
drda_pc[i_p == m, cm] = 1
drda_pc[j_p == m, cm] = -1
for l in range(0, nb_atoms):
for cl in range(3):
drdb_pc = np.zeros((nb_pairs, 3))
drdb_pc[i_p == l, cl] = 1
drdb_pc[j_p == l, cl] = -1
H_ab[3 * m + cm, 3 * l + cl] = \
self.get_second_derivative(atoms, drda_pc, drdb_pc,
i_p=i_p, j_p=j_p,
r_p=r_p, r_pc=r_pc)
return H_ab / 2
def get_non_affine_forces_from_second_derivative(self, atoms):
"""
Compute the analytical non-affine forces.
Parameters
----------
atoms: ase.Atoms
Atomic configuration in a local or global minimum.
"""
if self.atoms is None:
self.atoms = atoms
self.neighbourhood.cutoff = 2 * self.get_cutoff(atoms)
i_p, j_p, r_p, r_pc = self.neighbourhood.get_pairs(atoms, "ijdD")
nb_atoms = len(self.atoms)
nb_pairs = len(i_p)
naF_ncab = np.zeros((nb_atoms, 3, 3, 3))
for m in range(0, nb_atoms):
for cm in range(3):
drdb_pc = np.zeros((nb_pairs, 3))
drdb_pc[i_p == m, cm] = 1
drdb_pc[j_p == m, cm] = -1
for alpha in range(3):
for beta in range(3):
drda_pc = np.zeros((nb_pairs, 3))
drda_pc[:, alpha] = r_pc[:, beta]
naF_ncab[m, cm, alpha, beta] = \
self.get_second_derivative(atoms, drda_pc, drdb_pc, i_p=i_p, j_p=j_p, r_p=r_p, r_pc=r_pc)
return naF_ncab / 2
def get_born_elastic_constants(self, atoms):
"""
Compute the Born elastic constants.
Parameters
----------
atoms: ase.Atoms
Atomic configuration in a local or global minimum.
"""
if self.atoms is None:
self.atoms = atoms
self.neighbourhood.cutoff = 2 * self.get_cutoff(atoms)
i_p, j_p, r_p, r_pc = self.neighbourhood.get_pairs(atoms, "ijdD")
nb_pairs = len(i_p)
C_abab = np.zeros((3, 3, 3, 3))
for alpha in range(3):
for beta in range(3):
drda_pc = np.zeros((nb_pairs, 3))
drda_pc[:, alpha] = r_pc[:, beta] / 2
drda_pc[:, beta] += r_pc[:, alpha] / 2
for nu in range(3):
for mu in range(3):
drdb_pc = np.zeros((nb_pairs, 3))
drdb_pc[:, nu] = r_pc[:, mu] / 2
drdb_pc[:, mu] += r_pc[:, nu] / 2
C_abab[alpha, beta, nu, mu] = \
self.get_second_derivative(atoms, drda_pc, drdb_pc, i_p=i_p, j_p=j_p, r_p=r_p, r_pc=r_pc)
C_abab /= (2 * atoms.get_volume())
return C_abab
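    # Hedged post-processing sketch (not part of the original code): the
    # rank-4 tensor returned above is commonly condensed to 6x6 Voigt
    # notation, e.g.
    #
    #   voigt = [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)]
    #   C_66 = np.array([[C_abab[i, j, k, l] for (k, l) in voigt]
    #                    for (i, j) in voigt])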
def get_nonaffine_forces(self, atoms):
if self.atoms is None:
self.atoms = atoms
# get internal atom types from atomic numbers
t_n = self.neighbourhood.atom_type(atoms.numbers)
self.neighbourhood.cutoff = self.get_cutoff(atoms)
# construct neighbor list
i_p, j_p, r_p, r_pc = self.neighbourhood.get_pairs(atoms, 'ijdD')
nb_atoms = len(self.atoms)
nb_pairs = len(i_p)
# normal vectors
n_pc = r_pc / r_p[_c]
dn_pcc = (np.eye(3) - _o(n_pc, n_pc)) / r_p[_cc]
# construct triplet list (no jk_t here, hence a single cutoff suffices)
ij_t, ik_t, r_tq, r_tqc = self.neighbourhood.get_triplets(atoms, "ijdD")
# construct lists with atom and pair types
ti_p = t_n[i_p]
tij_p = self.neighbourhood.pair_type(ti_p, t_n[j_p])
ti_t = t_n[i_p[ij_t]]
tij_t = self.neighbourhood.pair_type(ti_t, t_n[j_p[ij_t]])
tik_t = self.neighbourhood.pair_type(ti_t, t_n[j_p[ik_t]])
# potential-dependent functions
G_t = self.G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d1G_tc = self.d1G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d2G_tc = self.d2G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d11G_tcc = self.d11G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d12G_tcc = self.d12G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
d22G_tcc = self.d22G(r_tqc[:, 0], r_tqc[:, 1], ti_t, tij_t, tik_t)
xi_p = np.bincount(ij_t, weights=G_t, minlength=nb_pairs)
d1F_p = self.d1F(r_p, xi_p, ti_p, tij_p)
d2F_p = self.d2F(r_p, xi_p, ti_p, tij_p)
d11F_p = self.d11F(r_p, xi_p, ti_p, tij_p)
d12F_p = self.d12F(r_p, xi_p, ti_p, tij_p)
d22F_p = self.d22F(r_p, xi_p, ti_p, tij_p)
# Derivative of xi with respect to the deformation gradient
dxidF_pab = mabincount(ij_t, _o(d1G_tc, r_pc[ij_t])
+ _o(d2G_tc, r_pc[ik_t]), minlength=nb_pairs)
# Term 1
naF1_ncab = d11F_p.reshape(-1, 1, 1, 1) * _o(n_pc, n_pc, r_pc)
# Term 2
naF21_tcab = (d12F_p[ij_t] * (_o(n_pc[ij_t], d1G_tc, r_pc[ij_t])
+ _o(n_pc[ij_t], d2G_tc, r_pc[ik_t])
+ _o(d1G_tc, n_pc[ij_t], r_pc[ij_t])
+ _o(d2G_tc, n_pc[ij_t], r_pc[ij_t])).T).T
naF22_tcab = -(d12F_p[ij_t] *
(_o(n_pc[ij_t], d1G_tc, r_pc[ij_t])
+ _o(n_pc[ij_t], d2G_tc, r_pc[ik_t])
+ _o(d1G_tc, n_pc[ij_t], r_pc[ij_t])).T).T
naF23_tcab = -(d12F_p[ij_t] * (_o(d2G_tc, n_pc[ij_t], r_pc[ij_t])).T).T
# Term 3
naF31_tcab = (d22F_p[ij_t].reshape(-1, 1, 1, 1)
* d1G_tc.reshape(-1, 3, 1, 1)
* dxidF_pab[ij_t].reshape(-1, 1, 3, 3))
naF32_tcab = (d22F_p[ij_t].reshape(-1, 1, 1, 1)
* d2G_tc.reshape(-1, 3, 1, 1)
* dxidF_pab[ij_t].reshape(-1, 1, 3, 3))
# Term 4
naF4_ncab = (d1F_p * (dn_pcc.reshape(-1, 3, 3, 1)
* r_pc.reshape(-1, 1, 1, 3)).T).T
# Term 5
naF51_tcab = (d2F_p[ij_t] * (
d11G_tcc.reshape(-1, 3, 3, 1) * r_pc[ij_t].reshape(-1, 1, 1, 3)
+ d12G_tcc.reshape(-1, 3, 3, 1) * r_pc[ik_t].reshape(-1, 1, 1, 3)
+ d22G_tcc.reshape(-1, 3, 3, 1) * r_pc[ik_t].reshape(-1, 1, 1, 3)
+ (d12G_tcc.reshape(-1, 3, 3, 1)).swapaxes(1, 2)
* r_pc[ij_t].reshape(-1, 1, 1, 3)).T).T
naF52_tcab = -(d2F_p[ij_t] * (
d11G_tcc.reshape(-1, 3, 3, 1) * r_pc[ij_t].reshape(-1, 1, 1, 3)
+ d12G_tcc.reshape(-1, 3, 3, 1) * r_pc[ik_t].reshape(-1, 1, 1, 3)).T).T
naF53_tcab = -(d2F_p[ij_t] * (
d12G_tcc.reshape(-1, 3, 3, 1).swapaxes(1, 2) * r_pc[ij_t].reshape(-1, 1, 1, 3)
+ d22G_tcc.reshape(-1, 3, 3, 1) * r_pc[ik_t].reshape(-1, 1, 1, 3)).T).T
naforces_icab = \
mabincount(i_p, naF1_ncab, minlength=nb_atoms) \
- mabincount(j_p, naF1_ncab, minlength=nb_atoms) \
+ mabincount(i_p[ij_t], naF21_tcab, minlength=nb_atoms) \
+ mabincount(j_p[ij_t], naF22_tcab, minlength=nb_atoms) \
+ mabincount(j_p[ik_t], naF23_tcab, minlength=nb_atoms) \
+ mabincount(i_p[ij_t], naF31_tcab, minlength=nb_atoms) \
- mabincount(j_p[ij_t], naF31_tcab, minlength=nb_atoms) \
+ mabincount(i_p[ij_t], naF32_tcab, minlength=nb_atoms) \
- mabincount(j_p[ik_t], naF32_tcab, minlength=nb_atoms) \
+ mabincount(i_p, naF4_ncab, minlength=nb_atoms) \
- mabincount(j_p, naF4_ncab, minlength=nb_atoms) \
+ mabincount(i_p[ij_t], naF51_tcab, minlength=nb_atoms) \
+ mabincount(j_p[ij_t], naF52_tcab, minlength=nb_atoms) \
+ mabincount(j_p[ik_t], naF53_tcab, minlength=nb_atoms)
return naforces_icab / 2
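    # Hedged usage note (not part of the original code): these non-affine
    # forces provide the relaxation correction to the Born elastic constants,
    #
    #   C_relaxed = C_born - (1 / V) * Gamma^T . H^{-1} . Gamma,
    #
    # with Gamma the non-affine forces, H the Hessian and V the cell volume.
    # In practice one solves the linear system H x = Gamma (e.g. with
    # scipy.sparse.linalg.cg) rather than inverting H.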
class NiceManybody(Manybody):
"""Manybody calculator with nicer API."""
class F(ABC):
"""Pair potential."""
@abstractmethod
def __call__(self, r, xi, atom_type, pair_type):
"""Compute energy."""
@abstractmethod
def gradient(self, r, xi, atom_type, pair_type):
"""Compute gradient."""
@abstractmethod
def hessian(self, r, xi, atom_type, pair_type):
"""Compute hessian."""
class G(ABC):
"""Triplet potential."""
@abstractmethod
def __call__(self, r_ij, r_ik, atom_type, ij_type, ik_type):
"""Compute energy."""
@abstractmethod
def gradient(self, r_ij, r_ik, atom_type, ij_type, ik_type):
"""Compute gradient."""
@abstractmethod
def hessian(self, r_ij, r_ik, atom_type, ij_type, ik_type):
"""Compute hessian."""
@staticmethod
def _distance_triplet(rij, rik, cell, pbc):
D, d = find_mic(
np.concatenate([rij, rik, rik-rij]),
cell, pbc)
return D.reshape(3, -1, 3), d.reshape(3, -1)
def __init__(self, F, G, neighbourhood):
"""Init with pair & triplet potential + neighbourhood."""
d1F, d2F = self._split_call(F.gradient, 2)
d11F, d22F, d12F = self._split_call(F.hessian, 3)
d1G, d2G = self._split_call(G.gradient, 2)
d11G, d22G, d12G = self._split_call(G.hessian, 3)
super().__init__(
neighbourhood.atom_type,
neighbourhood.pair_type,
F=F.__call__, G=G.__call__,
d1F=d1F, d2F=d2F,
d11F=d11F, d22F=d22F, d12F=d12F,
d1G=d1G, d11G=d11G, d2G=d2G, d22G=d22G, d12G=d12G,
cutoff=neighbourhood.cutoff,
neighbourhood=neighbourhood)
@staticmethod
def _split_call(func, n):
def wrap(func, i, *args):
return np.asanyarray(func(*args))[i]
def make_lambda(i):
return lambda *args: wrap(func, i, *args)
return (make_lambda(i) for i in range(n))
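# Hedged illustration (not part of the original code; ``grad``, ``r`` and
# ``xi`` are hypothetical): _split_call turns a single callable returning a
# stacked array into independent callables, one per row,
#
#   grad = lambda r, xi: [2 * r, np.ones_like(xi)]  # stand-in for F.gradient
#   d1F, d2F = NiceManybody._split_call(grad, 2)
#   d1F(r, xi)  # row 0 -> 2 * r
#   d2F(r, xi)  # row 1 -> ones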
| 29,894 | 36.746212 | 117 | py |
matscipy | matscipy-master/matscipy/calculators/manybody/explicit_forms/harmonic.py | #
# Copyright 2022 Lucas Frérot (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Harmonic potentials for bonds and triplets."""
import numpy as np
from ase import Atoms
from ..calculator import NiceManybody
class ZeroPair(NiceManybody.F):
"""Defines a non-interacting pair potential."""
def __call__(self, r, xi, *args):
"""Return triplet energy only."""
return xi
def gradient(self, r, xi, *args):
"""Return triplet interaction only."""
return [np.zeros_like(xi), np.ones_like(xi)]
def hessian(self, r, xi, *args):
"""Zero hessian."""
return [np.zeros_like(r)] * 3
class ZeroTriplet(NiceManybody.G):
"""Defines a non-interacting triplet potential."""
def __call__(self, *args):
"""Zero triplet energy."""
return np.zeros(args[0].shape[0])
def gradient(self, *args):
"""Zero triplet force."""
return np.zeros([2] + list(args[0].shape))
def hessian(self, *args):
"""Zero triplet hessian."""
return np.zeros([3] + list(args[0].shape) + [args[0].shape[1]])
class HarmonicBond(NiceManybody.F):
"""Defines a harmonic bond."""
def __init__(self, r0, k):
"""Initialize with equilibrium distance and stiffness."""
self.r0 = r0
self.k = k
def __call__(self, r, xi, atype, ptype):
r"""Compute spring potential energy.
.. math:: E(r) = \frac{1}{2} k(r - r_0)^2 + \xi
"""
e = 0.5 * self.k * (r - self.r0)**2
e[ptype < 0] = 0 # ignore bonds from angles
return e + xi
def gradient(self, r, xi, atype, ptype):
"""Compute spring force."""
g = self.k * (r - self.r0)
g[ptype < 0] = 0
return [g, np.ones_like(xi)]
def hessian(self, r, xi, atype, ptype):
"""Compute spring stiffness."""
h = np.full_like(r, self.k)
h[ptype < 0] = 0
return [h, np.zeros_like(r), np.zeros_like(r)]
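# Hedged usage note (not part of the original module): bond-only or
# angle-only models are obtained by pairing these classes with the zero
# potentials above, e.g.
#
#   NiceManybody(HarmonicBond(r0, k), ZeroTriplet(), neighbourhood)
#   NiceManybody(ZeroPair(), HarmonicAngle(a0, k, atoms), neighbourhood)
#
# where ``neighbourhood`` is assumed to be a matscipy neighbourhood object
# as expected by NiceManybody.__init__.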
class HarmonicAngle(NiceManybody.G):
"""Defines a harmonic angle potential."""
def __init__(self, a0, k, atoms: Atoms):
"""Initialize with equilibrium angle and stiffness.
Note: atoms are needed because minimum-image conventions (MIC) are
applied to the triplet distances. This will be removed once G is
redefined to take triplet distances instead of vectors.
"""
self.a0 = a0
self.k = k
self.atoms = atoms
def __call__(self, r_ij_c, r_ik_c, *args):
r"""Angle harmonic energy.
Define the following functional form for :math:`G`:
.. math::
E(a) & = \frac{1}{2} k(a - a_0)^2 \\
\vec{u} & = \vec{r_{ij}} \\
\vec{v} & = \vec{r_{ik}} \\
\vec{w}(\vec{u}, \vec{v}) & = \vec{r_{jk}} = \vec{v} - \vec{u} \\
f(u, v, w) & = -\frac{u^2 + w^2 - v^2}{2uw} \\
F(\vec{u}, \vec{v}) & = \frac{\vec{u}\cdot\vec{w}(\vec{u}, \vec{v})}{uw} \\
& = f(u, v, |\vec{w}(\vec{u}, \vec{v})|) \\
h(x) & = E(\arccos(x)) \\
G(\vec{u}, \vec{v}) & = h(F(\vec{u}, \vec{v}))
"""
_, (r_ij, r_ik, r_jk) = self._distance_triplet(
r_ij_c, r_ik_c, self.atoms.cell, self.atoms.pbc
)
a = np.arccos(-(r_ij**2 + r_jk**2 - r_ik**2) / (2 * r_ij * r_jk))
return 0.5 * self.k * (a - self.a0)**2
def gradient(self, r_ij_c, r_ik_c, *args):
r"""Compute derivatives of :math:`G` w/r to :math:`r_{ij}` and :math:`r_{ik}`.
We have the following partial derivatives:
.. math::
\frac{\partial G}{\partial u_i}(\vec{u}, \vec{v}) & = h'(F(\vec{u}, \vec{v})) \frac{\partial F}{\partial u_i}(\vec{u}, \vec{v}) \\
\frac{\partial G}{\partial v_i}(\vec{u}, \vec{v}) & = h'(F(\vec{u}, \vec{v})) \frac{\partial F}{\partial v_i}(\vec{u}, \vec{v}) \\
The partial derivatives of :math:`F` are expressed as:
.. math::
\frac{\partial F}{\partial u_i} = U_i & = \frac{\partial f}{\partial u}\frac{\partial u}{\partial u_i} + \frac{\partial f}{\partial w}\frac{\partial w}{\partial u_i}\\
\frac{\partial F}{\partial v_i} = V_i & = \frac{\partial f}{\partial v}\frac{\partial v}{\partial v_i} + \frac{\partial f}{\partial w}\frac{\partial w}{\partial v_i}
We note the normal vectors as:
.. math::
\bar{u}_i & = \frac{u_i}{u}\\
\bar{v}_i & = \frac{v_i}{v}\\
\bar{w}_i & = \frac{w_i}{w}
So that we can write the following partial derivatives:
.. math::
\frac{\partial u}{\partial u_i} & = \bar{u}_i\\
\frac{\partial v}{\partial v_i} & = \bar{v}_i\\
\frac{\partial w}{\partial u_i} & = -\bar{w}_i\\
\frac{\partial w}{\partial v_i} & = \bar{w}_i
Which gives the final expressions for :math:`U_i` and :math:`V_i`:
.. math::
U_i &= \frac{\partial f}{\partial u} \bar{u}_i + \frac{\partial f}{\partial w} (-\bar{w}_i)\\
V_i &= \frac{\partial f}{\partial v} \bar{v}_i + \frac{\partial f}{\partial w} \bar{w}_i
The remaining scalar partial derivatives are simple to derive and left
to the reader :P.
"""
D, d = self._distance_triplet(
r_ij_c, r_ik_c, self.atoms.cell, self.atoms.pbc
)
# Broadcast slices
_c = np.s_[:, np.newaxis]
# Mapping: u <- r_ij, v <- r_ik, w <- r_jk = |r_ik_c - r_ij_c|
u, v, w = d
# Normal vectors
nu, nv, nw = (D[i] / d[i][_c] for i in range(3))
# cos of angle
f = -(u**2 + w**2 - v**2) / (2 * u * w)
# derivatives with respect to triangle lengths
df_u = -(u**2 - w**2 + v**2) / (2 * u**2 * w)
df_w = -(w**2 - u**2 + v**2) / (2 * w**2 * u)
df_v = v / (u * w)
# Scalar derivatives
def E_(a):
return self.k * (a - self.a0) # noqa
def h_(f):
with np.errstate(divide="raise"):
d_arccos = -1 / np.sqrt(1 - f**2)
return E_(np.arccos(f)) * d_arccos
# Derivatives with respect to vectors rij and rik
dG = np.zeros([2] + list(r_ij_c.shape))
# dG_rij
dG[0] = df_u[_c] * nu + df_w[_c] * (-nw)
# dG_rik
dG[1] = df_v[_c] * nv + df_w[_c] * (+nw)
dG *= h_(f)[_c]
return dG
def hessian(self, r_ij_c, r_ik_c, *args):
r"""Compute derivatives of :math:`G` w/r to :math:`r_{ij}` and :math:`r_{ik}`.
We have the following partial derivatives:
.. math::
\frac{\partial^2 G}{\partial u_i\partial u_j}(\vec{u}, \vec{v}) & = h''(F) U_i U_j + h'(F)\frac{\partial U_i}{\partial u_j}\\
\frac{\partial^2 G}{\partial v_i\partial v_j}(\vec{u}, \vec{v}) & = h''(F) V_i V_j + h'(F)\frac{\partial V_i}{\partial v_j}\\
\frac{\partial^2 G}{\partial u_i\partial v_j}(\vec{u}, \vec{v}) & = h''(F) U_i V_j + h'(F)\frac{\partial U_i}{\partial v_j}
The derivatives of :math:`U_i` and :math:`V_i` need careful treatment:
.. math::
\frac{\partial U_i}{\partial u_j} = \frac{\partial}{\partial u_j}\left(\frac{\partial f}{\partial u}(u, v, w(\vec{u}, \vec{v}))\right) \frac{\partial u}{\partial u_i} + \frac{\partial f}{\partial u}\frac{\partial^2 u}{\partial u_i\partial u_j} + \frac{\partial}{\partial u_j}\left(\frac{\partial f}{\partial w}(u, v, w(\vec{u}, \vec{v}))\right) \frac{\partial w}{\partial u_i} + \frac{\partial f}{\partial w} \frac{\partial^2 w}{\partial u_i\partial u_j}\\
\frac{\partial V_i}{\partial v_j} = \frac{\partial}{\partial v_j}\left(\frac{\partial f}{\partial v}(u, v, w(\vec{u}, \vec{v}))\right) \frac{\partial v}{\partial v_i} + \frac{\partial f}{\partial v}\frac{\partial^2 v}{\partial v_i\partial v_j} + \frac{\partial}{\partial v_j}\left(\frac{\partial f}{\partial w}(u, v, w(\vec{u}, \vec{v}))\right) \frac{\partial w}{\partial v_i} + \frac{\partial f}{\partial w} \frac{\partial^2 w}{\partial v_i\partial v_j}\\
\frac{\partial U_i}{\partial v_j} = \frac{\partial}{\partial v_j}\left(\frac{\partial f}{\partial u}(u, v, w(\vec{u}, \vec{v}))\right) \frac{\partial u}{\partial u_i} + \frac{\partial f}{\partial u}\frac{\partial^2 u}{\partial u_i\partial v_j} + \frac{\partial}{\partial v_j}\left(\frac{\partial f}{\partial w}(u, v, w(\vec{u}, \vec{v}))\right) \frac{\partial w}{\partial u_i} + \frac{\partial f}{\partial w} \frac{\partial^2 w}{\partial u_i\partial v_j}
For the simple partial derivatives in the above section, we have:
.. math::
\frac{\partial^2 u}{\partial u_i\partial u_j} & = \bar{\bar{u}}_{ij} = \frac{\delta_{ij} - \bar{u}_i \bar{u}_j}{u}\\
\frac{\partial^2 v}{\partial v_i\partial v_j} & = \bar{\bar{v}}_{ij} = \frac{\delta_{ij} - \bar{v}_i \bar{v}_j}{v}\\
\frac{\partial^2 u}{\partial u_i\partial v_j} & = 0\\
\frac{\partial^2 w}{\partial u_i\partial u_j} & = \bar{\bar{w}}_{ij} = \frac{\delta_{ij} - \bar{w}_i \bar{w}_j}{w}\\
\frac{\partial^2 w}{\partial v_i\partial v_j} & = \bar{\bar{w}}_{ij}\\
\frac{\partial^2 w}{\partial u_i\partial v_j} & = -\bar{\bar{w}}_{ij}
For the more complex partial derivatives:
.. math::
\frac{\partial}{\partial u_j}\left(\frac{\partial f}{\partial u}(u, v, w(\vec{u}, \vec{v}))\right) & = \frac{\partial^2 f}{\partial u^2} \frac{\partial u}{\partial u_j} + \frac{\partial^2 f}{\partial u\partial w}\frac{\partial w}{\partial u_j}\\
\frac{\partial}{\partial u_j}\left(\frac{\partial f}{\partial w}(u, v, w(\vec{u}, \vec{v}))\right) & = \frac{\partial^2 f}{\partial w\partial u} \frac{\partial u}{\partial u_j} + \frac{\partial^2 f}{\partial w^2}\frac{\partial w}{\partial u_j}\\
\frac{\partial}{\partial v_j}\left(\frac{\partial f}{\partial v}(u, v, w(\vec{u}, \vec{v}))\right) & = \frac{\partial^2 f}{\partial v^2} \frac{\partial v}{\partial v_j} + \frac{\partial^2 f}{\partial v\partial w}\frac{\partial w}{\partial v_j}\\
\frac{\partial}{\partial v_j}\left(\frac{\partial f}{\partial w}(u, v, w(\vec{u}, \vec{v}))\right) & = \frac{\partial^2 f}{\partial w\partial v} \frac{\partial v}{\partial v_j} + \frac{\partial^2 f}{\partial w^2}\frac{\partial w}{\partial v_j}\\
\frac{\partial}{\partial v_j}\left(\frac{\partial f}{\partial u}(u, v, w(\vec{u}, \vec{v}))\right) & = \frac{\partial^2 f}{\partial u\partial v} \frac{\partial v}{\partial v_j} + \frac{\partial^2 f}{\partial u\partial w}\frac{\partial w}{\partial v_j}\\
The remaining scalar derivatives are left to the reader.
"""
D, d = self._distance_triplet(
r_ij_c, r_ik_c, self.atoms.cell, self.atoms.pbc
)
# Utilities
_c = np.s_[:, np.newaxis]
_cc = np.s_[:, np.newaxis, np.newaxis]
_o = lambda u, v: np.einsum('...i,...j', u, v, optimize=True) # noqa
# Scalar functions
dE = lambda a: self.k * (a - self.a0) # Force
ddE = lambda a: self.k # Stiffness
arccos = np.arccos
darccos = lambda x: -1 / np.sqrt(1 - x**2)
ddarccos = lambda x: -x / (1 - x**2)**(3/2)
dh = lambda f: dE(arccos(f)) * darccos(f)
ddh = lambda f: (
ddE(arccos(f)) * darccos(f) * darccos(f)
+ dE(arccos(f)) * ddarccos(f)
)
# Mapping: u <- r_ij, v <- r_ik, w <- r_jk = |r_ik_c - r_ij_c|
u, v, w = d
# Normal vectors
nu, nv, nw = (D[i] / d[i][_c] for i in range(3))
# Outer products
nunu, nvnv, nwnw = (_o(n, n) for n in (nu, nv, nw))
# Normal tensors
Id = np.eye(3)[np.newaxis, :]
nnu, nnv, nnw = ((Id - o) / d[i][_cc]
for i, o in enumerate((nunu, nvnv, nwnw)))
# cos of angle
f = -(u**2 + w**2 - v**2) / (2 * u * w)
# derivatives with respect to triangle lengths
df_u = -(u**2 - w**2 + v**2) / (2 * u**2 * w)
df_w = -(w**2 - u**2 + v**2) / (2 * w**2 * u)
df_v = v / (u * w)
# second derivatives
ddf_uu = (v**2 - w**2) / (u**3 * w)
ddf_ww = (v**2 - u**2) / (w**3 * u)
ddf_vv = 1 / (u * w)
ddf_uv = -v / (u**2 * w)
ddf_uw = (u**2 + w**2 + v**2) / (2 * u**2 * w**2)
ddf_vw = -v / (w**2 * u)
# Compound derivatives w/r to vectors
U = df_u[_c] * nu + df_w[_c] * (-nw)
V = df_v[_c] * nv + df_w[_c] * (+nw)
# Second derivatives w/r to vectors
dU_u = (
_o(nu, ddf_uu[_c] * nu + ddf_uw[_c] * (-nw))
+ df_u[_cc] * nnu
+ _o(-nw, ddf_uw[_c] * nu + ddf_ww[_c] * (-nw))
+ df_w[_cc] * nnw
)
dV_v = (
_o(nv, ddf_vv[_c] * nv + ddf_vw[_c] * nw)
+ df_v[_cc] * nnv
+ _o(nw, ddf_vw[_c] * nv + ddf_ww[_c] * nw)
+ df_w[_cc] * nnw
)
dU_v = (
_o(nu, ddf_uv[_c] * nv + ddf_uw[_c] * nw)
+ _o(-nw, ddf_vw[_c] * nv + ddf_ww[_c] * nw)
+ df_w[_cc] * (-nnw)
)
# Scalar parts
dh = dh(f)
ddh = ddh(f)
# Defining full derivatives
ddG = np.zeros([3, r_ij_c.shape[0], r_ij_c.shape[1], r_ij_c.shape[1]])
ddG[0] = ddh[_cc] * _o(U, U) + dh[_cc] * dU_u
ddG[1] = ddh[_cc] * _o(V, V) + dh[_cc] * dV_v
ddG[2] = ddh[_cc] * _o(U, V) + dh[_cc] * dU_v
return ddG
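if __name__ == "__main__":
    # Hedged self-check (not part of the original module): compare the
    # analytical gradient of HarmonicAngle against central finite
    # differences. Run as
    # ``python -m matscipy.calculators.manybody.explicit_forms.harmonic``
    # so that the relative import above resolves.
    rng = np.random.default_rng(0)
    box = Atoms(cell=100 * np.eye(3), pbc=False)  # large box: MIC inactive
    pot = HarmonicAngle(a0=np.pi / 2, k=1.0, atoms=box)
    r_ij = rng.normal(size=(4, 3))
    r_ik = rng.normal(size=(4, 3))
    grad = pot.gradient(r_ij, r_ik)
    eps = 1e-6
    num = np.zeros_like(grad)
    for c in range(3):
        dr = np.zeros((4, 3))
        dr[:, c] = eps
        num[0, :, c] = (pot(r_ij + dr, r_ik) - pot(r_ij - dr, r_ik)) / (2 * eps)
        num[1, :, c] = (pot(r_ij, r_ik + dr) - pot(r_ij, r_ik - dr)) / (2 * eps)
    assert np.allclose(grad, num, atol=1e-6), "gradient mismatch"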
| 14,226 | 42.243161 | 468 | py |
matscipy | matscipy-master/matscipy/calculators/manybody/explicit_forms/kumagai.py | #
# Copyright 2021 Jan Griesser (U. Freiburg)
# 2021 Lars Pastewka (U. Freiburg)
# 2021 [email protected]
# 2020-2021 Jonas Oldenstaedt (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
#
# Parameter sets
# The '__ref__' dictionary entry is the journal reference
#
Kumagai_Comp_Mat_Sci_39_Si = {
'__ref__': 'T. Kumagai et al., Comp. Mat. Sci. 39 (2007)',
'el': 'Si' ,
'A': 3281.5905 ,
'B': 121.00047 ,
'lambda_1': 3.2300135 ,
'lambda_2': 1.3457970 ,
'eta': 1.0000000 ,
'delta': 0.53298909 ,
'alpha': 2.3890327 ,
'c_1': 0.20173476 ,
'c_2': 730418.72 ,
'c_3': 1000000.0 ,
'c_4': 1.0000000 ,
'c_5': 26.000000 ,
'h': -0.36500000 ,
'R_1': 2.70 ,
'R_2': 3.30
}
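# Hedged usage sketch (not part of the original module): the dictionary
# built by Kumagai() below is meant to be unpacked into the manybody
# calculator; the exact import path may differ between matscipy versions.
#
#   from matscipy.calculators.manybody import Manybody
#   calc = Manybody(**Kumagai(Kumagai_Comp_Mat_Sci_39_Si))
#   atoms.calc = calc
#   energy = atoms.get_potential_energy()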
def ab(x):
"""
Compute absolute value (norm) of an array of vectors
"""
return np.linalg.norm(x, axis=1)
def Kumagai(parameters):
"""
Implementation of the functional form of Kumagai's potential.
Reference
------------
T. Kumagai et al., Computational Materials Science 39.2 (2007): 457-464.
"""
el = parameters["el"]
A = parameters["A"]
B = parameters["B"]
lambda_1 = parameters["lambda_1"]
lambda_2 = parameters["lambda_2"]
eta = parameters["eta"]
delta = parameters["delta"]
alpha = parameters["alpha"]
c_1 = parameters["c_1"]
c_2 = parameters["c_2"]
c_3 = parameters["c_3"]
c_4 = parameters["c_4"]
c_5 = parameters["c_5"]
h = parameters["h"]
R_1 = parameters["R_1"]
R_2 = parameters["R_2"]
f = lambda r: np.where(
r <= R_1, 1.0,
np.where(r >= R_2, 0.0,
(1/2+(9/16) * np.cos(np.pi*(r - R_1)/(R_2 - R_1))
- (1/16) * np.cos(3*np.pi*(r - R_1)/(R_2 - R_1)))
)
)
df = lambda r: np.where(
r >= R_2, 0.0,
np.where(r <= R_1, 0.0,
(3*np.pi*(3*np.sin(np.pi * (R_1 - r) / (R_1 - R_2))
- np.sin(3*np.pi*(R_1 - r) / (R_1 - R_2))))/(16*(R_1 - R_2))
)
)
ddf = lambda r: np.where(
r >= R_2, 0.0,
np.where(r <= R_1, 0.0,
((9*np.pi**2*(np.cos(3*np.pi*(R_1 - r)/(R_1 - R_2))
- np.cos(np.pi*(R_1 - r)/(R_1 - R_2))))/(16*(R_1 - R_2)**2))
)
)
fR = lambda r: A*np.exp(-lambda_1 * r)
dfR = lambda r: -lambda_1 * fR(r)
ddfR = lambda r: lambda_1**2 * fR(r)
fA = lambda r: -B*np.exp(-lambda_2 * r)
dfA = lambda r: -lambda_2 * fA(r)
ddfA = lambda r: lambda_2**2 * fA(r)
b = lambda xi: 1/((1+xi**eta)**(delta))
db = lambda xi: -delta*eta*xi**(eta-1)*(xi**eta+1)**(-delta-1)
ddb = lambda xi: delta*eta*xi**(eta - 1)*(delta + 1)*(xi**eta + 1)**(-delta - 2)
g = lambda cost: c_1 + (1 + c_4*np.exp(-c_5*(h-cost)**2)) * \
((c_2*(h-cost)**2)/(c_3 + (h-cost)**2))
dg = lambda cost: 2*c_2*(cost - h)*(
(c_3 + (cost - h)**2) *
(-c_4*c_5*(cost - h)**2 + c_4 +
np.exp(c_5*(cost - h)**2)) -
(c_4 + np.exp(c_5*(cost - h)**2))
* (cost - h)**2) * np.exp(-c_5*(cost - h)**2)/(c_3 + (cost - h)**2)**2
ddg = lambda cos_theta: \
(2*c_2*((c_3 + (cos_theta - h)**2)**2
* (2*c_4*c_5**2*(cos_theta - h)**4
- 5*c_4*c_5*(cos_theta - h)**2 + c_4
+ np.exp(c_5*(cos_theta - h)**2))
+ (c_3 + (cos_theta - h)**2)*(cos_theta - h)**2
* (4*c_4*c_5*(cos_theta - h)**2
- 5*c_4 - 5*np.exp(c_5*(cos_theta - h)**2))
+ 4*(c_4 + np.exp(c_5*(cos_theta - h)**2))*(cos_theta - h)**4)
* np.exp(-c_5*(cos_theta - h)**2)/(c_3 + (cos_theta - h)**2)**3
)
hf = lambda rij, rik: f(ab(rik)) * np.exp(alpha * (ab(rij) - ab(rik)))
d1h = lambda rij, rik: alpha * hf(rij, rik)
d2h = lambda rij, rik: \
- alpha * hf(rij, rik) \
+ df(ab(rik)) * np.exp(alpha * (ab(rij) - ab(rik)))
d11h = lambda rij, rik: alpha**2 * hf(rij, rik)
d12h = lambda rij, rik: alpha * d2h(rij, rik)
d22h = lambda rij, rik: \
- alpha * ( 2 * df(ab(rik)) * np.exp(alpha * (ab(rij) - ab(rik))) \
- alpha * hf(rij, rik)) \
+ ddf(ab(rik)) * np.exp(alpha * (ab(rij) - ab(rik)))
F = lambda r, xi, i, p: f(r) * (fR(r) + b(xi) * fA(r))
d1F = lambda r, xi, i, p: df(r) * (fR(r) + b(xi) * fA(r)) + f(r) * (dfR(r) + b(xi) * dfA(r))
d2F = lambda r, xi, i, p: f(r) * fA(r) * db(xi)
d11F = lambda r, xi, i, p: f(r) * (ddfR(r) + b(xi) * ddfA(r)) + 2 * df(r) * (dfR(r) + b(xi) * dfA(r)) + ddf(r) * (fR(r) + b(xi) * fA(r))
d22F = lambda r, xi, i, p: f(r) * fA(r) * ddb(xi)
d12F = lambda r, xi, i, p: f(r) * dfA(r) * db(xi) + fA(r) * df(r) * db(xi)
G = lambda rij, rik, i, ij, ik: g(costh(rij, rik)) * hf(rij, rik)
d1G = lambda rij, rik, i, ij, ik: (Dh1(rij, rik).T * g(costh(rij, rik)) + hf(rij, rik) * Dg1(rij, rik).T).T
d2G = lambda rij, rik, i, ij, ik: (Dh2(rij, rik).T * g(costh(rij, rik)) + hf(rij, rik) * Dg2(rij, rik).T).T
Dh1 = lambda rij, rik: (d1h(rij, rik) * rij.T / ab(rij)).T
Dh2 = lambda rij, rik: (d2h(rij, rik) * rik.T / ab(rik)).T
Dg1 = lambda rij, rik: (dg(costh(rij, rik)) * c1(rij, rik).T).T
Dg2 = lambda rij, rik: (dg(costh(rij, rik)) * c2(rij, rik).T).T
d11G = lambda rij, rik, i, ij, ik: \
Dg1(rij, rik).reshape(-1, 3, 1) * Dh1(rij, rik).reshape(-1, 1, 3) + Dh1(rij, rik).reshape(-1, 3, 1) * Dg1(rij, rik).reshape(-1, 1, 3) \
+ ((g(costh(rij, rik)) * Dh11(rij, rik).T).T + (hf(rij, rik) * Dg11(rij, rik).T).T)
Dh11 = lambda rij, rik: \
(d11h(rij, rik) * (((rij.reshape(-1, 3, 1) * rij.reshape(-1, 1, 3)).T/ab(rij)**2).T).T \
+ d1h(rij, rik) * ((np.eye(3) - ((rij.reshape(-1, 3, 1) * rij.reshape(-1, 1, 3)).T/ab(rij)**2).T).T/ab(rij))).T
Dg11 = lambda rij, rik: \
(ddg(costh(rij, rik)) * (c1(rij, rik).reshape(-1, 3, 1) * c1(rij, rik).reshape(-1, 1, 3)).T
+ dg(costh(rij, rik)) * dc11(rij, rik).T).T
d22G = lambda rij, rik, i, ij, ik: \
Dg2(rij, rik).reshape(-1, 3, 1) * Dh2(rij, rik).reshape(-1, 1, 3) + Dh2(rij, rik).reshape(-1, 3, 1) * Dg2(rij, rik).reshape(-1, 1, 3) \
+ ((g(costh(rij, rik)) * Dh22(rij, rik).T).T + (hf(rij, rik) * Dg22(rij, rik).T).T)
Dh22 = lambda rij, rik: \
(d22h(rij, rik) * (((rik.reshape(-1, 3, 1) * rik.reshape(-1, 1, 3)).T/ab(rik)**2).T).T \
+ d2h(rij, rik) * ((np.eye(3) - ((rik.reshape(-1, 3, 1) * rik.reshape(-1, 1, 3)).T/ab(rik)**2).T).T/ab(rik))).T
Dg22 = lambda rij, rik: \
(ddg(costh(rij, rik)) * (c2(rij, rik).reshape(-1, 3, 1) * c2(rij, rik).reshape(-1, 1, 3)).T
+ dg(costh(rij, rik)) * dc22(rij, rik).T).T
Dh12 = lambda rij, rik: \
(d12h(rij, rik) * (rij.reshape(-1, 3, 1) * rik.reshape(-1, 1, 3)).T/(ab(rij)*ab(rik))).T
d12G = lambda rij, rik, i, ij, ik: \
Dg1(rij, rik).reshape(-1, 3, 1) * Dh2(rij, rik).reshape(-1, 1, 3) + Dh1(rij, rik).reshape(-1, 3, 1) * Dg2(rij, rik).reshape(-1, 1, 3) \
+ ((g(costh(rij, rik)) * Dh12(rij, rik).T).T + (hf(rij, rik) * Dg12(rij, rik).T).T)
Dg12 = lambda rij, rik: \
(ddg(costh(rij, rik)) * (c1(rij, rik).reshape(-1, 3, 1) * c2(rij, rik).reshape(-1, 1, 3)).T
+ dg(costh(rij, rik)) * dc12(rij, rik).T).T
# Helping functions
costh = lambda rij, rik: np.sum(rij*rik, axis=1) / (ab(rij)*ab(rik))
c1 = lambda rij, rik: ((rik.T/ab(rik) - rij.T/ab(rij) * costh(rij, rik)) / ab(rij)).T
c2 = lambda rij, rik: ((rij.T/ab(rij) - rik.T/ab(rik) * costh(rij, rik)) / ab(rik)).T
dc11 = lambda rij, rik: \
((- c1(rij, rik).reshape(-1, 3, 1) * rij.reshape(-1, 1, 3) \
- rij.reshape(-1, 3, 1) * c1(rij, rik).reshape(-1, 1, 3) \
- (costh(rij, rik) * (np.eye(3) - ((rij.reshape(-1, 1, 3)*rij.reshape(-1, 3, 1)).T/ab(rij)**2).T).T).T \
).T/ab(rij)**2).T
dc22 = lambda rij, rik: \
((- c2(rij, rik).reshape(-1, 3, 1) * rik.reshape(-1, 1, 3) \
- rik.reshape(-1, 3, 1) * c2(rij, rik).reshape(-1, 1, 3) \
- (costh(rij, rik) * (np.eye(3) - ((rik.reshape(-1, 1, 3)*rik.reshape(-1, 3, 1)).T/ab(rik)**2).T).T).T \
).T/ab(rik)**2).T
dc12 = lambda rij, rik: \
(((np.eye(3) - ((rij.reshape(-1, 1, 3)*rij.reshape(-1, 3, 1)).T/ab(rij)**2).T).T/ab(rij)
- (c1(rij, rik).reshape(-1, 3, 1) * rik.reshape(-1, 1, 3)).T/ab(rik) \
)/ab(rik)).T
return {
'atom_type': lambda n: np.zeros_like(n),
'pair_type': lambda i, j: np.zeros_like(i),
'F': F,
'G': G,
'd1F': d1F,
'd2F': d2F,
'd11F': d11F,
'd12F': d12F,
'd22F': d22F,
'd1G': d1G,
'd2G': d2G,
'd11G': d11G,
'd22G': d22G,
'd12G': d12G,
'cutoff': R_2,
} | 10,028 | 39.934694 | 143 | py |
matscipy | matscipy-master/matscipy/calculators/manybody/explicit_forms/tersoff_brenner.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2021 Jan Griesser (U. Freiburg)
# 2021 [email protected]
# 2020 Jonas Oldenstaedt (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from math import sqrt
import numpy as np
import ase.data as data
# The parameter sets are compatible with Atomistica.
# See: https://github.com/Atomistica/atomistica/blob/master/src/python/atomistica/parameters.py
def pair_index(i, j, maxval):
return np.minimum(i + j * maxval, j + i * maxval) - np.minimum(i * (i + 1) // 2, j * (j + 1) // 2)
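# Worked example (added comment, not in the original module): for two
# elements (maxval=2) the symmetric pairs map to consecutive indices,
#
#   pair_index(0, 0, 2) == 0
#   pair_index(0, 1, 2) == pair_index(1, 0, 2) == 1
#   pair_index(1, 1, 2) == 2
#
# which is why the pair properties below (A, B, lambda, ...) hold
# nel * (nel + 1) / 2 entries, ordered like [C-C, C-Si, Si-Si].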
def triplet_index(i, j, k, maxval):
    return k + maxval * (j + maxval * i)
# Mixing rules
def mix(p, key, rule):
nel = len(p['el'])
for i in range(nel):
for j in range(i + 1, nel):
ii = pair_index(i, i, nel)
jj = pair_index(j, j, nel)
ij = pair_index(i, j, nel)
p[key][ij] = rule(p[key][ii], p[key][jj])
def mix_arithmetic(p, key):
mix(p, key, lambda x,y: (x+y)/2)
def mix_geometric(p, key):
mix(p, key, lambda x,y: sqrt(x*y))
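# Worked example (added comment, not in the original module): with
# p = {'el': ['C', 'Si'], 'A': [1.0, -1.0, 4.0]}, calling mix_geometric(p, 'A')
# overwrites the mixed C-Si slot with sqrt(1.0 * 4.0) = 2.0, i.e.
# p['A'] == [1.0, 2.0, 4.0].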
#
# Parameter sets
# The '__ref__' dictionary entry is the journal reference
#
Tersoff_PRB_39_5566_Si_C = {
'__ref__': 'Tersoff J., Phys. Rev. B 39, 5566 (1989)',
'style': 'Tersoff',
'el': [ 'C', 'Si' ],
'A': [ 1.3936e3, sqrt(1.3936e3*1.8308e3), 1.8308e3 ],
'B': [ 3.4674e2, sqrt(3.4674e2*4.7118e2), 4.7118e2 ],
'chi': [ 1.0, 0.9776e0, 1.0 ],
'lambda': [ 3.4879e0, (3.4879e0+2.4799e0)/2, 2.4799e0 ],
'mu': [ 2.2119e0, (2.2119e0+1.7322e0)/2, 1.7322e0 ],
'lambda3': [ 0.0, 0.0, 0.0 ],
'beta': [ 1.5724e-7, 1.1000e-6 ],
'n': [ 7.2751e-1, 7.8734e-1 ],
'c': [ 3.8049e4, 1.0039e5 ],
'd': [ 4.3484e0, 1.6217e1 ],
'h': [ -5.7058e-1, -5.9825e-1 ],
'r1': [ 1.80, sqrt(1.80*2.70), 2.70 ],
'r2': [ 2.10, sqrt(2.10*3.00), 3.00 ],
}
Goumri_Said_ChemPhys_302_135_Al_N = {
'__ref__': 'Goumri-Said S., Kanoun M.B., Merad A.E., Merad G., Aourag H., Chem. Phys. 302, 135 (2004)',
'el': [ 'Al', 'N' ],
'r1': [ 3.20, 2.185, 1.60 ],
'r2': [ 3.60, 2.485, 2.00 ],
'A': [ 746.698, 3000.214, 636.814 ],
'B': [ 40.451, 298.81, 511.76 ],
'chi': [ 1.0, 1.0, 1.0 ],
'lambda': [ 2.4647, 3.53051, 5.43673 ],
'mu': [ 0.9683, 1.99995, 2.7 ],
'beta': [ 1.094932, 5.2938e-3 ],
'n': [ 6.085605, 1.33041 ],
'c': [ 0.074836, 2.0312e4 ],
'd': [ 19.569127, 20.312 ],
'h': [ -0.659266, -0.56239 ]
}
Matsunaga_Fisher_Matsubara_Jpn_J_Appl_Phys_39_48_B_C_N = {
'__ref__': 'Matsunaga K., Fisher C., Matsubara H., Jpn. J. Appl. Phys. 39, 48 (2000)',
'el': [ 'C', 'N', 'B' ],
'style': 'Tersoff',
'A': [ 1.3936e3, -1.0, -1.0, 1.1e4, -1.0, 2.7702e2 ],
'B': [ 3.4674e2, -1.0, -1.0, 2.1945e2, -1.0, 1.8349e2 ],
'chi': [ 1.0, 0.9685, 1.0025, 1.0, 1.1593, 1.0 ],
'lambda': [ 3.4879, -1.0, -1.0, 5.7708, -1.0, 1.9922 ],
'mu': [ 2.2119, -1.0, -1.0, 2.5115, -1.0, 1.5856 ],
'omega': [ 1.0, 0.6381, 1.0, 1.0, 1.0, 1.0 ],
'lambda3': [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ],
'r1': [ 1.80, -1.0, -1.0, 2.0, -1.0, 1.8 ],
'r2': [ 2.10, -1.0, -1.0, 2.3, -1.0, 2.1 ],
'beta': [ 1.5724e-7, 1.0562e-1, 1.6e-6 ],
'n': [ 7.2751e-1, 12.4498, 3.9929 ],
'c': [ 3.8049e4, 7.9934e4, 5.2629e-1 ],
'd': [ 4.3484e0, 1.3432e2, 1.5870e-3 ],
'h': [ -5.7058e-1, -0.9973, 0.5 ],
}
# Apply mixing rules
mix_geometric(Matsunaga_Fisher_Matsubara_Jpn_J_Appl_Phys_39_48_B_C_N, 'A')
mix_geometric(Matsunaga_Fisher_Matsubara_Jpn_J_Appl_Phys_39_48_B_C_N, 'B')
mix_arithmetic(Matsunaga_Fisher_Matsubara_Jpn_J_Appl_Phys_39_48_B_C_N, 'lambda')
mix_arithmetic(Matsunaga_Fisher_Matsubara_Jpn_J_Appl_Phys_39_48_B_C_N, 'mu')
mix_geometric(Matsunaga_Fisher_Matsubara_Jpn_J_Appl_Phys_39_48_B_C_N, 'r1')
mix_geometric(Matsunaga_Fisher_Matsubara_Jpn_J_Appl_Phys_39_48_B_C_N, 'r2')
Erhart_PRB_71_035211_SiC = {
'__ref__': 'Erhart P., Albe K., Phys. Rev. B 71, 035211 (2005)',
'style': 'Brenner',
'el': [ 'C', 'Si' ],
'D0': [ 6.00, 4.36, 3.24 ],
'r0': [ 1.4276, 1.79, 2.232 ],
'S': [ 2.167, 1.847, 1.842 ],
'beta': [ 2.0099, 1.6991, 1.4761 ],
'gamma': [ 0.11233, 0.011877, 0.114354 ],
'c': [ 181.910, 273987.0, 2.00494 ],
'd': [ 6.28433, 180.314, 0.81472 ],
'h': [ 0.5556, 0.68, 0.259 ],
'mu': [ 0.0, 0.0, 0.0 ],
'n': [ 1.0, 1.0, 1.0 ],
'r1': [ 1.85, 2.20, 2.68 ],
'r2': [ 2.15, 2.60, 2.96 ]
}
Erhart_PRB_71_035211_Si = {
'__ref__': 'Erhart P., Albe K., Phys. Rev. B 71, 035211 (2005)',
'style': 'Brenner',
'el': [ 'Si' ],
'D0': [ 3.24 ],
'r0': [ 2.222 ],
'S': [ 1.57 ],
'beta': [ 1.4760 ],
'gamma': [ 0.09253 ],
'c': [ 1.13681 ],
'd': [ 0.63397 ],
'h': [ 0.335 ],
'mu': [ 0.0 ],
'n': [ 1.0 ],
'r1': [ 2.75 ],
'r2': [ 3.05 ]
}
Albe_PRB_65_195124_PtC = {
'__ref__': 'Albe K., Nordlund K., Averback R. S., Phys. Rev. B 65, 195124 (2002)',
'style': 'Brenner',
'el': [ 'Pt', 'C' ],
'D0': [ 3.683, 5.3, 6.0 ],
'r0': [ 2.384, 1.84, 1.39 ],
'S': [ 2.24297, 1.1965, 1.22 ],
'beta': [ 1.64249, 1.836, 2.1 ],
'gamma': [ 8.542e-4, 9.7e-3, 2.0813e-4 ],
'c': [ 34.0, 1.23, 330.0 ],
'd': [ 1.1, 0.36, 3.5 ],
'h': [ 1.0, 1.0, 1.0 ],
'mu': [ 1.335, 0.0, 0.0 ],
'n': [ 1.0, 1.0, 1.0 ],
'r1': [ 2.9, 2.5, 1.7 ],
'r2': [ 3.3, 2.8, 2.0 ]
}
Henriksson_PRB_79_114107_FeC = {
'__ref__': 'Henriksson K.O.E., Nordlund K., Phys. Rev. B 79, 144107 (2009)',
'style': 'Brenner',
'el': [ 'Fe', 'C' ],
'D0': [ 1.5, 4.82645134, 6.0 ],
'r0': [ 2.29, 1.47736510, 1.39 ],
'S': [ 2.0693109, 1.43134755, 1.22 ],
'beta': [ 1.4, 1.63208170, 2.1 ],
'gamma': [ 0.0115751, 0.00205862, 0.00020813 ],
'c': [ 1.2898716, 8.95583221, 330.0 ],
'd': [ 0.3413219, 0.72062047, 3.5 ],
'h': [ -0.26, 0.87099874, 1.0 ],
'mu': [ 0.0, 0.0, 0.0 ],
'n': [ 1.0, 1.0, 1.0 ],
'r1': [ 2.95, 2.3, 1.70 ],
'r2': [ 3.35, 2.7, 2.00 ]
}
Kioseoglou_PSSb_245_1118_AlN = {
'__ref__': 'Kioseoglou J., Komninou Ph., Karakostas Th., Phys. Stat. Sol. (b) 245, 1118 (2008)',
'style': 'Brenner',
'el': [ 'N', 'Al' ],
'D0': [ 9.9100, 3.3407, 1.5000 ],
'r0': [ 1.1100, 1.8616, 2.4660 ],
'S': [ 1.4922, 1.7269, 2.7876 ],
'beta': [ 2.05945, 1.7219, 1.0949 ],
'gamma': [ 0.76612, 1.1e-6, 0.3168 ],
'c': [ 0.178493, 100390, 0.0748 ],
'd': [ 0.20172, 16.2170, 19.5691 ],
'h': [ 0.045238, 0.5980, 0.6593 ],
'mu': [ 0.0, 0.0, 0.0 ],
'n': [ 1.0, 0.7200, 6.0865 ],
'r1': [ 2.00, 2.19, 3.40 ],
'r2': [ 2.40, 2.49, 3.60 ]
}
# Juslin's W-C-H parameterization
Juslin_JAP_98_123520_WCH = {
'__ref__': 'Juslin N., Erhart P., Traskelin P., Nord J., Henriksson K.O.E, Nordlund K., Salonen E., Albe K., J. Appl. Phys. 98, 123520 (2005)',
'style': 'Brenner',
'el': [ 'W', 'C', 'H' ],
'D0': [ 5.41861, 6.64, 2.748, 0.0, 6.0, 3.6422, 0.0, 3.642, 4.7509 ],
'r0': [ 2.34095, 1.90547, 1.727, -1.0, 1.39, 1.1199, -1.0, 1.1199, 0.74144 ],
'S': [ 1.92708, 2.96149, 1.2489, 0.0, 1.22, 1.69077, 0.0, 1.69077, 2.3432 ],
'beta': [ 1.38528, 1.80370, 1.52328, 0.0, 2.1, 1.9583, 0.0, 1.9583, 1.9436 ],
'gamma': [ 0.00188227, 0.072855, 0.0054, 0.0, 0.00020813, 0.00020813, 0.0, 12.33, 12.33 ],
'c': [ 2.14969, 1.10304, 1.788, 0.0, 330.0, 330.0, 0.0, 0.0, 0.0 ],
'd': [ 0.17126, 0.33018, 0.8255, 0.0, 3.5, 3.5, 0.0, 1.0, 1.0 ],
'h': [-0.27780, 0.75107, 0.38912, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0 ],
'n': [ 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0 ],
'alpha': [ 0.45876, 0.0, 0.0, 0.45876, 0.0, 0.0, 0.45876, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 4.0, 0.0, 4.0, 4.0 ],
'omega': [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.94586, 4.54415, 1.0, 1.0, 1.0, 1.0, 0.33946, 0.22006, 1.0, 1.0, 1.0 ],
'r1': [ 3.20, 2.60, 2.68, 0.0, 1.70, 1.30, 0.0, 1.30, 1.10 ],
'r2': [ 3.80, 3.00, 2.96, 0.0, 2.00, 1.80, 0.0, 1.80, 1.70 ],
}
Kuopanportti_CMS_111_525_FeCH = {
'__ref__' : 'Kuopanportti P., Hayward N., Fu C., Kuronen A., Nordlund K., Comp. Mat. Sci. 111, 525 (2016)',
'style': 'Brenner',
'el': [ 'Fe', 'C', 'H'],
'D0': [ 1.5, 4.82645134, 1.630, 0.0, 6.0, 3.6422, 0.0, 3.642, 4.7509 ],
'r0': [ 2.29, 1.47736510, 1.589, -1.0, 1.39, 1.1199, -1.0, 1.1199, 0.74144 ],
'S': [ 2.0693, 1.43134755, 4.000, 0.0, 1.22, 1.69077, 0.0, 1.69077, 2.3432 ],
'beta': [ 1.4, 1.63208170, 1.875, 0.0, 2.1, 1.9583, 0.0, 1.9583, 1.9436 ],
'gamma': [ 0.01158, 0.00205862, 0.01332, 0.0, 0.00020813, 0.00020813, 0.0, 12.33, 12.33 ],
'c': [ 1.2899, 8.95583221, 424.5, 0.0, 330.0, 330.0, 0.0, 0.0, 0.0 ],
'd': [ 0.3413, 0.72062047, 7.282, 0.0, 3.5, 3.5, 0.0, 1.0, 1.0 ],
'h': [-0.26, 0.87099874, -0.1091, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0 ],
'n': [ 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0 ],
'alpha': [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 4.0, 4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 4.0, 0.0, 4.0, 4.0 ],
'omega': [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.94586, 4.54415, 1.0, 1.0, 1.0, 1.0, 0.33946, 0.22006, 1.0, 1.0, 1.0 ],
'r1': [ 2.95, 2.30, 2.2974, 0.0, 1.70, 1.30, 0.0, 1.30, 1.10 ],
'r2': [ 3.35, 2.70, 2.6966, 0.0, 2.00, 1.80, 0.0, 1.80, 1.70 ],
}
Brenner_PRB_42_9458_C_I = {
'__ref__': 'Brenner D., Phys. Rev. B 42, 9458 (1990) [potential I]',
'style': 'Brenner',
'el': [ 'C' ],
'D0': [ 6.325 ],
'r0': [ 1.315 ],
'S': [ 1.29 ],
'beta': [ 1.5 ],
'gamma': [ 0.011304 ],
'c': [ 19.0 ],
'd': [ 2.5 ],
'h': [ 1.0 ],
'mu': [ 0.0 ],
'n': [ 1.0/(2*0.80469) ],
'r1': [ 1.70 ],
'r2': [ 2.00 ]
}
Brenner_PRB_42_9458_C_II = {
'__ref__': 'Brenner D., Phys. Rev. B 42, 9458 (1990) [potential II]',
'style': 'Brenner',
'el': [ 'C' ],
'D0': [ 6.0 ],
'r0': [ 1.39 ],
'S': [ 1.22 ],
'beta': [ 2.1 ],
'gamma': [ 0.00020813 ],
'c': [ 330.0 ],
'd': [ 3.5 ],
'h': [ 1.0 ],
'mu': [ 0.0 ],
'n': [ 1.0/(2*0.5) ],
'r1': [ 1.70 ],
'r2': [ 2.00 ]
}
def _a(x):
'''
Compute absolute value (norm) of an array of vectors
'''
return np.linalg.norm(x, axis=1)
def _o(x, y):
"""Outer product"""
return x.reshape(-1, 3, 1) * y.reshape(-1, 1, 3)
def TersoffBrenner(parameters):
"""
Implementation of the functional form of Abell-Tersoff-Brenner potentials.
Reference
------------
J. Tersoff, Physical Review B 39.8 (1989): 5566.
"""
style = parameters['style'].lower()
el = parameters['el']
nb_elements = len(el)
nb_pairs = nb_elements * (nb_elements + 1) // 2
c = np.array(parameters['c'])
d = np.array(parameters['d'])
h = np.array(parameters['h'])
r1 = np.array(parameters['r1'])
r2 = np.array(parameters['r2'])
if style == 'tersoff':
# These are Tersoff-style parameters. The symbols follow the notation in
# Tersoff J., Phys. Rev. B 39, 5566 (1989)
#
# In particular, pair terms are characterized by A, B, lam, mu and parameters for the three body terms ijk
# depend only on the type of atom i
A = np.array(parameters['A'])
B = np.array(parameters['B'])
lam = np.array(parameters['lambda'])
mu = np.array(parameters['mu'])
beta = np.array(parameters['beta'])
lambda3 = np.array(parameters['lambda3'])
chi = np.array(parameters['chi'])
n = np.array(parameters['n'])
# Consistency check
assert len(A) == nb_pairs
assert len(B) == nb_pairs
assert len(lam) == nb_pairs
assert len(mu) == nb_pairs
assert len(beta) == nb_elements
assert len(lambda3) == nb_pairs
assert len(chi) == nb_pairs
assert len(n) == nb_elements
assert len(c) == nb_elements
assert len(d) == nb_elements
assert len(h) == nb_elements
assert len(r1) == nb_pairs
assert len(r2) == nb_pairs
elif style == 'brenner':
# These are Brenner/Erhart-Albe-style parameters. The symbols follow the notation in
# Brenner D., Phys. Rev. B 42, 9458 (1990) and
# Erhart P., Albe K., Phys. Rev. B 71, 035211 (2005)
#
# In particular, pair terms are characterized by D0, S, beta, r0; the parameters n, chi are always unity and
# parameters for the three body terms ijk depend on the type of the bond ij
_D0 = np.array(parameters['D0'])
_S = np.array(parameters['S'])
_r0 = np.array(parameters['r0'])
_beta = np.array(parameters['beta'])
_mu = np.array(parameters['mu'])
gamma = np.array(parameters['gamma'])
# Convert to Tersoff parameters
lambda3 = 2 * _mu
lam = _beta * np.sqrt(2 * _S)
mu = _beta * np.sqrt(2 / _S)
A = _D0 / (_S - 1) * np.exp(lam * _r0)
B = _S * _D0 / (_S - 1) * np.exp(mu * _r0)
# Consistency check
assert len(A) == nb_pairs
assert len(B) == nb_pairs
assert len(lam) == nb_pairs
assert len(mu) == nb_pairs
assert len(gamma) == nb_pairs
assert len(lambda3) == nb_pairs
assert len(c) == nb_pairs
assert len(d) == nb_pairs
assert len(h) == nb_pairs
assert len(r1) == nb_pairs
assert len(r2) == nb_pairs
else:
raise ValueError(f'Unknown parameter style {style}')
# Number of elements in parameter set. We will assign a consecutive internal element number.
nb_elements = len(el)
atomic_numbers = [data.atomic_numbers[e] for e in el]
atomic_number_to_internal_type = np.zeros(np.max(atomic_numbers)+1, dtype=int)
atomic_number_to_internal_type[atomic_numbers] = np.arange(len(atomic_numbers))
# Assign internal element number given the atomic number
atom_type = lambda n: atomic_number_to_internal_type[n]
# Combine two internal element number into an index for a pair property
pair_type = lambda i, j: pair_index(i, j, nb_elements)
f = lambda r, p: np.where(
r < r1[p],
np.ones_like(r),
np.where(r > r2[p],
np.zeros_like(r),
(1 + np.cos((np.pi * (r - r1[p]) / (r2[p] - r1[p])))) / 2
)
)
df = lambda r, p: np.where(
r < r1[p],
np.zeros_like(r),
np.where(r > r2[p],
np.zeros_like(r),
-np.pi * np.sin(np.pi * (r - r1[p]) / (r2[p] - r1[p])) / (2 * (r2[p] - r1[p]))
)
)
ddf = lambda r, p: np.where(
r < r1[p],
np.zeros_like(r),
np.where(r > r2[p],
np.zeros_like(r),
-np.pi ** 2 * np.cos(np.pi * (r - r1[p]) / (r2[p] - r1[p])) / (2 * (r2[p] - r1[p]) ** 2)
)
)
fR = lambda r, p: A[p] * np.exp(-lam[p] * r)
dfR = lambda r, p: -lam[p] * fR(r, p)
ddfR = lambda r, p: lam[p] ** 2 * fR(r, p)
fA = lambda r, p: -B[p] * np.exp(-mu[p] * r)
dfA = lambda r, p: -mu[p] * fA(r, p)
ddfA = lambda r, p: mu[p] ** 2 * fA(r, p)
if style == 'tersoff':
b = lambda xi, i, p: \
chi[p] * (1 + (beta[i] * xi) ** n[i]) ** (-1 / (2 * n[i]))
db = lambda xi, i, p: \
chi[p] * np.where(xi == 0.0, 0.0, -0.5 * beta[i] * np.power(beta[i] * xi, n[i] - 1, where=xi != 0.0)
* (1 + (beta[i] * xi) ** n[i]) ** (-1 - 1 / (2 * n[i])))
ddb = lambda xi, i, p: \
chi[p] * np.where(xi == 0.0, 0.0, -0.5 * beta[i] ** 2 * (n[i] - 1)
* np.power(beta[i] * xi, n[i] - 2, where=xi != 0.0)
* np.power(1 + (beta[i] * xi) ** n[i], -1 - 1 / (2 * n[i]))
- 0.5 * beta[i] ** 2 * n[i] * np.power(beta[i] * xi, -2 + 2 * n[i], where=xi != 0.0)
* (-1 - 1 / (2 * n[i])) * np.power(1 + (beta[i] * xi) ** n[i], -2 - 1 / (2 * n[i])))
g = lambda cost, i, p:\
1 + c[i] ** 2 / d[i] ** 2 - c[i] ** 2 / (d[i] ** 2 + (h[i] - cost) ** 2)
dg = lambda cost, i, p:\
-2 * c[i] ** 2 * (h[i] - cost) / (d[i] ** 2 + (h[i] - cost) ** 2) ** 2
ddg = lambda cost, i, p:\
2 * c[i] ** 2 / (d[i] ** 2 + (h[i] - cost) ** 2) ** 2 \
- 8 * c[i] ** 2 * (h[i] - cost) ** 2 / (d[i] ** 2 + (h[i] - cost) ** 2) ** 3
else:
b = lambda xi, i, p: np.power(1 + gamma[p] * xi, -0.5)
db = lambda xi, i, p: -0.5 * gamma[p] * np.power(1 + gamma[p] * xi, -1.5)
ddb = lambda xi, i, p: 0.75 * (gamma[p] ** 2) * np.power(1 + gamma[p] * xi, -2.5)
g = lambda cost, i, p:\
1 + c[p] ** 2 / d[p] ** 2 - c[p] ** 2 / (d[p] ** 2 + (h[p] + cost) ** 2)
dg = lambda cost, i, p:\
2 * c[p] ** 2 * (h[p] + cost) / (d[p] ** 2 + (h[p] + cost) ** 2) ** 2
ddg = lambda cost, i, p:\
2 * c[p] ** 2 / (d[p] ** 2 + (h[p] + cost) ** 2) ** 2\
- 8 * c[p] ** 2 * (h[p] + cost) ** 2 / (d[p] ** 2 + (h[p] + cost) ** 2) ** 3
hf = lambda rij, rik, ij, ik: \
f(_a(rik), ik) * np.exp(lambda3[ik] * (_a(rij) - _a(rik)))
d1h = lambda rij, rik, ij, ik: \
lambda3[ik] * hf(rij, rik, ij, ik)
d2h = lambda rij, rik, ij, ik: \
-lambda3[ik] * hf(rij, rik, ij, ik) + df(_a(rik), ik) * np.exp(lambda3[ik] * (_a(rij) - _a(rik)))
d11h = lambda rij, rik, ij, ik: \
lambda3[ik] ** 2 * hf(rij, rik, ij, ik)
d12h = lambda rij, rik, ij, ik: \
(df(_a(rik), ik) * lambda3[ik] * np.exp(lambda3[ik] * (_a(rij) - _a(rik)))
- lambda3[ik] * hf(rij, rik, ij, ik))
d22h = lambda rij, rik, ij, ik: \
(ddf(_a(rik), ik) * np.exp(lambda3[ik] * (_a(rij) - _a(rik)))
+ 2 * lambda3[ik] * np.exp(lambda3[ik] * (_a(rij) - _a(rik))) * df(_a(rik), ik)
+ lambda3[ik] ** 2 * hf(rij, rik, ij, ik))
# Derivatives of F
F = lambda r, xi, i, p: \
f(r, p) * (fR(r, p) + b(xi, i, p) * fA(r, p))
d1F = lambda r, xi, i, p: \
df(r, p) * (fR(r, p) + b(xi, i, p) * fA(r, p)) \
+ f(r, p) * (dfR(r, p) + b(xi, i, p) * dfA(r, p))
d2F = lambda r, xi, i, p: \
f(r, p) * fA(r, p) * db(xi, i, p)
d11F = lambda r, xi, i, p: \
f(r, p) * (ddfR(r, p) + b(xi, i, p) * ddfA(r, p)) \
+ 2 * df(r, p) * (dfR(r, p) + b(xi, i, p) * dfA(r, p)) + ddf(r, p) * (fR(r, p) + b(xi, i, p) * fA(r, p))
d22F = lambda r, xi, i, p: \
f(r, p) * fA(r, p) * ddb(xi, i, p)
d12F = lambda r, xi, i, p: \
f(r, p) * dfA(r, p) * db(xi, i, p) + fA(r, p) * df(r, p) * db(xi, i, p)
# Helping functions
costh = lambda rij, rik: np.sum(rij * rik, axis=1) / (_a(rij) * _a(rik))
c1 = lambda rij, rik: ((rik.T / _a(rik) - rij.T / _a(rij) * costh(rij, rik)) / _a(rij)).T
c2 = lambda rij, rik: ((rij.T / _a(rij) - rik.T / _a(rik) * costh(rij, rik)) / _a(rik)).T
dc11 = lambda rij, rik: \
((- _o(c1(rij, rik), rij) - _o(rij, c1(rij, rik))
- (costh(rij, rik) * (np.eye(3) - (_o(rij, rij).T / _a(rij) ** 2).T).T).T).T / _a(rij) ** 2).T
dc22 = lambda rij, rik:\
((- _o(c2(rij, rik), rik) - _o(rik, c2(rij, rik))
- (costh(rij, rik) * (np.eye(3) - (_o(rik, rik).T / _a(rik) ** 2).T).T).T).T / _a(rik) ** 2).T
dc12 = lambda rij, rik: \
(((np.eye(3) - (_o(rij, rij).T / _a(rij) ** 2).T).T / _a(rij) - _o(c1(rij, rik), rik).T / _a(rik)) / _a(rik)).T
Dh1 = lambda rij, rik, ij, ik: (d1h(rij, rik, ij, ik) * rij.T / _a(rij)).T
Dh2 = lambda rij, rik, ij, ik: (d2h(rij, rik, ij, ik) * rik.T / _a(rik)).T
Dg1 = lambda rij, rik, i, ij: (dg(costh(rij, rik), i, ij) * c1(rij, rik).T).T
Dg2 = lambda rij, rik, i, ij: (dg(costh(rij, rik), i, ij) * c2(rij, rik).T).T
# Derivatives of G
G = lambda rij, rik, i, ij, ik: g(costh(rij, rik), i, ij) * hf(rij, rik, ij, ik)
d1G = lambda rij, rik, i, ij, ik: (
Dh1(rij, rik, ij, ik).T * g(costh(rij, rik), i, ij) + hf(rij, rik, ij, ik) * Dg1(rij, rik, i, ij).T).T
d2G = lambda rij, rik, i, ij, ik: (
Dh2(rij, rik, ij, ik).T * g(costh(rij, rik), i, ij) + hf(rij, rik, ij, ik) * Dg2(rij, rik, i, ij).T).T
d11G = lambda rij, rik, i, ij, ik: \
_o(Dg1(rij, rik, i, ij), Dh1(rij, rik, ij, ik)) + _o(Dh1(rij, rik, ij, ik), Dg1(rij, rik, i, ij)) \
+ (g(costh(rij, rik), i, ij) * Dh11(rij, rik, ij, ik).T).T + (hf(rij, rik, ij, ik) * Dg11(rij, rik, i, ij).T).T
Dh11 = lambda rij, rik, ij, ik: \
(d11h(rij, rik, ij, ik) * _o(rij, rij).T / _a(rij) ** 2
+ d1h(rij, rik, ij, ik) * ((np.eye(3) - (_o(rij, rij).T / _a(rij) ** 2).T).T / _a(rij))).T
Dg11 = lambda rij, rik, i, ij: \
(ddg(costh(rij, rik), i, ij) * _o(c1(rij, rik), c1(rij, rik)).T
+ dg(costh(rij, rik), i, ij) * dc11(rij, rik).T).T
d22G = lambda rij, rik, i, ij, ik: \
_o(Dg2(rij, rik, i, ij), Dh2(rij, rik, ij, ik)) + _o(Dh2(rij, rik, ij, ik), Dg2(rij, rik, i, ij)) \
+ ((g(costh(rij, rik), i, ij) * Dh22(rij, rik, ij, ik).T).T
+ (hf(rij, rik, ij, ik) * Dg22(rij, rik, i, ij).T).T)
Dh22 = lambda rij, rik, ij, ik: \
(d22h(rij, rik, ij, ik) * _o(rik, rik).T / _a(rik) ** 2
+ d2h(rij, rik, ij, ik) * ((np.eye(3) - (_o(rik, rik).T / _a(rik) ** 2).T).T / _a(rik))).T
Dg22 = lambda rij, rik, i, ij: \
(ddg(costh(rij, rik), i, ij) * _o(c2(rij, rik), c2(rij, rik)).T
+ dg(costh(rij, rik), i, ij) * dc22(rij, rik).T).T
d12G = lambda rij, rik, i, ij, ik: \
_o(Dg1(rij, rik, i, ij), Dh2(rij, rik, ij, ik)) + _o(Dh1(rij, rik, ij, ik), Dg2(rij, rik, i, ij)) \
+ ((g(costh(rij, rik), i, ij) * Dh12(rij, rik, ij, ik).T).T
+ (hf(rij, rik, ij, ik) * Dg12(rij, rik, i, ij).T).T)
Dh12 = lambda rij, rik, ij, ik: \
(d12h(rij, rik, ij, ik) * _o(rij, rik).T / (_a(rij) * _a(rik))).T
Dg12 = lambda rij, rik, i, ij: \
(ddg(costh(rij, rik), i, ij) * _o(c1(rij, rik), c2(rij, rik)).T
+ dg(costh(rij, rik), i, ij) * dc12(rij, rik).T).T
return {
'atom_type': atom_type,
'pair_type': pair_type,
'F': F,
'G': G,
'd1F': d1F,
'd2F': d2F,
'd11F': d11F,
'd12F': d12F,
'd22F': d22F,
'd1G': d1G,
'd2G': d2G,
'd11G': d11G,
'd22G': d22G,
'd12G': d12G,
'cutoff': r2,
}
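# Hedged usage sketch (not part of the original module): as with the other
# explicit forms, the returned dictionary is meant to be unpacked into the
# manybody calculator; the import path may differ between matscipy versions.
#
#   from matscipy.calculators.manybody import Manybody
#   calc = Manybody(**TersoffBrenner(Tersoff_PRB_39_5566_Si_C))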
| 26,429 | 43.345638 | 169 | py |
matscipy | matscipy-master/matscipy/calculators/manybody/explicit_forms/__init__.py | #
# Copyright 2014-2015, 2017, 2020-2021 Lars Pastewka (U. Freiburg)
# 2018-2021 Jan Griesser (U. Freiburg)
# 2020 Jonas Oldenstaedt (U. Freiburg)
# 2015 Adrien Gola (KIT)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from .kumagai import Kumagai
from .tersoff_brenner import TersoffBrenner
from .stillinger_weber import StillingerWeber
from .harmonic import ZeroPair, ZeroTriplet, HarmonicBond, HarmonicAngle
| 1,181 | 39.758621 | 72 | py |
matscipy | matscipy-master/matscipy/calculators/manybody/explicit_forms/stillinger_weber.py | #
# Copyright 2021 Jan Griesser (U. Freiburg)
# 2021 Lars Pastewka (U. Freiburg)
# 2021 [email protected]
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
#
# Parameter sets
# The '__ref__' dictionary entry is the journal reference
#
Stillinger_Weber_PRB_31_5262_Si = {
'__ref__': 'F. Stillinger and T. Weber, Phys. Rev. B 31, 5262 (1985)',
'el': 'Si' ,
'epsilon': 2.1683 ,
'sigma': 2.0951 ,
'costheta0': 0.333333333333 ,
'A': 7.049556277 ,
'B': 0.6022245584 ,
'p': 4 ,
'a': 1.80 ,
'lambda1': 21.0 ,
'gamma': 1.20
}
Holland_Marder_PRL_80_746_Si = {
'__ref__': 'D. Holland and M. Marder, Phys. Rev. Lett. 80, 746 (1998)',
'el': 'Si' ,
'epsilon': 2.1683 ,
'sigma': 2.0951 ,
'costheta0': 0.333333333333 ,
'A': 7.049556277 ,
'B': 0.6022245584 ,
'p': 4 ,
'a': 1.80 ,
'lambda1': 42.0 ,
'gamma': 1.20
}
RLC_Vink_JNCS_282_746_Si = {
'__ref__': 'RLC Vink et al., J. Non-Cryst. Solids 282 (2001)',
'el': 'Si' ,
'epsilon': 1.64833 ,
'sigma': 2.0951 ,
'costheta0': 0.333333333333 ,
'A': 7.049556277 ,
'B': 0.6022245584 ,
'p': 4 ,
'a': 1.80 ,
'lambda1': 31.5 ,
'gamma': 1.20
}
Russo_PRX_8_021040_Si = {
'__ref__': 'J. Russo et al., Phys. Rev. X 8, 021040 (2018)',
'el': 'Si' ,
'epsilon': 2.1683 ,
'sigma': 2.0951 ,
'costheta0': 0.333333333333 ,
'A': 7.049556277 ,
'B': 0.6022245584 ,
'p': 4 ,
'a': 1.80 ,
'lambda1': 18.75 ,
'gamma': 1.20
}
def ab(x):
"""
Compute absolute value (norm) of an array of vectors
"""
return np.linalg.norm(x, axis=1)
def StillingerWeber(parameters):
"""
Implementation of the functional form of the Stillinger-Weber potential.
Reference
------------
F. Stillinger and T. Weber, Physical Review B 31.8, 5262 (1985)
"""
el = parameters['el']
epsilon = parameters['epsilon']
sigma = parameters['sigma']
costheta0 = parameters['costheta0']
A = parameters['A']
B = parameters['B']
p = parameters['p']
a = parameters['a']
lambda1 = parameters['lambda1']
gamma = parameters['gamma']
fR = lambda r: A * epsilon * (B*np.power(sigma/r, p) - 1) * np.exp(sigma/(r-a*sigma))
dfR = lambda r: - A*epsilon*B*p/r * np.power(sigma/r, p) * np.exp(sigma/(r-a*sigma)) - sigma/np.power(r-a*sigma, 2)*fR(r)
ddfR = lambda r: A*B*p*epsilon/r * np.power(sigma/r, p) * np.exp(sigma/(r-a*sigma)) * (sigma/np.power(r-a*sigma, 2) + (p+1)/r) \
+ 2 * sigma / np.power(r-a*sigma, 3) * fR(r) - sigma / np.power(r-a*sigma, 2) * dfR(r)
fA = lambda r: np.exp(gamma*sigma/(r-a*sigma))
dfA = lambda r: - gamma * sigma / np.power(r - a*sigma, 2) * fA(r)
ddfA = lambda r: 2 * gamma * sigma / np.power(r-a*sigma, 3) * fA(r) - gamma * sigma / np.power(r-a*sigma, 2) * dfA(r)
hf = lambda rik: np.where(ab(rik)<a*sigma, np.exp(gamma*sigma/(ab(rik)-a*sigma)), 0)
d2h = lambda rik: -gamma * sigma / np.power(ab(rik)-a*sigma, 2) * hf(rik)
d22h = lambda rik: 2 * gamma * sigma / np.power(ab(rik)-a*sigma, 3) * hf(rik) - gamma * sigma / np.power(ab(rik)-a*sigma, 2) * d2h(rik)
g = lambda cost: np.power(cost + costheta0, 2)
dg = lambda cost: 2 * (cost + costheta0)
ddg = lambda cost: 2 * np.ones_like(cost)
def F(r, xi, i, p):
mask = (r < a*sigma)
F_n = np.zeros_like(r)
F_n[mask] = fR(r[mask]) + lambda1 * epsilon * fA(r[mask]) * xi[mask]
return F_n
def d1F(r, xi, i, p):
mask = (r < a*sigma)
d1F_n = np.zeros_like(r)
d1F_n[mask] = dfR(r[mask]) + lambda1 * epsilon * xi[mask] * dfA(r[mask])
return d1F_n
def d2F(r, xi, i, p):
mask = (r < a*sigma)
d2F_n = np.zeros_like(r)
d2F_n[mask] = lambda1 * epsilon * fA(r[mask])
return d2F_n
def d11F(r, xi, i, p):
mask = (r < a*sigma)
d11F_n = np.zeros_like(r)
d11F_n[mask] = ddfR(r[mask]) + lambda1 * epsilon * xi[mask] * ddfA(r[mask])
return d11F_n
def d22F(r, xi, i, p):
return np.zeros_like(r)
def d12F(r, xi, i, p):
mask = (r < a*sigma)
d12F_n = np.zeros_like(r)
d12F_n[mask] = lambda1 * epsilon * dfA(r[mask])
return d12F_n
G = lambda rij, rik, i, ij, ik: hf(rik) * g(costh(rij, rik))
d1G = lambda rij, rik, i, ij, ik: (hf(rik) * Dg1(rij, rik).T).T
d2G = lambda rij, rik, i, ij, ik: (Dh2(rik).T * g(costh(rij, rik)) + hf(rik) * Dg2(rij, rik).T).T
Dh2 = lambda rik: (d2h(rik) * rik.T / ab(rik)).T
Dg1 = lambda rij, rik: (dg(costh(rij, rik)) * c1(rij, rik).T).T
Dg2 = lambda rij, rik: (dg(costh(rij, rik)) * c2(rij, rik).T).T
d11G = lambda rij, rik, i, ij, ik: \
((hf(rik) * Dg11(rij, rik).T).T)
Dg11 = lambda rij, rik: \
(ddg(costh(rij, rik)) * (c1(rij, rik).reshape(-1, 3, 1) * c1(rij, rik).reshape(-1, 1, 3)).T
+ dg(costh(rij, rik)) * dc11(rij, rik).T).T
d22G = lambda rij, rik, i, ij, ik: \
Dg2(rij, rik).reshape(-1, 3, 1) * Dh2(rik).reshape(-1, 1, 3) + Dh2(rik).reshape(-1, 3, 1) * Dg2(rij, rik).reshape(-1, 1, 3) \
+ ((g(costh(rij, rik)) * Dh22(rij, rik).T).T + (hf(rik) * Dg22(rij, rik).T).T)
Dg22 = lambda rij, rik: \
(ddg(costh(rij, rik)) * (c2(rij, rik).reshape(-1, 3, 1) * c2(rij, rik).reshape(-1, 1, 3)).T
+ dg(costh(rij, rik)) * dc22(rij, rik).T).T
Dh22 = lambda rij, rik: \
(d22h(rik) * (((rik.reshape(-1, 3, 1) * rik.reshape(-1, 1, 3)).T/ab(rik)**2).T).T \
+ d2h(rik) * ((np.eye(3) - ((rik.reshape(-1, 3, 1) * rik.reshape(-1, 1, 3)).T/ab(rik)**2).T).T/ab(rik))).T
d12G = lambda rij, rik, i, ij, ik: \
Dg1(rij, rik).reshape(-1, 3, 1) * Dh2(rik).reshape(-1, 1, 3) + ((hf(rik) * Dg12(rij, rik).T).T)
Dg12 = lambda rij, rik: \
(ddg(costh(rij, rik)) * (c1(rij, rik).reshape(-1, 3, 1) * c2(rij, rik).reshape(-1, 1, 3)).T
+ dg(costh(rij, rik)) * dc12(rij, rik).T).T
    # Helper functions
c1 = lambda rij, rik: ((rik.T/ab(rik) - rij.T/ab(rij) * costh(rij, rik)) / ab(rij)).T
c2 = lambda rij, rik: ((rij.T/ab(rij) - rik.T/ab(rik) * costh(rij, rik)) / ab(rik)).T
dc11 = lambda rij, rik: \
((- c1(rij, rik).reshape(-1, 3, 1) * rij.reshape(-1, 1, 3) \
- rij.reshape(-1, 3, 1) * c1(rij, rik).reshape(-1, 1, 3) \
- (costh(rij, rik) * (np.eye(3) - ((rij.reshape(-1, 1, 3)*rij.reshape(-1, 3, 1)).T/ab(rij)**2).T).T).T \
).T/ab(rij)**2).T
dc22 = lambda rij, rik: \
((- c2(rij, rik).reshape(-1, 3, 1) * rik.reshape(-1, 1, 3) \
- rik.reshape(-1, 3, 1) * c2(rij, rik).reshape(-1, 1, 3) \
- (costh(rij, rik) * (np.eye(3) - ((rik.reshape(-1, 1, 3)*rik.reshape(-1, 3, 1)).T/ab(rik)**2).T).T).T \
).T/ab(rik)**2).T
dc12 = lambda rij, rik: \
(((np.eye(3) - ((rij.reshape(-1, 1, 3)*rij.reshape(-1, 3, 1)).T/ab(rij)**2).T).T/ab(rij)
- (c1(rij, rik).reshape(-1, 3, 1) * rik.reshape(-1, 1, 3)).T/ab(rik) \
)/ab(rik)).T
costh = lambda rij, rik: np.sum(rij*rik, axis=1) / (ab(rij)*ab(rik))
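    # Naming convention above (descriptive comment): d1/d2 denote partial
    # derivatives with respect to the first/second argument, e.g. d1F = dF/dr,
    # d2F = dF/dxi and d12G = d^2 G / (drij drik); c1/c2 are the gradients of
    # cos(theta) with respect to rij and rik.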
return {
'atom_type': lambda n: np.zeros_like(n),
'pair_type': lambda i, j: np.zeros_like(i),
'F': F,
'G': G,
'd1F': d1F,
'd2F': d2F,
'd11F': d11F,
'd12F': d12F,
'd22F': d22F,
'd1G': d1G,
'd2G': d2G,
'd11G': d11G,
'd22G': d22G,
'd12G': d12G,
'cutoff': a*sigma,
}
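# Usage sketch (illustrative, not part of the original module). The factory
# returns a dictionary of callables keyed as in the return statement above;
# numpy is assumed to be imported as np at the top of this file.
if __name__ == "__main__":
    import numpy as np
    sw = StillingerWeber(Holland_Marder_PRL_80_746_Si)
    r = np.array([2.2, 2.5, 3.0])  # separations inside the cutoff
    xi = np.zeros_like(r)          # probe the pair term without the three-body part
    print("cutoff:", sw["cutoff"])
    print("F(r, xi=0):", sw["F"](r, xi, None, None))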
| 8,961 | 37.796537 | 139 | py |
matscipy | matscipy-master/matscipy/calculators/mcfm/qm_cluster.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from .qm_cluster_tools.qm_flagging_tool import QMFlaggingTool
from .qm_cluster_tools.qm_clustering_tool import QMClusteringTool
class QMCluster(object):
"""This is a class responsible for managing the QM clusters in the simulation.
It acts as a mediator between
-----------------------------
    QM flagging module
QM clustering module
neighbour list
and contains interface to those classes
Attributes
----------
clustering_module : matscipy.calculators.mcfm.qmClusterModule.QMClusteringTool
module responsible for carving a qm cluster
flagging_module : matscipy.calculators.mcfm.qmClusterModule.QMFlaggingTool
module responsible for flagging atoms
    neighbour_list : matscipy.calculators.mcfm.neighbour_list_mcfm.NeighbourListBase
object holding the neighbour list
verbose : int
Set verbosity level
"""
def __init__(self, special_atoms_list=[], verbose=0):
"""This is a class responsible for managing the QM clusters in the simulation.
It acts as a mediator between
-----------------------------
        QM flagging module
QM clustering module
neighbour list
and contains interface to those classes
Parameters
----------
special_atoms_list : list of ints (atomic indices)
In case a group of special atoms are specified (special molecule),
If one of these atoms is in the buffer region, the rest are also added to it.
verbose : int
verbosity level to be passed to other objects
"""
self.flagging_module = None
self.clustering_module = None
self.neighbour_list = None
self.special_atoms_list = special_atoms_list
self.verbose = verbose
def attach_neighbour_list(self, neighbour_list):
"""attach a neighbour list"""
self.neighbour_list = neighbour_list
def attach_flagging_module(self, **kwargs):
"""Initialize and attach matscipy.calculators.mcfm.QMFlaggingTool
The function calls the class initializer with given parameters"""
self.flagging_module = QMFlaggingTool(mediator=self, **kwargs)
def attach_clustering_module(self, **kwargs):
"""Initialize and attach matscipy.calculators.mcfm.QMClusteringTool
The function calls the class initializer with given parameters"""
self.clustering_module = QMClusteringTool(mediator=self, **kwargs)
def reset_energized_list(self):
"""Reset old_energized_atoms list in flaggingModule to facilitate
MCFM potential warmup"""
self.flagging_module.old_energized_list = []
def update_qm_region(self, *args, **kwargs):
"""Interface to
self.flagging_module.update_qm_region(self,
atoms,
potential_energies=None,
)"""
return self.flagging_module.update_qm_region(*args, **kwargs)
def carve_cluster(self, *args, **kwargs):
"""Interface to
self.clustering_module.carve_cluster(self,
atoms,
core_qm_list,
buffer_hops=10)"""
return self.clustering_module.carve_cluster(*args, **kwargs)
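# Usage sketch (illustrative; the keyword arguments of the attached tools are
# forwarded verbatim to QMFlaggingTool/QMClusteringTool, so the ellipses below
# stand for their respective options):
#
#   qm_cluster = QMCluster(special_atoms_list=[], verbose=0)
#   qm_cluster.attach_neighbour_list(neighbour_list)
#   qm_cluster.attach_flagging_module(...)    # flagging criteria
#   qm_cluster.attach_clustering_module(...)  # cluster carving options
#   clusters = qm_cluster.update_qm_region(atoms, potential_energies=energies)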
| 4,297 | 38.796296 | 89 | py |
matscipy | matscipy-master/matscipy/calculators/mcfm/__init__.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from .calculator import MultiClusterForceMixingPotential
__all__ = ["MultiClusterForceMixingPotential"]
| 955 | 38.833333 | 71 | py |
matscipy | matscipy-master/matscipy/calculators/mcfm/cluster_data.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
class ClusterData(object):
"""Class for storing cluster data
Attributes
----------
forces : np.array
Atomic forces
mark : list
Marks assigning atoms as:
1: core QM region
2: buffer region
3: terminal atoms (final atom included in the buffer region)
4: additional terminal atoms
            5: Hydrogens used to terminate cut-off bonds
qm_list : list
list of inner QM atoms
"""
    def __init__(self, nAtoms, mark=None, qm_list=None, forces=None):
        # Guard against the default None values before checking dimensions
        if mark is not None and len(mark) != nAtoms:
            raise ValueError(
                "mark length not compatible with atoms length in this ClusterData object")
        if forces is not None and np.shape(forces) != (nAtoms, 3):
            raise ValueError(
                "forces shape not compatible with atoms length in this ClusterData object")
self.forces = forces
self.mark = mark
self.qm_list = qm_list
self.nClusterAtoms = None
def __str__(self):
return str(self.mark)
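# Minimal construction sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    n = 4
    cd = ClusterData(n,
                     mark=np.zeros(n, dtype=int),
                     qm_list=[0, 1],
                     forces=np.zeros((n, 3)))
    print(cd)  # prints the per-atom marks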
| 1,913 | 32.578947 | 91 | py |
matscipy | matscipy-master/matscipy/calculators/mcfm/calculator.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from ase.calculators.calculator import Calculator
from .mcfm_parallel import mcfm_parallel_control as mcfm_parallel_control
from .cluster_data import ClusterData
import ase.io
class MultiClusterForceMixingPotential(Calculator):
"""Subclass of ASE.Calculator facilitating a support for multiple
QM clusters. It utilizes the given classical_calculator and qm_calculator to initialize
an instace of ForceMixingPotential
Extends:
Calculator
Variables:
implemented_properties {list} -- ["energy", "forces", "potential_energies", "stress"]
all_changes {list} -- ['positions', 'numbers', 'cell', 'pbc']
"""
implemented_properties = ["energy", "forces", "potential_energies", "stress"]
all_changes = ['positions', 'numbers', 'cell', 'pbc']
def __init__(self, atoms=None, classical_calculator=None, qm_calculator=None,
qm_cluster=None, forced_qm_list=None, change_bonds=True,
calculate_errors=False, calculation_always_required=False,
buffer_hops=10, verbose=0, enable_check_state=True):
"""Initialize a generic ASE potential without any calculating power,
This is only to have access to the necessary functions, all the
evaluations will be performes in self.mm_pot and self.qm_calculator
Parameters
----------
atoms : ASE.atoms
atoms object
classical_calculator : AE.calculator
classical calculator
qm_calculator : ASE.calculator
qm calculator
qm_cluster : matscipy.calculators.mcfm.qm_cluster
flagging/cluster carving utility
forced_qm_list : list
add this list to enforce a set of atoms for qm treatment
change_bonds : bool
call the classical potential to update topology
calculate_errors : bool
evaluate errors after each step
calculation_always_required : bool
            as the name suggests
buffer_hops : int
number of neighbours hop used to construct the core QM region
verbose : int
For now verbose levels are:
0 - nothing is printed
1 - More data is added to Atoms object
10 - Calculate steps are listed
100 - Information about specific QM clusters
(the default is 0)
enable_check_state : bool
Save the atoms after each evaluation to enable meth::check_state
"""
# Set the verbose status
self.verbose = verbose
self.debug_cluster_carving = False
        # Set storing atoms - slows down evaluation but enables the check_state function
self.enable_check_state = enable_check_state
# Flag for warmup
self.warmup = False
# Init ASE calculator as a parent class
self._calc_args = {}
self._default_properties = []
self.calculation_always_required = calculation_always_required
Calculator.__init__(self)
        # If an atoms object has been specified, attach a copy to the calculator to facilitate
        # the proper use of meth::check_state()
if atoms is not None:
self.atoms = atoms.copy()
atoms.set_calculator(self)
# Set some flags and values
self.errors = {}
self.calculate_errors = calculate_errors
self.change_bonds = change_bonds
self.buffer_hops = buffer_hops
self.conserve_momentum = False
self.long_range_weight = 0.0
self.doParallel = True
# Flag for QM debugging
self.debug_qm_calculator = False
# Set the cluster carving object
self.qm_cluster = qm_cluster
if forced_qm_list is None:
self.forced_qm_list = None
else:
self.forced_qm_list = [forced_qm_list]
self.cluster_list = []
self.cluster_data_list = None
# Set qm and mm calculators
self.classical_calculator = classical_calculator
self.qm_calculator = qm_calculator
# Set up writing clusters
self.clusterDebug_cluster_atoms = None
if (self.verbose >= 100):
self.debug_cluster_carving = True
self.clusterDebug_cluster_atoms = open("clusterDebug_cluster_atoms.xyz", "w")
self.clusterDebug_full_structure = None
if (self.verbose >= 100):
self.debug_cluster_carving = True
self.clusterDebug_full_structure = open("clusterDebug_full_structure.xyz", "w")
def calculate(self, atoms=None, properties=['energy'],
system_changes=all_changes):
"""Calculate selected properties of the given Atoms object.
Initially, a classical potential is called to evaluate potential
energies for each atom and afterwards a qm_cluster object is employed
to analyze them. If no atom is flagged for QM treatment, classical forces
        are returned. In case some atoms are flagged for QM treatment,
        each QM cluster is independently sent to a QM/MM potential to evaluate
        more accurate forces. The results of the QM/MM evaluations are used to modify
        the classical forces and the final array is returned.
results are
-----------
energy = potential energy from classical evaluation
potential energies = pot energy per atom from classical evaluation
forces = classical or qmmm forces depending on whether any atoms are flagged
Parameters
----------
atoms : ASE.atoms
atoms object
properties : list
            properties to evaluate
        system_changes : list
            changes in the system
Raises
------
AttributeError
Must provide an atoms object
"""
Calculator.calculate(self, atoms=atoms, properties=properties, system_changes=system_changes)
# Check for atoms and if present wrap them to their cell
if atoms is None:
raise AttributeError("No atoms object provided")
else:
pass
self.print_message("\nCalculation with MCFM potential!", limit=10)
self.print_message("Calculating parameters using classical potential", limit=10)
forces, potential_energy, potential_energies =\
self.produce_classical_results(atoms=atoms)
self.results["classical_forces"] = forces.copy()
self.print_message(
"Update the qm_cluster based on potential energies obtained from mm_pot calculation", limit=10)
# If in warmup mode, do not use the clusters
self.produce_qm_clusters(atoms,
potential_energies=potential_energies)
if (self.warmup):
self.print_message("Warmup mode: not using any clusters", limit=10)
self.cluster_list = []
self.qm_cluster.reset_energized_list()
self.print_message("Cluster list", limit=100)
self.print_message(self.cluster_list, limit=100)
# Perform parallel QM calculations on each cluster
cluster_data_list = np.empty(len(self.cluster_list), dtype=object)
if (len(cluster_data_list) > 0):
if (self.doParallel):
mcfm_parallel_control.get_cluster_data(atoms=atoms,
clusterData=cluster_data_list,
mcfm_pot=self)
else:
for i, cluster in enumerate(self.cluster_list):
# Evaluate qm cluster
cluster_data_list[i] = self.evaluate_qm_cluster_serial(
atoms=atoms, cluster=cluster, clusterNumber=i)
# ------ Attach the cluster data list
self.cluster_data_list = cluster_data_list
# Stitch the forces using data from cluster_data_list
forces = self.combine_qm_mm_forces(atoms=atoms,
forces=forces,
cluster_data_list=cluster_data_list)
# Create the full QM list and QM mask
full_qm_atoms_list = [item for sublist in self.cluster_list for item in sublist]
full_qm_atoms_mask = np.zeros(len(atoms), dtype=bool)
full_qm_atoms_mask[full_qm_atoms_list] = True
# If the potential can update topology, do it.
do_bond_change = (not (len(full_qm_atoms_list) == 0)) and\
(self.change_bonds is True) and\
hasattr(self.classical_calculator, "update_topology")
if do_bond_change:
self.print_message("Updating Topology!", limit=10)
self.classical_calculator.update_topology(full_qm_atoms_list)
        # Mark atoms that are treated quantum mechanically for more comprehensive output
self.attach_hybrid_data(
atoms=atoms, full_qm_atoms_mask=full_qm_atoms_mask, cluster_data=cluster_data_list)
# Compute stress
if "stress" in properties:
self.results["stress"] = self.compute_stress(atoms, forces)
# Attach the updated version of atoms so that check-state would work properly.
if self.enable_check_state:
self.atoms = atoms.copy()
self.results["forces"] = forces
self.results["potential_energy"] = potential_energy
self.results["energy"] = potential_energy
self.results["potential_energies"] = potential_energies
if (self.calculate_errors):
self.evaluate_errors(atoms=atoms)
def produce_classical_results(self, atoms=None):
"""Call the classical potential ot obtain forces, potential energy
and potential energies per atom
Parameters
----------
atoms : ASE.atoms
atoms object
Returns
-------
forces : np.array
Atomic forces
potential_energy : np.array
Potential energy of the system
potential_energies : np.array
Per atom potential energies
"""
# Get forces
forces = self.classical_calculator.get_forces(atoms)
# Get potential energies
if "potential_energies" in self.classical_calculator.results:
potential_energies = self.classical_calculator.results["potential_energies"]
else:
potential_energies = self.classical_calculator.get_potential_energies(atoms)
# Get total potential energy
        # (summing over individual contributions is usually faster than a full calculation)
if "energy" in self.classical_calculator.results:
potential_energy = self.classical_calculator.results["energy"]
elif "potential_energy" in self.classical_calculator.results:
potential_energy = self.classical_calculator.results["potential_energy"]
else:
potential_energy = potential_energies.sum()
return forces, potential_energy, potential_energies
def produce_qm_clusters(self, atoms,
potential_energies=None):
"""Update qm clusters based on potential energies per atom
Parameters
----------
atoms : ASE.atoms
atoms object
potential_energies : np.array
Per atom potential energies
"""
if self.forced_qm_list is None:
# Use the newly calculated values to find the quantum mechanical regions
self.cluster_list = self.qm_cluster.update_qm_region(atoms,
potential_energies=potential_energies
)
else:
if len(self.forced_qm_list) == 0:
self.cluster_list = []
else:
self.cluster_list = self.forced_qm_list
# Safeguard against empty clusters
self.cluster_list = [item for item in self.cluster_list if len(item) > 0]
def evaluate_qm_cluster_serial(self, atoms=None, cluster=None, clusterNumber=0):
"""Evaluate forces for a single QM cluster given the buffer hops
Parameters
----------
atoms : ASE.atoms
atoms object
cluster : list
list of core qm atoms
clusterNumber : int
cluster number
Returns
-------
Cluster : cluster_data
object with forces
qm_atoms mark
core qm list
"""
self.print_message("Evaluating cluster", limit=100)
self.print_message(cluster, limit=100)
# Create and evaluate hybrid cluster
self.print_message("Creating cluster", limit=10)
atomic_cluster = self.qm_cluster.carve_cluster(atoms, cluster, buffer_hops=self.buffer_hops)
self.print_message("Size of the atomic cluster: " + str(len(atomic_cluster)), limit=10)
# Debug cluster carving by printing the structures and clusters
if (self.debug_cluster_carving):
self.print_message("Writing cluster to file", limit=10)
extension = "_" + str(clusterNumber + 1) + ".xyz"
ase.io.write("cluster" + extension, atomic_cluster, format="xyz")
ase.io.write("cluster_ext" + extension, atomic_cluster, format="extxyz")
ase.io.write("structure.xyz", atoms, format="xyz")
ase.io.write("structure_ext.xyz", atoms, format="extxyz", write_results=False)
if (self.clusterDebug_cluster_atoms is not None):
ase.io.write(self.clusterDebug_cluster_atoms, atomic_cluster,
format="extxyz", write_results=False)
if (self.clusterDebug_full_structure is not None):
ase.io.write(self.clusterDebug_full_structure, atoms,
format="extxyz", write_results=False)
self.print_message("Evaluating", limit=100)
qm_forces_array = self.qm_calculator.get_forces(atomic_cluster)
self.print_message("qmmm pot cluster, " + str(len(atomic_cluster)) + " atoms long", limit=100)
self.print_message(atomic_cluster.arrays["orig_index"], limit=100)
        # Create a cluster data object with relevant values
mark = np.zeros(len(atoms), dtype=int)
full_qm_forces = np.zeros((len(atoms), 3))
for i in range(atomic_cluster.info["no_quantum_atoms"]):
orig_index = atomic_cluster.arrays["orig_index"][i]
full_qm_forces[orig_index, :] = qm_forces_array[i, :]
mark[orig_index] = atomic_cluster.arrays["cluster_mark"][i]
cluster_data = ClusterData(len(atoms), mark, cluster, full_qm_forces)
# Try to add additional details to the cluster data
qm_charges = np.zeros(len(atoms))
if (self.debug_qm_calculator):
try:
# print('eigenvalues cluster:')
# self.qm_calculator.print_eigenvalues(scope=15, offset=0)
qm_charges -= 10
atomic_cluster.arrays["qm_charges"] = self.qm_calculator.results["charges"].copy()
atomic_cluster.arrays["qm_forces"] = self.qm_calculator.results["forces"].copy()
ase.io.write("structure_ext.xyz", atoms, format="extxyz", write_results=False)
ase.io.write("cluster_ext" + extension, atomic_cluster,
format="extxyz", write_results=False)
for i in range(atomic_cluster.info["no_quantum_atoms"]):
orig_index = atomic_cluster.arrays["orig_index"][i]
qm_charges[orig_index] = self.qm_calculator.results["charges"][i]
except KeyError:
pass
cluster_data.qm_charges = qm_charges
cluster_data.nClusterAtoms = len(atomic_cluster)
return cluster_data
def combine_qm_mm_forces(self, atoms=None, forces=None, cluster_data_list=None):
"""This combines QM and MM forces
Parameters
----------
atoms : ASE.atoms
atoms object
forces : np.array
atomic forces
cluster_data_list : list of matscipy.calculators.mcfm.ClusterData
information about the clusters
Returns
-------
forces : np.array
atomic forces
"""
if (self.verbose >= 1):
atoms.arrays["classical_forces"] = forces.copy()
self.raw_qm_cluster_forces = np.zeros_like(forces)
# If any clusters present, combine QM/MM forces, otherwise just pass forces along
if ((self.long_range_weight > 0.0) and (len(cluster_data_list) > 0)):
self.print_message(
"Splitting forces into bonding/longRange and combining with QM if needed.", limit=10)
# Obtain bonding forces
forcesLR = self.classical_calculator.get_pairwise_forces(atoms)
# Calculate long range forces
# forcesLR = forces - forcesB
# Replace bonding forces with QM forces for QM atoms only
for cluster_data in cluster_data_list:
for aI in cluster_data.qm_list:
# Combine short ranged forces with a fraction of the long range ones
# The long range forces are there for stability, should not affect the dynamics much.
forces[aI, :] = cluster_data.forces[aI, :] + forcesLR[aI, :] * self.long_range_weight
self.raw_qm_cluster_forces[aI, :] = cluster_data.forces[aI, :]
# Add long range forces to the output
if (self.verbose >= 1):
atoms.arrays["Long_range_forces"] = forcesLR * self.long_range_weight
elif (len(cluster_data_list) > 0):
self.print_message("Combining QM and MM forces.", limit=10)
for cluster_data in cluster_data_list:
for aI in cluster_data.qm_list:
forces[aI, :] = cluster_data.forces[aI, :]
self.raw_qm_cluster_forces[aI, :] = cluster_data.forces[aI, :]
if (self.verbose >= 1):
atoms.arrays["raw_qm_forces"] = self.raw_qm_cluster_forces
if (self.conserve_momentum) and ((len(cluster_data_list) > 0)):
avg_force = forces.mean(axis=0)
forces -= avg_force
return forces
def attach_hybrid_data(self, atoms=None, full_qm_atoms_mask=None, cluster_data=None):
"""Mark atoms that are treated quantm mmechanically
for more comprehensive ooutput
Parameters
----------
atoms : ASE.atoms
atoms object
full_qm_atoms_mask : list
list of all qm atoms
cluster_data_list : list of matscipy.calculators.mcfm.ClusterData
information about the clusters
"""
        # Store information on individual clusters in the atoms file
atoms.arrays["hybrid_clusters"] = np.zeros(len(atoms))
index = 0
for cluster in self.cluster_list:
# Safeguard against empty, nested lists
if len(cluster) == 0:
continue
atoms.arrays["hybrid_clusters"][cluster] = index + 1
atoms.arrays["cluster_marks_" + str(index + 1)] = cluster_data[index].mark.copy()
            # Add information about qm charges
if (self.verbose >= 1):
atoms.arrays["qm_charges_clus_" + str(index + 1)] = cluster_data[index].qm_charges.copy()
index += 1
def evaluate_errors(self, atoms=None, heavy_only=False, r_force=None):
"""Use the forces and reference forces to get errors on hybrid atom
force evaluations
Parameters
----------
atoms : ASE.atoms
atoms object
heavy_only : bool
Do not evaluate errors on hydrogens
r_force : np.array
array with reference forces
"""
# Create the full QM list and QM mask
full_qm_atoms_list = [item for sublist in self.cluster_list for item in sublist]
full_qm_atoms_mask = np.zeros(len(atoms), dtype=bool)
full_qm_atoms_mask[full_qm_atoms_list] = True
forces = self.raw_qm_cluster_forces
if (r_force is None):
if len(full_qm_atoms_list) > 0:
r_force = self.qm_calculator.get_forces(atoms)
else:
r_force = forces
atoms.arrays["reference_qm_force"] = r_force.copy()
atoms.arrays["simulation_force"] = self.results["forces"].copy()
atoms.arrays["qmmm_raw_force"] = self.raw_qm_cluster_forces.copy()
try:
if (len(self.qm_calculator.results["charges"]) == len(atoms) and (self.verbose >= 1)):
atoms.arrays["reference_qm_charges"] = self.qm_calculator.results["charges"].copy()
except KeyError:
pass
# Calculate errors for the QM regions
# Only evaluate errors on heavy atoms if flag is set
if (heavy_only is True):
for i in range(len(full_qm_atoms_mask)):
if (atoms.numbers[i] == 1):
full_qm_atoms_mask[i] = False
if (full_qm_atoms_mask.sum() > 0):
f_errorFull = r_force - forces
f_error = np.linalg.norm(abs(f_errorFull), ord=2, axis=1)
f_errFull = np.zeros((len(atoms), 3))
f_errFull[full_qm_atoms_mask] = f_errorFull[full_qm_atoms_mask]
f_err = np.zeros(len(atoms))
f_err[full_qm_atoms_mask] = f_error[full_qm_atoms_mask]
            # Calculate if the errors are dumping or extracting energy from the system
# According to E = Force * velocity * timestep
# Taking timestep as constant = 1
try:
energyChange = np.einsum("ij, ij -> i", f_errFull, atoms.arrays["momenta"])
totalEChange = np.sum(energyChange)
totalEnergyVector = np.einsum("ij, ij -> i", forces[full_qm_atoms_mask],
atoms.arrays["momenta"][full_qm_atoms_mask])
totalEnergy = np.sum(totalEnergyVector)
totalEChange /= totalEnergy
except KeyError:
totalEChange = 0
# Get the relative force error
cumulative_forces = np.linalg.norm(forces, ord=2, axis=1)
cumulative_forces = np.mean(cumulative_forces[full_qm_atoms_mask])
relative_error = np.divide(f_err, cumulative_forces)
max_relative_error = relative_error[full_qm_atoms_mask].max()
rms_relative_error = np.sqrt(np.mean(np.square(relative_error[full_qm_atoms_mask])))
max_absolute_error = f_err[full_qm_atoms_mask].max()
rms_absolute_error = np.sqrt(np.mean(np.square(f_err[full_qm_atoms_mask])))
# Provide max and RMS relative error
# print "\tRMS of absolute errors: %0.5f, MAX absolute error: %0.5f" %
# (rms_absolute_error, max_absolute_error)
else:
f_err = np.zeros(len(atoms))
f_errFull = np.zeros((len(atoms), 3))
relative_error = np.zeros(len(atoms))
max_absolute_error = 0
rms_absolute_error = 0
max_relative_error = 0
rms_relative_error = 0
totalEChange = 0
try:
self.errors["Cumulative fError vector"] += f_errFull
self.errors["Cumulative energy change"] += totalEChange
except KeyError:
self.errors["Cumulative fError vector"] = f_errFull
self.errors["Cumulative energy change"] = totalEChange
self.errors["vector force error"] = f_errFull
self.errors["Cumulative fError vector length"] =\
np.linalg.norm(abs(self.errors["Cumulative fError vector"]), ord=2, axis=1)
self.errors["energy Change"] = totalEChange
self.errors["absolute force error"] = f_err
self.errors["relative force error"] = relative_error
self.errors["max absolute error"] = max_absolute_error
self.errors["rms absolute error"] = rms_absolute_error
self.errors["max relative error"] = max_relative_error
self.errors["rms relative error"] = rms_relative_error
self.errors["rms force"] = np.sqrt(np.mean(np.square(forces)))
self.errors["no of QM atoms"] = full_qm_atoms_mask.sum()
        # Add the relative error to the atoms object for visualization
atoms.arrays["relative_Ferror"] = self.errors["relative force error"].copy()
atoms.arrays["absolute_Ferror"] = self.errors["absolute force error"].copy()
# #Calculate errors of the full system
# f_err = np.linalg.norm(abs(r_force - forces), ord = 2, axis = 1)
# relative_error = np.divide(f_err,
# np.linalg.norm(forces, ord = 2, axis = 1))
# max_relative_error = relative_error.max()
# rms_relative_error = np.sqrt(np.mean(np.square(relative_error)))
# max_absolute_error = f_err.max()
# rms_absolute_error = np.sqrt(np.mean(np.square(f_err)))
# self.errors_full = {}
# self.errors_full["absolute force error"] = f_err
# self.errors_full["relative force error"] = relative_error
# self.errors_full["max absolute error"] = max_absolute_error
# self.errors_full["rms absolute error"] = rms_absolute_error
# self.errors_full["max relative error"] = max_relative_error
# self.errors_full["rms relative error"] = rms_relative_error
# self.errors_full["rms force"] = np.sqrt(np.mean(np.square(forces)))
def set_qm_atoms(self, qm_list, atoms=None):
"""Force a certian set of clusters for qmmm evaluation,
If forced_qm_list is assigned, the cluster list is not updated
throughout the run
Parameters
----------
qm_list : list
list of atoms
atoms : ASE.atoms
atoms object
"""
if qm_list is None:
self.forced_qm_list = None
else:
self.forced_qm_list = [qm_list]
def compute_stress(self, atoms, forces):
"""Compute total stresses using viral theorem.
WARNING: only works for non-PBC structures
the formula for stress evaluation is
------------------------------------
Sij = sum_k (m_k v_ik v_jk)/ volume + sum_k (r_ik f_jk)/volume
m: mass
v: velocity
r: position
f: force
where i,j are taken from {x, y, z}
and sum_k represents a sum over all atoms
Parameters
----------
atoms : ASE.atoms
atoms object
forces : np.array
atomic forces
Returns
-------
stress : np.array
            stress tensor in Voigt (6-component) notation
"""
stress_mat = np.zeros((3, 3))
stress = np.zeros(6)
vol = atoms.get_volume()
velo = atoms.get_velocities()
mom = atoms.get_momenta()
pos = atoms.get_positions()
f = forces
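        # Note (descriptive comment): only the virial (position . force) term is
        # evaluated below; the kinetic contribution from the velocities/momenta
        # gathered above is not included in this implementation.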
for i in range(3):
for j in range(3):
stress_mat[i, j] = - np.dot(pos[:, i], f[:, j]) / vol
stress[0] = stress_mat[0, 0]
stress[1] = stress_mat[1, 1]
stress[2] = stress_mat[2, 2]
stress[3] = stress_mat[1, 2]
stress[4] = stress_mat[0, 2]
stress[5] = stress_mat[0, 1]
return stress
def print_message(self, message, limit=100):
"""Print a message if the calculators verbosity level is above the
given threshold
For now verbose levels are
--------------------------
0 - nothing is printed
1 - Message when meth::calculate is called
10 - Calculate steps are listed
100 - Information about specific QM clusters
(the default is 0)
Parameters
----------
message : str
The message to be printed
limit : int
            the verbosity threshold for this message
"""
if (self.verbose >= limit):
print(message)
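# Assembly sketch (illustrative; `classical_calc`, `qm_calc` and `qm_cluster`
# stand for user-supplied objects and the buffer_hops value is arbitrary here):
#
#   mcfm_pot = MultiClusterForceMixingPotential(
#       atoms=atoms,
#       classical_calculator=classical_calc,
#       qm_calculator=qm_calc,
#       qm_cluster=qm_cluster,
#       buffer_hops=6)
#   atoms.set_calculator(mcfm_pot)
#   forces = atoms.get_forces()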
| 29,044 | 39.509066 | 107 | py |
matscipy | matscipy-master/matscipy/calculators/mcfm/mcfm_parallel/mcfm_parallel_control.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import os
import time
import multiprocessing as mp
from . import mcfm_parallel_worker as mpw
def get_cluster_data(atoms=None,
clusterData=None,
mcfm_pot=None):
"""Obtain a list of cluster data with calculations being done in parallel
Parameters
----------
atoms : ase.Atoms
atoms object representing the structure
clusterData : list
List of empty objects to be filled with clusterData instances
mcfm_pot : matscipy.calculators.mcfm.MultiClusterForceMixing
qmmm potential
"""
    # number of processors
try:
nProc = int(os.environ["OMP_NUM_THREADS"])
except KeyError:
        nProc = mp.cpu_count() // 2  # integer division: nProc is a process count
# number of threads - number of clusters
numThreads = len(mcfm_pot.cluster_list)
    # In case there are not enough CPUs,
    # have the number of processes artificially increased
if (numThreads > nProc):
nProc = numThreads
# Create atomic clusters and evaluate their sizes
atomicClustersList = []
for cluster in mcfm_pot.cluster_list:
atomicCluster = mcfm_pot.qm_cluster.carve_cluster(atoms,
cluster,
buffer_hops=mcfm_pot.buffer_hops)
atomicClustersList.append(atomicCluster)
# ------ Evaluate work balancing
valenceElectrons = [np.sum(np.abs(item.numbers - 2)) for item in atomicClustersList]
fractionWorkloadPerCluster = [(item ** 2) for item in valenceElectrons]
totalWorkload = sum(fractionWorkloadPerCluster)
fractionWorkloadPerCluster = [item / totalWorkload for item in fractionWorkloadPerCluster]
nProcPerCluster = [1 for item in atomicClustersList]
leftoverProcs = nProc - sum(nProcPerCluster)
# Distribute leftoverProcs
for i in range(numThreads):
nProcPerCluster[i] += int(fractionWorkloadPerCluster[i] * leftoverProcs)
    # Distribute leftover procs (if any)
leftoverProcs = nProc - sum(nProcPerCluster)
running = True
while running:
for i in np.argsort(fractionWorkloadPerCluster)[::-1]:
if (leftoverProcs <= 0):
running = False
break
nProcPerCluster[i] += 1
leftoverProcs -= 1
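    # Worked example of the balancing above (illustrative): with valence
    # electron counts [8, 16] the workloads scale as [64, 256], i.e.
    # fractions [0.2, 0.8]. On nProc = 10 each cluster first gets one
    # process, int(fraction * 8) adds [1, 6], and the single remaining
    # process goes to the most loaded cluster, giving nProcPerCluster = [2, 8].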
if (mcfm_pot.debug_qm_calculator):
print(fractionWorkloadPerCluster, nProcPerCluster, ":parallelTime")
# Set up the Manager
mpManager = mp.Manager()
sharedList = mpManager.list(list(range(numThreads)))
# Setup a list of processes that we want to run
processes = []
for rank in range(numThreads):
p = mp.Process(target=mpw.worker_populate_cluster_data,
name=None,
args=(rank, numThreads),
kwargs=dict(nProcLocal=nProcPerCluster[rank],
atomic_cluster=atomicClustersList[rank],
clusterIndexes=mcfm_pot.cluster_list[rank],
nAtoms=len(atoms),
qmCalculator=mcfm_pot.qm_calculator,
sharedList=sharedList,
debug_qm_calculator=mcfm_pot.debug_qm_calculator))
processes.append(p)
# Run processes
for p in processes:
p.start()
        # Each QM calculation takes between 1 and 100 s, so the wait shouldn't affect performance;
        # it helps prevent any I/O clashes when setting up the simulations
time.sleep(1e-3)
# Exit the completed processes
for p in processes:
p.join()
# Extract results
for index in range(len(clusterData)):
clusterData[index] = sharedList[index]
| 4,636 | 36.096 | 99 | py |
matscipy | matscipy-master/matscipy/calculators/mcfm/mcfm_parallel/__init__.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__all__ = []
| 863 | 38.272727 | 71 | py |
matscipy | matscipy-master/matscipy/calculators/mcfm/mcfm_parallel/mcfm_parallel_worker.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import random
import timeit
import os
import ase.io
from ..cluster_data import ClusterData
random.seed(123)
def worker_populate_cluster_data(rank, size,
nProcLocal=None,
atomic_cluster=None,
clusterIndexes=None,
nAtoms=None,
qmCalculator=None,
sharedList=None,
debug_qm_calculator=False):
"""Function to calcuate total energy with TB
Parameters
----------
rank : int
process number
size : int
total number of processes
nProcLocal : int
number of CPUS to be used for this calculation
atomic_cluster : ASE.atoms
        Structure on which to perform the evaluation
clusterIndexes : np.array
list with indexes of different cluster atoms
nAtoms : int
        number of atoms in the full structure (not just the cluster)
qmCalculator : ASE.calculator
calculator to be used for the evaluation
sharedList : list
        mp shared list used to store output data
debug_qm_calculator : bool
run the simulation in debug mode
"""
    # ------ The multiprocessing library pickles all objects and
    # ------ each worker process receives a copy
    # If a calculator has the option, set parallel parameters
try:
# Create a new calculation seed
qmCalculator.calculationSeed = str(int(random.random() * 1e7)) + str(rank)
# Set OMP values for the potential
qmCalculator.omp_set_threads = True
qmCalculator.omp_num_threads = nProcLocal
except AttributeError:
pass
    # Create a cluster data object with relevant values
mark = np.zeros(nAtoms, dtype=int)
full_qm_forces = np.zeros((nAtoms, 3))
if (debug_qm_calculator):
ase.io.write("cluster_ext_" + str(rank) + ".xyz", atomic_cluster, format="extxyz")
# ------ Run the calculation
if (debug_qm_calculator):
print("Starting evaluation of cluster %d :parallelTime" %
(rank))
t0 = timeit.default_timer()
qm_forces_array = qmCalculator.get_forces(atomic_cluster)
t1 = timeit.default_timer()
if (debug_qm_calculator):
print("Time taken for cluster %d: %.7e w %d atoms :parallelTime" %
(rank, (t1 - t0), len(atomic_cluster)))
for i in range(atomic_cluster.info["no_quantum_atoms"]):
orig_index = atomic_cluster.arrays["orig_index"][i]
full_qm_forces[orig_index, :] = qm_forces_array[i, :]
mark[orig_index] = atomic_cluster.arrays["cluster_mark"][i]
cluster_data = ClusterData(nAtoms, mark, clusterIndexes, full_qm_forces)
# Try to add additional details to the cluster data
cluster_data.nClusterAtoms = len(atomic_cluster)
qm_charges = np.zeros(nAtoms) - 10
try:
for i in range(atomic_cluster.info["no_quantum_atoms"]):
orig_index = atomic_cluster.arrays["orig_index"][i]
qm_charges[orig_index] = qmCalculator.results["charges"][i]
except KeyError:
pass
cluster_data.qm_charges = qm_charges
sharedList[rank] = cluster_data
if (debug_qm_calculator):
try:
atomic_cluster.arrays["qm_charges"] = qmCalculator.results["charges"].copy()
atomic_cluster.arrays["qm_forces"] = qmCalculator.results["forces"].copy()
except KeyError:
pass
ase.io.write("cluster_ext_" + str(rank) + ".xyz", atomic_cluster, format="extxyz")
| 4,424 | 35.570248 | 90 | py |
matscipy | matscipy-master/matscipy/calculators/mcfm/neighbour_list_mcfm/neighbour_list_base.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
class NeighbourListBase(object):
"""Interface for the neighbour list.
mcfm module can use any neighbour list object as long
as it provides the implementation of the two routines below.
"""
def update(self, atoms):
"""Make sure the list is up to date. If clled for the first
time, build the list
Parameters
----------
atoms : ase.Atoms
atoms to initialize the list from
Returns
-------
bool
            True if the update was successful
"""
raise NotImplementedError("Must implement this function!")
def get_neighbours(self, a):
"""Return neighbors of atom number a.
A list of indices to neighboring atoms is
returned.
Parameters
----------
a : int
atomic index
Returns
-------
np.array
array of neighbouring indices
"""
raise NotImplementedError("Must implement this function!")
| 1,886 | 28.030769 | 71 | py |
matscipy | matscipy-master/matscipy/calculators/mcfm/neighbour_list_mcfm/__init__.py | 0 | 0 | 0 | py |
|
matscipy | matscipy-master/matscipy/calculators/mcfm/neighbour_list_mcfm/neighbour_list_mcfm.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 [email protected]
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from math import sqrt
from .neighbour_list_base import NeighbourListBase
from ....neighbours import neighbour_list as mspy_nl
class NeighbourListMCFM(NeighbourListBase):
"""Neighbor list object. Wrapper aroud matscipy.neighbour_list
atoms: ase.Atoms
Atomic configuration.
cutoffs: float or dict
Cutoff for neighbour search. If single float is given, a global cutoff
is used for all elements. A dictionary specifies cutoff for element
pairs. Specification accepts element numbers of symbols.
Example: {(1, 6): 1.1, (1, 1): 1.0, ('C', 'C'): 1.85}
skin: float
If no atom has moved more than the skin-distance since the
last call to the ``update()`` method, then the neighbor list
can be reused. This will save some expensive rebuilds of
the list, but extra neighbors outside the cutoff will be
returned.
hysteretic_break_factor: float
        If atoms are connected, the link will break only if they move apart
further than cutoff * hysteretic_break_factor
"""
def __init__(self, atoms, cutoffs, skin=0.3, hysteretic_break_factor=1):
self.cutoffs = cutoffs.copy()
self.cutoffs_hysteretic = cutoffs.copy()
if hysteretic_break_factor > 1:
self.do_hysteretic = True
for key in self.cutoffs_hysteretic:
self.cutoffs_hysteretic[key] *= hysteretic_break_factor
else:
self.do_hysteretic = False
self.skin = skin
self.nupdates = 0
# Additional data
self.neighbours = [np.zeros(0) for idx in range(len(atoms))]
self.old_neighbours = [[] for idx in range(len(atoms))]
def update(self, atoms):
"""Make sure the list is up to date. If clled for the first
time, build the list
Parameters
----------
atoms : ase.Atoms
atoms to initialize the list from
Returns
-------
bool
            True if the update was successful
"""
if self.nupdates == 0:
self.do_update(atoms)
return True
elif ((self.pbc != atoms.get_pbc()).any() or
(self.cell != atoms.get_cell()).any() or
((self.positions - atoms.get_positions())**2).sum(1).max() >
self.skin**2):
self.do_update(atoms)
return True
return False
def do_update(self, atoms):
"""Build the neighbour list based on pairwise distances.
Parameters
----------
atoms : ase.Atoms
atoms to initialize the list from
Raises
------
ValueError
Must specify cutoff radii for all atoms
"""
self.positions = atoms.get_positions()
self.pbc = atoms.get_pbc()
self.cell = atoms.get_cell()
shorti, shortj = mspy_nl(str("ij"), atoms, self.cutoffs)
new_neighbours = [[] for idx in range(len(atoms))]
for idx in range(len(shorti)):
new_neighbours[shorti[idx]].append(shortj[idx])
if self.do_hysteretic:
longi, longj = mspy_nl(str("ij"), atoms, self.cutoffs_hysteretic)
for idx in range(len(longi)):
# Split for profiling
previously_connected = longj[idx] in self.old_neighbours[longi[idx]]
not_added = longj[idx] not in new_neighbours[longi[idx]]
if previously_connected and not_added:
new_neighbours[longi[idx]].append(longj[idx])
self.old_neighbours = new_neighbours
for idx in range(len(new_neighbours)):
self.neighbours[idx] = np.asarray(list(new_neighbours[idx]))
self.nupdates += 1
def get_neighbours(self, a):
"""Return neighbors of atom number a.
A list of indices to neighboring atoms is
returned.
Parameters
----------
a : int
atomic index
Returns
-------
np.array
array of neighbouring indices
Raises
------
RuntimeError
Must update the list at least once!
"""
if self.nupdates == 0:
raise RuntimeError("Must update the list at least once!")
return self.neighbours[a]
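# Usage sketch (illustrative, not part of the original module):
#
#   cutoffs = {(1, 1): 1.0, (1, 6): 1.4, (6, 6): 1.85}
#   nl = NeighbourListMCFM(atoms, cutoffs, skin=0.3,
#                          hysteretic_break_factor=1.5)
#   nl.update(atoms)            # must be called before get_neighbours()
#   neighs = nl.get_neighbours(0)
#
# With hysteretic_break_factor=1.5 a bond forms when two atoms come within
# the cutoff, but an existing bond only breaks once they separate beyond
# 1.5 * cutoff, which suppresses flickering of the connectivity.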
| 5,247 | 31.196319 | 84 | py |
matscipy | matscipy-master/matscipy/calculators/mcfm/qm_cluster_tools/qm_clustering_tool.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from .base_qm_cluster_tool import BaseQMClusterTool
from ase import Atom
class QMClusteringTool(BaseQMClusterTool):
"""This class is responsible for carving and hydrogenating
a qm cluster"""
def __init__(self, mediator=None, double_bonded_atoms_list=[]):
"""This class is responsible for carving and hydrogenating
a qm cluster
Parameters
----------
mediator : matscipy.calculators.mcfm.QMCluster
class responsible for managing the QM clusters in the simulation
double_bonded_atoms_list : list
list of doubly bonded atoms, needed for double hydrogenation.
"""
# Initialize the QMClusterObject with a mediator
super(QMClusteringTool, self).__init__(mediator)
self.double_bonded_atoms_list = double_bonded_atoms_list
def create_buffer_region(self, atoms, qm_atoms_list, buffer_hops=10):
"""GIven a list of active QM atoms, returns a list containing buffer atoms indices
Parameters
----------
atoms : ase.Atoms
whole structure
qm_atoms_list : list of ints (atomic indexes)
List of atoms in the inner QM region
buffer_hops : int
Expand the cluster by this many neighbour hops to create the buffer
Returns
-------
list of ints (atomic indexes)
buffer_list: List of atoms in the buffer region
list of ints (atomic indexes)
terminal_atoms: List of atoms from the buffer region that are on the verge of it
list of ints (atomic indexes)
cutoff_atoms_list: List of atoms that are not in the buffer but are bonded to the
atoms in the buffer.
"""
innerQM_region_set = set(qm_atoms_list)
edge_neighbours = set(qm_atoms_list)
terminal_atoms = []
if len(qm_atoms_list) == len(atoms):
return [], [], []
for i in range(buffer_hops):
new_neighbours = set()
# For each atom in edge neighbours list, expand the list
for index in edge_neighbours:
new_neighbours |= set(self.find_neighbours(atoms, index)[0])
# Remove atoms already in the qm list
edge_neighbours = new_neighbours - innerQM_region_set
# If the cluster is still growing, update the list of atoms at the edge
if len(edge_neighbours) > 0:
terminal_atoms = edge_neighbours
# Make a union of the sets
innerQM_region_set |= edge_neighbours
        # Go through the loop one more time to find the first atoms not in the cluster
new_neighbours = set()
# For each atom in edge neighbours list, expand the list
for index in edge_neighbours:
new_neighbours |= set(self.find_neighbours(atoms, index)[0])
# Remove atoms already in the qm list
cutoff_atoms_set = new_neighbours - innerQM_region_set
cutoff_atoms_list = list(cutoff_atoms_set)
# Create buffer list
innerQM_region_set -= set(qm_atoms_list)
buffer_list = list(innerQM_region_set)
terminal_atoms = list(terminal_atoms)
return buffer_list, terminal_atoms, cutoff_atoms_list
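    # A sketch of the hop expansion above (descriptive comment): starting from
    # the core set, each iteration unions the neighbours of the current edge
    # set and subtracts atoms already collected, so after buffer_hops
    # iterations the buffer holds every atom within that many bonds of the core.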
def carve_cluster(self, atoms, core_qm_list, buffer_hops=10):
"""Create a cluster with the list as core atoms, returns an ase.Atoms object
Parameters
----------
atoms : ase.Atoms
whole structure
core_qm_list : list of ints (atomic indexes)
Indexes of atoms in the core QM region
buffer_hops : int
Expand the cluster by this many neighbour hops to create the buffer
Returns
-------
ase.Atoms
atoms object representing the QM cluster including
the inner region and the buffer
"""
# Define some lists
total_supplementary_terminal_atoms = []
# Buffer atoms - the buffer region
# terminal_atoms_list - last atoms in the buffer region, bonded to the atoms not in the buffer
# cutoff_atoms_list - atoms to be changed into hydrogen (cut-off from the buffer)
# total_supplementary_terminal_atoms - atoms added to the buffer to make clusters more physical
self.print_message("Creating buffer list", 1)
buffer_list, terminal_atoms_list, cutoff_atoms_list =\
self.create_buffer_region(atoms, core_qm_list, buffer_hops=buffer_hops)
        # If a special atom was to be cut off, add its whole special molecule to the cluster
specialFlag = True
while (specialFlag):
specialFlag = False
for specialMolecule in self.mediator.special_atoms_list:
# Only operate on the special molecule if a part of it is inside the buffer
if not any([specialAtom in buffer_list for specialAtom in specialMolecule]):
continue
for specialAtomIndex in specialMolecule:
if (specialAtomIndex in cutoff_atoms_list):
self.include_special_atom(specialAtomIndex,
atoms,
buffer_list,
terminal_atoms_list,
cutoff_atoms_list)
                        # If at least one atom was added, continue to the next loop
specialFlag = True
        # Complete aromatic rings, repeat the process until no more atoms are added
iterMax = 10
for i in range(iterMax):
completeFlag = self.complete_aromatic_rings(
atoms, buffer_list, terminal_atoms_list, cutoff_atoms_list,
total_supplementary_terminal_atoms)
if (not completeFlag):
break
# Create joint list. Buffer list is the original buffer while the
# supplementary list is composed of new additions
self.print_message("Creating joint listt", 10)
if len(buffer_list) > 0:
total_list = core_qm_list + buffer_list + total_supplementary_terminal_atoms
else:
total_list = core_qm_list
# Add missing hydrogens (all routines operate only on heavy atoms)
total_set = set(total_list)
self.hydrogenate_cluster(atoms, total_set)
total_list = list(total_set)
self.print_message("finished adding atoms", 10)
self.print_message("Buffer complete, creating cluster from mark", 1)
atomic_cluster = self.create_cluster_from_marks(atoms, total_list)
atomic_cluster.info["no_quantum_atoms"] = len(atomic_cluster)
# Add properties for core region and buffer
atomic_cluster.arrays["cluster_mark"] = np.zeros(len(atomic_cluster), dtype=int)
atomic_cluster.arrays["cluster_mark"] += 5
for i in range(len(atomic_cluster)):
if atomic_cluster.arrays["orig_index"][i] in total_supplementary_terminal_atoms:
atomic_cluster.arrays["cluster_mark"][i] = 4
elif atomic_cluster.arrays["orig_index"][i] in terminal_atoms_list:
atomic_cluster.arrays["cluster_mark"][i] = 3
elif atomic_cluster.arrays["orig_index"][i] in buffer_list:
atomic_cluster.arrays["cluster_mark"][i] = 2
elif atomic_cluster.arrays["orig_index"][i] in core_qm_list:
atomic_cluster.arrays["cluster_mark"][i] = 1
# Change the cut-off atoms into hydrogens
self.print_message("Change cutoff atoms into hydrogens", 1)
        if (len(cutoff_atoms_list) > 0) and (len(terminal_atoms_list) > 0):
terminal_atoms_list = list(set(terminal_atoms_list))
self.hydrogenate_dangling_bonds(terminal_atoms_list, cutoff_atoms_list, atomic_cluster, atoms)
self.print_message("Center the atomic_cluster and remove PBC's", 1)
# Center the cluster and remove PBC's
# atomic_cluster.positions += np.array([0, 0, 20])
atomic_cluster.wrap()
atomic_cluster.center(vacuum=30)
atomic_cluster.pbc = np.array([0, 0, 0], dtype=bool)
self.print_message("Fished!", 1)
return atomic_cluster
def hydrogenate_dangling_bonds(self, terminal_atoms_list, cutoff_atoms_list, atomic_cluster, atoms):
"""Change atoms that were cut-off into hydrogens
Parameters
----------
terminal_atoms_list : list of ints (atomic indexes)
last atoms in the buffer region, bonded to the atoms not in the buffer
cutoff_atoms_list : list of ints (atomic indexes)
atoms to be changed into hydrogen, first atoms not in the buffer
atomic_cluster : ase.Atoms
QM region structure (with core and buffer atoms)
atoms : ase.Atoms
whole structure
"""
pos = atoms.get_positions()
# Change cutoff list into a numpy array
cutoff_atoms_list = np.asarray(cutoff_atoms_list)
for tAI in terminal_atoms_list:
# Check if any of the cut off atoms are neighbours of the terminal atom
cutoff_neighs = [item for item in self.mediator.neighbour_list.get_neighbours(
tAI) if (item in cutoff_atoms_list)]
            # Iterate over all cut-off atoms that are neighbours of tAI to
            # effectively loop over all cut bonds
for cAI in cutoff_neighs:
if ((cAI in self.double_bonded_atoms_list) and (tAI in self.double_bonded_atoms_list)):
self.replace_double_bond(tAI, cAI, atomic_cluster, atoms, pos)
else:
self.replace_single_bond(tAI, cAI, atomic_cluster, atoms, pos)
def replace_single_bond(self,
terminal_atom_index,
cutoff_atom_index,
atomic_cluster,
atoms,
atomic_positions):
"""Replace a cut-off atom with a single hydrogen
Parameters
----------
        terminal_atom_index : int (atomic index)
            last atom in the buffer region, bonded to the cut-off atom
        cutoff_atom_index : int (atomic index)
            atom to be changed into hydrogen, first atom not in the buffer
atomic_cluster : ase.Atoms
QM region structure (with core and buffer atoms)
atoms : ase.Atoms
whole structure
atomic_positions : np.array
Positions of atoms in the whole structure (copy of the atoms.positions)
"""
vector = atomic_positions[cutoff_atom_index] - atomic_positions[terminal_atom_index]
# Make the bond approximately 1 angstrom
vector /= np.linalg.norm(vector)
vector *= 1
# Add a hydrogen instead of the cutoff atom
pos = atomic_positions[terminal_atom_index] + vector
cutoff_hydro = Atom(symbol=1, position=pos, charge=0.1)
atomic_cluster.append(cutoff_hydro)
atomic_cluster.arrays["orig_index"][len(atomic_cluster) - 1] = len(atoms) + 1
atomic_cluster.arrays["cluster_mark"][len(atomic_cluster) - 1] = 6
def replace_double_bond(self,
terminal_atom_index,
cutoff_atom_index,
atomic_cluster,
atoms,
atomic_positions):
"""Replace a cut-off atom with two hydrogens
Parameters
----------
        terminal_atom_index : int (atomic index)
            last atom in the buffer region, bonded to the cut-off atom
        cutoff_atom_index : int (atomic index)
            atom to be changed into hydrogen, the first atom not in the buffer
atomic_cluster : ase.Atoms
QM region structure (with core and buffer atoms)
atoms : ase.Atoms
whole structure
atomic_positions : np.array
Positions of atoms in the whole structure (copy of the atoms.positions)
"""
        # Find the vector from the terminal atom to the cutoff atom
vector = atomic_positions[cutoff_atom_index] - atomic_positions[terminal_atom_index]
# ------ Find the displacement between two hydrogens
        # Find the two closest neighbours of the terminal atom
neighbours = np.asarray(self.find_neighbours(atoms, terminal_atom_index)[0])
dispVectors = atomic_positions[neighbours] - atomic_positions[terminal_atom_index]
distances = np.sum(np.square(dispVectors), axis=1)
closeNeighbours = np.argsort(distances)
closeNeighbours = neighbours[closeNeighbours][: 2]
# Find the vectors to those two atoms
a1 = atomic_positions[terminal_atom_index] - atomic_positions[closeNeighbours[0]]
a2 = atomic_positions[terminal_atom_index] - atomic_positions[closeNeighbours[1]]
# Find the cross product of a1 and a2 thus finding a vector perpendicular to
# the plane they define
aPerp = np.cross(a1, a2)
aPerp /= np.linalg.norm(aPerp)
aPerp *= 2
# Create two vectors, the initial displacement +/- the perpendicular vector
vector1 = vector + aPerp
vector2 = vector - aPerp
# Make the bonds approximately 1 angstrom
vector1 /= np.linalg.norm(vector1)
vector2 /= np.linalg.norm(vector2)
# Add a hydrogen instead of the cutoff atom
pos = atomic_positions[terminal_atom_index] + vector1
cutoff_hydro = Atom(symbol=1, position=pos, charge=0.1)
atomic_cluster.append(cutoff_hydro)
atomic_cluster.arrays["orig_index"][len(atomic_cluster) - 1] = len(atoms) + 1
atomic_cluster.arrays["cluster_mark"][len(atomic_cluster) - 1] = 6
pos = atomic_positions[terminal_atom_index] + vector2
cutoff_hydro = Atom(symbol=1, position=pos, charge=0.1)
atomic_cluster.append(cutoff_hydro)
atomic_cluster.arrays["orig_index"][len(atomic_cluster) - 1] = len(atoms) + 1
atomic_cluster.arrays["cluster_mark"][len(atomic_cluster) - 1] = 6
def include_special_atom(self,
specialAtomIndex,
atoms,
buffer_list,
terminal_atoms_list,
cutoff_atoms_list):
"""Add a special atom to the buffer and update indexes.
        If a group of special atoms is specified (a special molecule) and
        one of these atoms is in the buffer region, the rest are also added to it.
Parameters
----------
specialAtomIndex : int (atomic index)
            Index of the special atom to add to the buffer
atoms : ase.Atoms
whole structure
buffer_list : list of ints (atomic indexes)
List of atoms in the buffer region
terminal_atoms_list : list of ints (atomic indexes)
last atoms in the buffer region, bonded to the atoms not in the buffer
cutoff_atoms_list : list of ints (atomic indexes)
atoms to be changed into hydrogen, first atoms not in the buffer
"""
buffer_list.append(specialAtomIndex)
terminal_atoms_list.append(specialAtomIndex)
cutoff_atoms_list.remove(specialAtomIndex)
# ------ Add new cutoff atoms
specialAtomNeighbours = self.find_neighbours(atoms, specialAtomIndex)[0]
for neighIndex in specialAtomNeighbours:
if (neighIndex not in buffer_list) and (neighIndex not in cutoff_atoms_list):
cutoff_atoms_list.append(neighIndex)
def complete_aromatic_rings(self, atoms, buffer_list,
terminal_atoms_list, cutoff_atoms_list,
total_supplementary_terminal_atoms):
"""Check if a terminal atom is not connected ot two atoms at once
If it is, add it. This deals with aromatic ring structures
Parameters
----------
atoms : ase.Atoms
whole structure
buffer_list : list of ints (atomic indexes)
List of atoms in the buffer region
terminal_atoms_list : list of ints (atomic indexes)
last atoms in the buffer region, bonded to the atoms not in the buffer
cutoff_atoms_list : list of ints (atomic indexes)
atoms to be changed into hydrogen, first atoms not in the buffer
        total_supplementary_terminal_atoms : list of ints (atomic indexes)
            atoms added to the buffer to make clusters more physical.
            Example: the last atom in an aromatic ring
Returns
-------
bool
Return True if any atoms were added
"""
supplementary_terminal_atoms = []
# Buffer atoms - the buffer region
# terminal_atoms_list - last atoms in the buffer region, bonded to the atoms not in the buffer
# cutoff_atoms_list - atoms to be changed into hydrogen (cut-off from the buffer)
# supplementary_terminal_atoms - atoms added to the buffer to make clusters more physical
self.print_message("Completing destroyed rings", 1)
for index, cI in enumerate(cutoff_atoms_list):
msg = "Working on atom {0} with number {1}".format(cI, atoms.numbers[cI])
self.print_message(msg, 100)
            # Check if a cutoff atom has two or more neighbours in the
            # buffer or terminal atoms lists
neighs = self.find_neighbours(atoms, cI)[0]
cutoff_atom_neighs = [item for item in neighs if item in (terminal_atoms_list + buffer_list)]
# If two or more, add it
if (len(cutoff_atom_neighs) >= 2):
supplementary_terminal_atoms.append(cI)
self.print_message("Adding {0} to supplementary index".format(cI), 10)
self.print_message("Finished adding atoms.", 10)
# Return False if no atoms were added
if (len(supplementary_terminal_atoms) == 0):
return False
# Keep track of all the additions to the supplementary atoms
total_supplementary_terminal_atoms += supplementary_terminal_atoms
terminal_atoms_list += total_supplementary_terminal_atoms
if (self.verbose >= 10):
print("Added", len(supplementary_terminal_atoms), "Atoms!")
if (self.verbose >= 100):
print("Added list:", supplementary_terminal_atoms)
# Find new cutoff atoms
if (self.verbose >= 10):
print("Finding new cutoff atoms")
cutoff_atoms_list[:] = []
outer_qm_list = buffer_list + total_supplementary_terminal_atoms
for eqI in terminal_atoms_list:
new_cutoff_atoms = [item for item in self.find_neighbours(atoms, eqI)[0] if
(item not in outer_qm_list)]
cutoff_atoms_list += new_cutoff_atoms
# Get rid of duplicates
cutoff_atoms_list[:] = list(set(cutoff_atoms_list))
terminal_atoms_list[:] = list(set(terminal_atoms_list))
# Return True if any atoms were added:
return True
def create_cluster_from_marks(self, atoms, select_list):
"""Return an ase.Atoms object containing selected atoms from
a larger structure
Parameters
----------
atoms : ase.Atoms
whole structure
select_list : list of ints (atomic indexes)
List of atoms to include in the new structure
Returns
-------
ase.Atoms
Structure composed of selected atoms
"""
if (len(select_list) > len(atoms)):
select_list = np.unique(select_list)
pbc = atoms.get_pbc()
cell = atoms.get_cell()
cluster = atoms.__class__(cell=cell, pbc=pbc)
cluster.arrays = {}
# for name, a in atoms.arrays.items():
# cluster.arrays[name] = a[select_list].copy()
for name in ["numbers", "positions"]:
cluster.arrays[name] = atoms.arrays[name][select_list].copy()
cluster.arrays["orig_index"] = np.asarray(select_list, dtype=int)
return cluster
def print_message(self, message, limit=100):
"""Print a message if the calculators verbosity level is above the
given threshold
Parameters
----------
message : str
The message to be printed
limit : int
            the verbosity threshold for this message
"""
if (self.verbose >= limit):
print(message)
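# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the selection logic behind
# create_cluster_from_marks(): copy selected atoms out of a larger
# ase.Atoms object while remembering their original indices. The water
# geometry below is made up for demonstration.
if __name__ == "__main__":
    from ase import Atoms

    atoms = Atoms("H2O", positions=[[0.76, 0.59, 0.0],
                                    [-0.76, 0.59, 0.0],
                                    [0.0, 0.0, 0.0]])
    select_list = [0, 2]  # keep one hydrogen and the oxygen
    cluster = atoms.__class__(cell=atoms.get_cell(), pbc=atoms.get_pbc())
    cluster.arrays = {}
    for name in ["numbers", "positions"]:
        cluster.arrays[name] = atoms.arrays[name][select_list].copy()
    cluster.arrays["orig_index"] = np.asarray(select_list, dtype=int)
    print(cluster.arrays["orig_index"])  # -> [0 2]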
| 21,592 | 42.0998 | 106 | py |
matscipy | matscipy-master/matscipy/calculators/mcfm/qm_cluster_tools/qm_flagging_tool.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from .base_qm_cluster_tool import BaseQMClusterTool
class QMFlaggingTool(BaseQMClusterTool):
"""This class is responsible for flagging atoms
that move out of their equilibrium"""
def __init__(self, mediator=None, qm_flag_potential_energies=None,
small_cluster_hops=3, only_heavy=False, ema_parameter=0.1, energy_cap=None,
energy_increase=1):
"""This class is responsible for flagging atoms
that move out of their equilibrium
Parameters
----------
mediator : matscipy.calculators.mcfm.QMCluster
class responsible for managing the QM clusters in the simulation
qm_flag_potential_energies : np.array
            thresholds for flagging individual atoms.
            The dimensions are (nAtoms, 2), where
            column 1: threshold to enter the QM region
            column 2: threshold to stay in the QM region
small_cluster_hops : int
Each flagged atom and atoms around it within small_cluster_hops neighbour hops
will generate a single cluster, clusters are later joined.
only_heavy : bool
If True, only consider non-hydrogen atoms in cluster expansion.
Hydrogens are added later
ema_parameter : float
            parameter lambda in the exponential moving average calculation
energy_cap : float
if not None, cap potential energy per atom at this value
energy_increase : int
            Multiplier for the potential energy per atom, used to scale it for convenience
"""
# Initialize the QMClusterObject with a mediator
super(QMFlaggingTool, self).__init__(mediator)
try:
self.qm_flag_potential_energies = qm_flag_potential_energies
except AttributeError:
raise AttributeError("QM flag PE/force tolerance must be defined")
self.small_cluster_hops = small_cluster_hops
self.only_heavy = only_heavy
self.ema_parameter = ema_parameter
self.energy_cap = energy_cap
self.energy_increase = energy_increase
self.qm_atoms_list = []
self.old_energized_list = []
self.verbose = 0
def get_energized_list(self, atoms, data_array, property_str, hysteretic_tolerance):
"""Produce a list of atoms that are ot be flagged as a QM region
based on the properties given in the array according to the
tolerance given.
Parameters
----------
atoms : ase.Atoms
Whole structure
data_array : array
an array of per atom data providing information
property_str : str
            name of the property, so that it can be stored in atoms.arrays.
        hysteretic_tolerance : array
            Thresholds for flagging individual atoms.
            The dimensions are (nAtoms, 2), where
            column 1: threshold to enter the QM region
            column 2: threshold to stay in the QM region
Returns
-------
list
List of flagged atoms
"""
# ------ Update EPA
update_avg_property_per_atom(atoms, data_array, property_str, self.ema_parameter)
avg_property_per_atom = atoms.arrays[property_str]
tolerance = np.zeros(len(atoms)) + hysteretic_tolerance[:, 0]
tolerance[self.old_energized_list] = hysteretic_tolerance[self.old_energized_list, 1]
energized_mask = np.greater_equal(avg_property_per_atom, tolerance)
energized_list = np.arange(len(atoms))[energized_mask]
return energized_list
def create_cluster_around_atom(self, atoms, atom_id, hydrogenate=False):
"""Carve a cluster around the atom with atom_id
This function operates on sets and returns a set
Parameters
----------
atoms : ase.Atoms
Whole structure
atom_id : int
Atomic index
hydrogenate : bool
If true, hydrogenate the resulting structure
Returns
-------
        set
            atoms in the new cluster (a set of atomic indexes)
"""
cluster_set = set([atom_id])
edge_neighbours = set([atom_id])
for i in range(self.small_cluster_hops):
new_neighbours = set()
# For each atom in edge neighbours list, expand the list
for index in edge_neighbours:
new_neighbours |= set(self.find_neighbours(atoms, index)[0])
# Remove atoms already in the qm list
edge_neighbours = new_neighbours - cluster_set
# Make a union of the sets
cluster_set = cluster_set | edge_neighbours
        # ----- If specified, add hydrogens to the cluster
if hydrogenate:
self.hydrogenate_cluster(atoms, cluster_set)
return cluster_set
def join_clusters(self, verbose=False):
"""This function will join the clusters if they overlap
Input is an array of sets each representing individual
small cluster
Parameters
----------
verbose : bool
Print messages during calculation
"""
i = 0
        # Iterate over the whole list, taking into account that it might get
        # modified throughout the loop
while (i < len(self.qm_atoms_list)):
            # Iterate over the sets, taking into account that the list can change
            # Do not repeat pairwise disjointness checks
# i.e. for a list of sets [A, B, C, D]
# first loop included checks A-B, A-C, A-D (pairs 0 - 1:3)
# Then make sure the second only does B-C, B-D (pairs 1 - 2:3)
for j in range(i + 1, len(self.qm_atoms_list)):
if verbose is True:
print(i, j, self.qm_atoms_list[i], self.qm_atoms_list[j],
not set.isdisjoint(self.qm_atoms_list[i], self.qm_atoms_list[j]))
if not set.isdisjoint(self.qm_atoms_list[i], self.qm_atoms_list[j]):
# If intersection detected, unify sets
self.qm_atoms_list[i] |= self.qm_atoms_list[j]
# Then delete the second set to avoid duplicates
# Then restart the j loop to see if now, any set
# has an intersection with the new union
del self.qm_atoms_list[j]
i -= 1
if verbose is True:
for entry in self.qm_atoms_list:
print(entry)
break
i += 1
def expand_cluster(self, special_atoms_list):
"""Include extra atoms in the cluster.
If one of the special atoms is included in one of the clusters,
add all other special atoms to this cluster
Parameters
----------
special_atoms_list : list
list of the special atoms
"""
for specialMolecule in special_atoms_list:
specialMoleculeSet = set(specialMolecule)
for clusterIndex in range(len(self.qm_atoms_list)):
if (not specialMoleculeSet.isdisjoint(self.qm_atoms_list[clusterIndex])):
self.qm_atoms_list[clusterIndex] |= specialMoleculeSet
def update_qm_region(self, atoms,
potential_energies=None,
):
"""Update the QM region while the simulation is running
Parameters
----------
atoms : ase.Atoms
whole structure
potential_energies : array
Potential energy per atom
Returns
-------
list of lists of ints
list of individual clusters as lists of atoms
"""
        # Make sure the right atoms object is passed in
# ------ Increase the energy by a common factor - makes it more readable in some cases
if (self.energy_increase is not None):
potential_energies *= self.energy_increase
# ------ Cap maximum energy according to the flag
if (self.energy_cap is not None):
np.minimum(potential_energies, self.energy_cap, potential_energies)
# ------ Get the energized atoms list
flagged_atoms_dict = {}
flagged_atoms_dict["potential_energies"] = self.get_energized_list(atoms,
potential_energies,
"avg_potential_energies",
self.qm_flag_potential_energies)
energized_set = set()
for key in flagged_atoms_dict:
energized_set = set(flagged_atoms_dict[key]) | energized_set
energized_list = list(energized_set)
self.old_energized_list = list(energized_list)
if (len(energized_list) != 0):
self.mediator.neighbour_list.update(atoms)
            # TODO: if the energized list includes the whole system, just pass it along
for array_i, atom_i in enumerate(energized_list):
energized_list[array_i] = self.create_cluster_around_atom(atoms, atom_i, hydrogenate=False)
self.qm_atoms_list = energized_list
if (len(self.qm_atoms_list) > 0):
self.join_clusters()
self.expand_cluster(self.mediator.special_atoms_list)
self.join_clusters()
if self.only_heavy is False:
for index in range(len(self.qm_atoms_list)):
self.qm_atoms_list[index] = self.hydrogenate_cluster(atoms, self.qm_atoms_list[index])
self.qm_atoms_list = list(map(list, self.qm_atoms_list))
return self.qm_atoms_list
# print "QM cluster", self.qm_atoms_list
def exponential_moving_average(oldset, newset=None, ema_parameter=0.1):
"""Apply the exponential moving average to the given array
Parameters
----------
oldset : array
old values
newset : array
new data set
ema_parameter : float
parameter lambda
"""
if newset is None:
pass
else:
oldset *= (1 - ema_parameter)
oldset += ema_parameter * newset
def update_avg_property_per_atom(atoms, data_array, property_str, ema_parameter):
"""Update the per atom property using running avarages
and store it in atoms.properties[property_str]
Parameters
----------
atoms : ase.Atoms
structure that need updated values
data_array : array
data that need to be attached to atoms
property_str : str
key for structure properties dictionary
ema_parameter : float
Coefficient for the Exponential Moving Average
"""
# Abbreviations
    # ppa - property per atom
# appa - average property per atom
ppa = data_array
# ------ Get average ppa
if (property_str in atoms.arrays):
exponential_moving_average(atoms.arrays[property_str],
ppa, ema_parameter)
else:
atoms.arrays[property_str] = ppa.copy()
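# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the in-place exponential moving average used to smooth
# per-atom potential energies before flagging; the values are made up.
if __name__ == "__main__":
    energies = np.array([1.0, 2.0, 3.0])
    new_sample = np.array([2.0, 2.0, 2.0])
    exponential_moving_average(energies, new_sample, ema_parameter=0.1)
    # Updated in place as 0.9 * old + 0.1 * new
    print(energies)  # -> [1.1 2.  2.9]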
| 11,970 | 36.409375 | 107 | py |
matscipy | matscipy-master/matscipy/calculators/mcfm/qm_cluster_tools/__init__.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__all__ = []
| 864 | 36.608696 | 71 | py |
matscipy | matscipy-master/matscipy/calculators/mcfm/qm_cluster_tools/base_qm_cluster_tool.py | #
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
class BaseQMClusterTool(object):
"""Class that hold all the functions common to
qm clustering objects"""
def __init__(self, mediator):
"""Class that hold all the functions common to
qm clustering objects
Parameters
----------
mediator : matscipy.calculators.mcfm.QMCluster
class responsible for managing the QM clusters in the simulation
"""
self.mediator = mediator
self.verbose = mediator.verbose
def find_neighbours(self, atoms, index):
"""Find the neighbours of atom i using self.neighbour_list
returns a list of [heavy_neighbours, hydrogen_neighbours]
Parameters
----------
atoms : ase.Atoms object
structure in which it is necessary to find the neighbours
index : int
atomic index
Returns
-------
list
non-hydrogen neighbours
list
hydrogen neighbours
"""
neighbours = self.mediator.neighbour_list.get_neighbours(index)
heavy_n = []
hydro_n = []
for arr_i, atom_i in enumerate(neighbours):
if (atoms.numbers[atom_i] == 1):
hydro_n.append(atom_i)
else:
heavy_n.append(atom_i)
return [heavy_n, hydro_n]
def hydrogenate_cluster(self, atoms, cluster):
"""Add neigoburing hydrogens to a cluster composed of heavy ions
The input should be a set representing heavy ions in a cluster
This functions operates on sets
Parameters
----------
atoms : ase.Atoms object
structure in which it is necessary to find the neighbours
        cluster : set of ints (atomic indexes)
            sub-structure of the larger structure that needs its dangling
            bonds hydrogenated
Returns
-------
        set
            The original cluster, now including the neighbouring hydrogens
"""
for atom_id in cluster.copy():
# find_neighbours returns a list where
# [0] - heavy neighbours
# [1] - hydrogen neighbours
cluster |= set(self.find_neighbours(atoms, atom_id)[1])
return cluster
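# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the heavy/hydrogen partitioning performed by
# find_neighbours(), with a made-up neighbour list.
if __name__ == "__main__":
    numbers = np.array([6, 1, 1, 8])  # C, H, H, O
    neighbours = [1, 2, 3]            # hypothetical neighbours of atom 0
    heavy_n = [i for i in neighbours if numbers[i] != 1]
    hydro_n = [i for i in neighbours if numbers[i] == 1]
    print(heavy_n, hydro_n)  # -> [3] [1, 2]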
| 3,112 | 31.092784 | 76 | py |
matscipy | matscipy-master/matscipy/calculators/eam/__init__.py | #
# Copyright 2014-2015, 2017, 2021 Lars Pastewka (U. Freiburg)
# 2020 Wolfram G. Nöhring (U. Freiburg)
# 2015 Adrien Gola (KIT)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Implements the Embedded Atom Method"""
from .calculator import EAM
from .io import mix_eam, read_eam, write_eam
from .average_atom import average_potential
| 1,095 | 38.142857 | 71 | py |
matscipy | matscipy-master/matscipy/calculators/eam/io.py | #
# Copyright 2015, 2021 Lars Pastewka (U. Freiburg)
# 2019-2020 Wolfram G. Nöhring (U. Freiburg)
# 2015 Adrien Gola (KIT)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Read and write tabulated EAM potentials"""
from collections import namedtuple
import numpy as np
try:
from scipy import interpolate
except ImportError:
print('Warning: No scipy')
interpolate = False
import os
from ase.units import Hartree, Bohr
###
"""Conversion factor from Hartree to Electronvolt. \
Use 1 Hartree = 27.2 eV to be consistent with Lammps EAM."""
_hartree_in_electronvolt = 27.2
"""Conversion factor from Bohr radii to Angstrom. \
Use 1 Bohr radius = 0.529 A to be consistent with Lammps EAM."""
_bohr_in_angstrom = 0.529
"""Conversion factor for charge function of EAM potentials \
of kind 'eam' (DYNAMO funcfl format)."""
_hartree_bohr_in_electronvolt_angstrom = _hartree_in_electronvolt * _bohr_in_angstrom
# Todo: replace by data class (requires Python > 3.7)
class EAMParameters(
namedtuple(
"EAMParameters",
"symbols atomic_numbers "
"atomic_masses lattice_constants crystal_structures "
"number_of_density_grid_points "
"number_of_distance_grid_points "
"density_grid_spacing distance_grid_spacing "
"cutoff",
)
):
"""Embedded Atom Method potential parameters
    :param array_like symbols: Symbols of the elements covered by
        this potential (only for eam/alloy and
        eam/fs; empty for eam)
:param array_like atomic_numbers: Atomic numbers of the elements
covered by this potential
:param array_like atomic_masses: Atomic masses of the elements
covered by this potential
:param array_like lattice_constants: Lattice constant of a pure crystal
with crystal structure as specified in crystal_structures
:param array_like crystal_structures: Crystal structure of the pure metal.
:param int number_of_density_grid_points: Number of grid points
of the embedding energy functional
:param int number_of_distance_grid_points: Number of grid points of
the electron density function and the pair potential
:param float density_grid_spacing: Grid spacing in electron density space
:param float distance_grid_spacing: Grid spacing in pair distance space
:param float cutoff: Cutoff distance of the potential
"""
__slots__ = ()
###
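# --- Hedged usage sketch (not part of the original module) ---
# Constructing the parameter record by hand (all values are made up):
#
#   params = EAMParameters(
#       symbols=np.array(["Ni"]), atomic_numbers=np.array([28]),
#       atomic_masses=np.array([58.71]), lattice_constants=np.array([3.52]),
#       crystal_structures=np.array(["fcc"]),
#       number_of_density_grid_points=500,
#       number_of_distance_grid_points=500,
#       density_grid_spacing=1e-2, distance_grid_spacing=1e-2,
#       cutoff=4.95)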
def _strip_comments_from_line(string, marker="#"):
"""Strip comments from lines but retain newlines
Parameters
----------
string : str
string which may contain comments
marker : str
marker which indicates the start of a comment
Returns
-------
stripped_string : str
string without commments; if the string terminated
with a newline, then the newline is retained
"""
start = string.find(marker)
if start != -1:
stripped_string = string[:start]
if string.endswith("\n"):
stripped_string += "\n"
else:
stripped_string = string
return stripped_string
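# --- Hedged usage sketch (not part of the original module) ---
# The helper keeps the trailing newline so line counts are preserved:
#
#   _strip_comments_from_line("1000 0.01 # grid\n")  # -> "1000 0.01 \n"
#   _strip_comments_from_line("no comment here")     # -> "no comment here"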
def read_eam(eam_file, kind="eam/alloy"):
"""Read a tabulated EAM potential
    There are different flavors of EAM, with different storage
formats. This function supports a subset of the formats supported
by Lammps (http://lammps.sandia.gov/doc/pair_eam.html),
* eam (DYNAMO funcfl format)
* eam/alloy (DYNAMO setfl format)
* eam/fs (DYNAMO setfl format)
Parameters
----------
eam_file : string
eam alloy file name
kind : {'eam', 'eam/alloy', 'eam/fs'}
kind of EAM file to read
Returns
-------
source : string
        Source information or comment line for the file header
parameters : EAMParameters
EAM potential parameters
F : array_like
contain the tabulated values of the embedded functions
shape = (nb elements, nb of data points)
f : array_like
contain the tabulated values of the density functions
shape = (nb elements, nb of data points)
rep : array_like
contain the tabulated values of pair potential
shape = (nb elements,nb elements, nb of data points)
"""
supported_kinds = ["eam", "eam/alloy", "eam/fs"]
if kind not in supported_kinds:
raise ValueError(f"EAM kind {kind} not supported")
with open(eam_file, 'r') as file:
eam = file.readlines()
if kind == "eam":
with open(eam_file, 'r') as file:
# ignore comment characters on first line but strip them from subsequent lines
lines = [file.readline()]
lines.extend(_strip_comments_from_line(line) for line in file.readlines())
# reading first comment line as source for eam potential data
source = lines[0].strip()
words = lines[1].strip().split()
if len(words) != 4:
raise ValueError(
"expected four values on second line of EAM setfl file: "
"atomic number, mass, lattice constant, lattice"
)
# Turn atomic numbers and masses, lattice parameter, and crystal
# structures into arrays to be consistent with the other EAM styles
atomic_numbers = np.array((int(words[0]), ), dtype=int)
atomic_masses = np.array((float(words[1]), ), dtype=float)
lattice_parameters = np.array((float(words[2]),), dtype=float)
crystal_structures = np.empty(1).astype(str)
crystal_structures[0] = words[3]
words = lines[2].strip().split()
if len(words) != 5:
raise ValueError(
"expected five values on third line of EAM setfl file: "
"Nrho, drho, Nr, dr, cutoff"
)
Nrho = int(words[0]) # Nrho (number of values for the embedding function F(rho))
drho = float(words[1]) # spacing in density space
Nr = int(words[2]) # Nr (number of values for the effective charge function Z(r) and density function rho(r))
dr = float(words[3]) # spacing in distance space
cutoff = float(words[4])
parameters = EAMParameters(
np.zeros(1), atomic_numbers, atomic_masses, lattice_parameters,
crystal_structures, Nrho, Nr, drho, dr, cutoff
)
# Strip empty lines
remaining_lines = [line for line in lines[3:] if len(line.strip()) > 0]
remaining_words = []
for line in remaining_lines:
words = line.split()
remaining_words.extend(words)
expected_length = Nrho + 2 * Nr
true_length = len(remaining_words)
if true_length != expected_length:
raise ValueError(f"expected {expected_length} tabulated values, but there are {true_length}")
data = np.array(remaining_words, dtype=float)
F = data[0:Nrho]
# 'eam' (DYNAMO funcfl) tables contain the charge function :math:`Z`,
# and not the pair potential :math:`\phi`. :math:`Z` needs to be
# converted into :math:`\phi` first, which involves unit conversion.
# To be consistent with the other eam styles (and avoid complications
# later), we convert into :math:`r*\phi`, where :math:`r` is the pair distance, i.e.
# r = np.arange(0, rep.size) * dr
charge = data[Nrho:Nrho+Nr]
rep = charge**2
rep *= _hartree_bohr_in_electronvolt_angstrom
f = data[Nrho+Nr:2*Nr+Nrho]
# Reshape in order to be consistent with other EAM styles
return source, parameters, F.reshape(1, Nrho), f.reshape(1, Nr), rep.reshape(1, 1, Nr)
if kind in ["eam/alloy", "eam/fs"]:
"""eam/alloy and eam/fs have almost the same structure, except for the electron density section"""
with open(eam_file, 'r') as file:
            # keep the first three comment lines verbatim but strip comments from subsequent lines
lines = [file.readline() for _ in range(3)]
lines.extend(_strip_comments_from_line(line) for line in file.readlines())
# reading 3 first comment lines as source for eam potential data
source = "".join(line.strip() for line in lines[:3])
words = lines[3].strip().split()
alleged_num_elements = int(words[0])
elements = words[1:]
true_num_elements = len(elements)
if alleged_num_elements != true_num_elements:
raise ValueError(
f"Header claims there are tables for {alleged_num_elements} elements, "
f"but actual element list has {true_num_elements} elements: {' '.join(elements)}"
)
words = lines[4].strip().split()
Nrho = int(words[0]) # Nrho (number of values for the embedding function F(rho))
drho = float(words[1]) # spacing in density space
Nr = int(words[2]) # Nr (number of values for the effective charge function Z(r) and density function rho(r))
dr = float(words[3]) # spacing in distance space
cutoff = float(words[4])
# Strip empty lines and check that the table contains the expected number of values
remaining_lines = [line for line in lines[5:] if len(line.strip()) > 0]
remaining_words = []
for line in remaining_lines:
words = line.split()
remaining_words.extend(words)
if kind == "eam/fs":
expected_num_density_functions_per_element = true_num_elements
else:
expected_num_density_functions_per_element = 1
expected_num_words_per_element = (
4 +
Nrho +
expected_num_density_functions_per_element * Nr
)
expected_num_pair_functions = np.sum(np.arange(1, true_num_elements+1)).astype(int)
expected_length = true_num_elements * expected_num_words_per_element + expected_num_pair_functions * Nr
true_length = len(remaining_words)
if true_length != expected_length:
raise ValueError(f"expected {expected_length} tabulated values, but there are {true_length}")
atomic_numbers = np.zeros(true_num_elements, dtype=int)
atomic_masses = np.zeros(true_num_elements)
lattice_parameters = np.zeros(true_num_elements)
crystal_structures = np.empty(true_num_elements).astype(str) # fixme: be careful with string length
F = np.zeros((true_num_elements, Nrho))
for i in range(true_num_elements):
offset = i * expected_num_words_per_element
atomic_numbers[i] = int(remaining_words[offset])
atomic_masses[i] = float(remaining_words[offset+1])
lattice_parameters[i] = float(remaining_words[offset+2])
crystal_structures[i] = remaining_words[offset+3]
F[i, :] = np.array(remaining_words[offset+4:offset+4+Nrho], dtype=float)
# Read data for individual elemements
if kind == "eam/alloy":
f = np.zeros((true_num_elements, Nr))
for i in range(true_num_elements):
offset = i * expected_num_words_per_element + 4 + Nrho
f[i, :] = np.array(remaining_words[offset:offset+Nr], dtype=float)
if kind == "eam/fs":
f = np.zeros((true_num_elements, true_num_elements, Nr))
for i in range(true_num_elements):
offset = i * expected_num_words_per_element + 4 + Nrho
for j in range(true_num_elements):
f[i, j, :] = np.array(remaining_words[offset+j*Nr:offset+(j+1)*Nr], dtype=float)
# Read pair data
rep = np.zeros((true_num_elements, true_num_elements, Nr))
rows, cols = np.tril_indices(true_num_elements)
for pair_number, (i, j) in enumerate(zip(rows, cols)):
offset = true_num_elements * expected_num_words_per_element + pair_number * Nr
rep[i, j, :] = np.array(remaining_words[offset:offset+Nr], dtype=float)
rep[j, i, :] = rep[i, j, :]
parameters = EAMParameters(
elements, atomic_numbers, atomic_masses,
lattice_parameters, crystal_structures,
Nrho, Nr, drho, dr, cutoff
)
return source, parameters, F, f, rep
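# --- Hedged usage sketch (not part of the original module) ---
# Typical call for a DYNAMO setfl table; "Ni.eam.alloy" is a placeholder
# file name. The pair table stores r*phi, so divide by r to recover phi.
#
#   source, params, F, f, rep = read_eam("Ni.eam.alloy", kind="eam/alloy")
#   r = np.arange(params.number_of_distance_grid_points) \
#       * params.distance_grid_spacing
#   phi = rep[0, 0, 1:] / r[1:]  # skip r = 0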
def mix_eam(files,kind,method,f=[],rep_ab=[],alphas=[],betas=[]):
"""
mix eam alloy files data set and compute the interspecies pair potential part using the
mean geometric value from each pure species
Parameters
----------
files : array of strings
Contain all the files to merge and mix
    kind : string
        kind of EAM. Supported: eam/alloy, eam/fs
    method : string, {geometric, arithmetic, weighted, fitted}
        Method used to mix the pair interaction terms. The geometric,
        arithmetic, and weighted arithmetic average are available. The weighted
        arithmetic method uses the electron density function values of atoms
        :code:`a` and :code:`b` to weight the pair potential between species
        :code:`a` and :code:`b`, :code:`rep_ab = 0.5(fb/fa * rep_a + fa/fb *
        rep_b)`, see [1]. The fitted method is to be used if :code:`rep_ab`
        has been previously fitted and is passed as the :code:`rep_ab` keyword argument.
f : np.array
fitted density term (for FS eam style)
rep_ab : np.array
fitted rep_ab term
alphas : array
fitted alpha values for the fine tuned mixing.
:code:`rep_ab = alpha_a*rep_a+alpha_b*rep_b`
betas : array
fitted values for the fine tuned mixing.
:code:`f_ab = beta_00*rep_a+beta_01*rep_b`
:code:`f_ba = beta_10*rep_a+beta_11*rep_b`
Returns
-------
sources : string
        Source information or comment line for the file header
parameters_mix: EAMParameters
EAM potential parameters
    F_ : array_like
        contain the tabulated values of the embedded functions
        shape = (nb elements, nb of data points)
    f_ : array_like
        contain the tabulated values of the density functions
        shape = (nb elements, nb of data points) for eam/alloy,
        (nb elements, nb elements, nb of data points) for eam/fs
rep_ : array_like
contain the tabulated values of pair potential
shape = (nb elements, nb elements, nb of data points)
References
----------
1. X. W. Zhou, R. A. Johnson, and H. N. G. Wadley, Phys. Rev. B, 69, 144113 (2004)
"""
nb_at = 0
    # Count the elements and find a common tabulation grid based on the
    # largest Nrho*drho and Nr*dr products
Nrho,drho,Nr,dr,cutoff = np.empty((len(files))),np.empty((len(files))),np.empty((len(files))),np.empty((len(files))),np.empty((len(files)))
sources = ""
if kind == "eam/alloy":
for i,f_eam in enumerate(files):
source,parameters, F,f,rep = read_eam(f_eam,kind="eam/alloy")
            sources += source
            sources += " "
nb_at+=len(parameters[0])
Nrho[i] = parameters[5]
drho[i] = parameters[7]
cutoff[i] = parameters[9]
Nr[i] = parameters[6]
dr[i] = parameters[8]
# --- #
max_cutoff = cutoff.argmax()
max_prod = (Nrho*drho).argmax()
max_prod_r = (Nr*dr).argmax()
atomic_numbers,atomic_masses,lattice_parameters,crystal_structures,elements = np.empty(0),np.empty(0),np.empty(0),np.empty(0).astype(str),np.empty(0).astype(str)
        Nr_ = int(Nr[max_prod_r])
        dr_ = ((Nr*dr).max())/Nr_
        Nrho_ = int(Nrho[max_prod])
        drho_ = ((Nrho*drho).max())/Nrho_
if Nr_ > 2000:
Nr_ = 2000 # reduce
dr_ = ((Nr*dr).max())/Nr_
if Nrho_ > 2000:
Nrho_ = 2000 # reduce
drho_ = ((Nrho*drho).max())/Nrho_
F_,f_,rep_ = np.empty((nb_at,Nrho_)),np.empty((nb_at,Nr_)),np.empty((nb_at,nb_at,Nr_))
at = 0
for i,f_eam in enumerate(files):
source,parameters, F,f,rep = read_eam(f_eam,kind="eam/alloy")
elements = np.append(elements,parameters[0])
atomic_numbers = np.append(atomic_numbers,parameters[1])
atomic_masses = np.append(atomic_masses,parameters[2])
lattice_parameters = np.append(lattice_parameters,parameters[3])
crystal_structures = np.append(crystal_structures,parameters[4])
for j in range(len(parameters[0])):
F_[at,:] = interpolate.InterpolatedUnivariateSpline(np.linspace(0,Nrho[i]*drho[i],Nrho[i]),F[j,:])(np.linspace(0,Nrho_*drho_,Nrho_))
f_[at,:] = interpolate.InterpolatedUnivariateSpline(np.linspace(0,Nr[i]*dr[i],Nr[i]),f[j,:])(np.linspace(0,Nr_*dr_,Nr_))
rep_[at,at,:] = interpolate.InterpolatedUnivariateSpline(np.linspace(0,Nr[i]*dr[i],Nr[i]),rep[j,j,:])(np.linspace(0,Nr_*dr_,Nr_))
at+=1
# mixing repulsive part
old_err_state = np.seterr(divide='raise')
ignored_states = np.seterr(**old_err_state)
for i in range(nb_at):
for j in range(nb_at):
if j < i :
if method == "geometric":
rep_[i,j,:] = (rep_[i,i,:]*rep_[j,j,:])**0.5
if method == "arithmetic":
if alphas:
rep_[i,j,:] = alphas[i]*rep_[i,i,:]+alphas[j]*rep_[j,j,:]
else:
rep_[i,j,:] = 0.5*(rep_[i,i,:]+rep_[j,j,:])
if method == "weighted":
rep_[i,j,:] = 0.5*(np.divide(f_[j,:],f_[i,:])*rep_[i,i,:]+np.divide(f_[i,:],f_[j,:])*rep_[j,j,:])
if method == "fitted":
rep_ab[np.isnan(rep_ab)] = 0
rep_ab[np.isinf(rep_ab)] = 0
rep_[i,j,:] = interpolate.InterpolatedUnivariateSpline(np.linspace(0,max(Nr*dr),rep_ab.shape[0]),rep_ab)(np.linspace(0,Nr_*dr_,Nr_))
rep_[i,j,:][np.isnan(rep_[i,j,:])] = 0
rep_[i,j,:][np.isinf(rep_[i,j,:])] = 0
elif kind == "eam/fs":
for i,f_eam in enumerate(files):
source,parameters, F,f,rep = read_eam(f_eam,kind="eam/alloy")
            sources += source
            sources += " "
nb_at+=len(parameters[0])
Nrho[i] = parameters[5]
drho[i] = parameters[7]
cutoff[i] = parameters[9]
Nr[i] = parameters[6]
dr[i] = parameters[8]
# --- #
max_cutoff = cutoff.argmax()
max_prod = (Nrho*drho).argmax()
max_prod_r = (Nr*dr).argmax()
atomic_numbers,atomic_masses,lattice_parameters,crystal_structures,elements = np.empty(0),np.empty(0),np.empty(0),np.empty(0).astype(str),np.empty(0).astype(str)
        Nr_ = int(Nr[max_prod_r])
        dr_ = ((Nr*dr).max())/Nr_
        Nrho_ = int(Nrho[max_prod])
        drho_ = ((Nrho*drho).max())/Nrho_
if Nr_ > 2000:
Nr_ = 2000 # reduce
dr_ = ((Nr*dr).max())/Nr_
if Nrho_ > 2000:
Nrho_ = 2000 # reduce
drho_ = ((Nrho*drho).max())/Nrho_
F_,f_,rep_ = np.empty((nb_at,Nrho_)),np.empty((nb_at,nb_at,Nr_)),np.empty((nb_at,nb_at,Nr_))
at = 0
for i,f_eam in enumerate(files):
source,parameters, F,f,rep = read_eam(f_eam,kind="eam/alloy")
elements = np.append(elements,parameters[0])
atomic_numbers = np.append(atomic_numbers,parameters[1])
atomic_masses = np.append(atomic_masses,parameters[2])
lattice_parameters = np.append(lattice_parameters,parameters[3])
crystal_structures = np.append(crystal_structures,parameters[4])
for j in range(len(parameters[0])):
F_[at,:] = interpolate.InterpolatedUnivariateSpline(np.linspace(0,Nrho[i]*drho[i],Nrho[i]),F[j,:])(np.linspace(0,Nrho_*drho_,Nrho_))
f_[at,at,:] = interpolate.InterpolatedUnivariateSpline(np.linspace(0,Nr[i]*dr[i],Nr[i]),f[j,:])(np.linspace(0,Nr_*dr_,Nr_))
rep_[at,at,:] = interpolate.InterpolatedUnivariateSpline(np.linspace(0,Nr[i]*dr[i],Nr[i]),rep[j,j,:])(np.linspace(0,Nr_*dr_,Nr_))
at+=1
# mixing density part
old_err_state = np.seterr(divide='raise')
ignored_states = np.seterr(**old_err_state)
for i in range(nb_at):
for j in range(nb_at):
if i!=j:
if method == "geometric":
f_[i,j,:] = (f_[i,i,:]*f_[j,j,:])**0.5
if method == "arithmetic":
if betas.any():
f_[i,j,:] = betas[i,i]*f_[i,i,:]+betas[i,j]*f_[j,j,:]
else:
f_[i,j,:] = 0.5*(f_[i,i,:]+f_[j,j,:])
if method == "fitted":
                        f_ab[np.isnan(f_ab)] = 0
                        f_ab[np.isinf(f_ab)] = 0
                        f_[i,j,:] = interpolate.InterpolatedUnivariateSpline(np.linspace(0,max(Nr*dr),f_ab.shape[0]),f_ab)(np.linspace(0,Nr_*dr_,Nr_))
f_[i,j,:][np.isnan(f_[i,j,:])] = 0
f_[i,j,:][np.isinf(f_[i,j,:])] = 0
# mixing repulsive part
for i in range(nb_at):
for j in range(nb_at):
if j < i :
if method == "geometric":
rep_[i,j,:] = (rep_[i,i,:]*rep_[j,j,:])**0.5
if method == "arithmetic":
if alphas:
rep_[i,j,:] = alphas[i]*rep_[i,i,:]+alphas[j]*rep_[j,j,:]
else:
rep_[i,j,:] = 0.5*(rep_[i,i,:]+rep_[j,j,:])
if method == "fitted":
rep_ab[np.isnan(rep_ab)] = 0
rep_ab[np.isinf(rep_ab)] = 0
rep_[i,j,:] = interpolate.InterpolatedUnivariateSpline(np.linspace(0,max(Nr*dr),rep_ab.shape[0]),rep_ab)(np.linspace(0,Nr_*dr_,Nr_))
rep_[i,j,:][np.isnan(rep_[i,j,:])] = 0
rep_[i,j,:][np.isinf(rep_[i,j,:])] = 0
else:
raise ValueError(f"EAM kind {kind} is not supported")
parameters_mix = EAMParameters(elements, atomic_numbers, atomic_masses,lattice_parameters,crystal_structures, Nrho_,Nr_, drho_, dr_, cutoff[max_cutoff])
return sources, parameters_mix, F_, f_, rep_
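# --- Hedged usage sketch (not part of the original module) ---
# Mixing two single-element eam/alloy tables into one alloy table using
# the geometric mean for the cross pair potential (file names are
# placeholders):
#
#   src, params, F, f, rep = mix_eam(["Ni.eam.alloy", "Al.eam.alloy"],
#                                    kind="eam/alloy", method="geometric")
#   write_eam(src, params, F, f, rep, "NiAl.eam.alloy", kind="eam/alloy")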
def write_eam(source, parameters, F, f, rep, out_file, kind="eam"):
"""Write an eam lammps format file
    There are different flavors of EAM, with different storage
formats. This function supports a subset of the formats supported
by Lammps (http://lammps.sandia.gov/doc/pair_eam.html),
* eam (DYNAMO funcfl format)
* eam/alloy (DYNAMO setfl format)
* eam/fs (DYNAMO setfl format)
Parameters
----------
source : string
Source information or comment line for the file header
    parameters : EAMParameters
EAM potential parameters
    F : array_like
        contain the tabulated values of the embedded functions
        shape = (nb elements, nb of data points)
    f : array_like
        contain the tabulated values of the density functions
        shape = (nb elements, nb of data points) or, for eam/fs,
        (nb elements, nb elements, nb of data points)
    rep : array_like
        contain the tabulated values of pair potential
        shape = (nb elements, nb elements, nb of data points)
out_file : string
output file name for the eam alloy potential file
kind : {'eam', 'eam/alloy', 'eam/fs'}
kind of EAM file to read
Returns
-------
None
"""
elements, atomic_numbers, atomic_masses, lattice_parameters, crystal_structures = parameters[0:5]
Nrho, Nr, drho, dr, cutoff = parameters[5:10]
if kind == "eam":
        # parameters unpacked
        # 'eam' (funcfl) tables describe a single element, so take the first
        # entry of each parameter array
        crystal_structures_str = ' '.join(s for s in crystal_structures)
        atline = f"{int(atomic_numbers[0])} {float(atomic_masses[0])} {float(lattice_parameters[0])} {crystal_structures_str}"
parameterline = f'{int(Nrho)}\t{float(drho):.16e}\t{int(Nr)}\t{float(dr):.16e}\t{float(cutoff):.10e}'
potheader = f"# EAM potential from : # {source} \n {atline} \n {parameterline}"
# --- Writing new EAM alloy pot file --- #
# write header and file parameters
potfile = open(out_file,'wb')
# write F and pair charge tables
Nr = parameters.number_of_distance_grid_points
Nrho = parameters.number_of_density_grid_points
np.savetxt(potfile, F.reshape(Nrho), fmt='%.16e', header=potheader, comments='')
# 'eam' (DYNAMO funcfl) tables contain the charge function :math:`Z`,
# and not the pair potential :math:`\phi`. The array :code:`rep`
# stores :math:`r*\phi`, where :math:`r` is the pair distance,
# and we can convert using the relation :math:`r\phi=z*z`, where
# additional unit conversion to units of sqrt(Hartree*Bohr-radii)
# is required.
charge = rep / _hartree_bohr_in_electronvolt_angstrom
charge = np.sqrt(charge)
np.savetxt(potfile, charge.reshape(Nr), fmt='%.16e')
# write electron density tables
np.savetxt(potfile, f.reshape(Nr), fmt='%.16e')
potfile.close()
elif kind == "eam/alloy":
num_elements = len(elements)
# parameters unpacked
potheader = f"# Mixed EAM alloy potential from :\n# {source} \n# \n"
# --- Writing new EAM alloy pot file --- #
potfile = open(out_file,'wb')
# write header and file parameters
np.savetxt(
potfile, elements, fmt="%s", newline=' ',
header=potheader+str(num_elements),
footer=f'\n{Nrho}\t{drho:e}\t{Nr}\t{dr:e}\t{cutoff:e}\n',
comments=''
)
# write F and f tables
for i in range(num_elements):
np.savetxt(
potfile, np.append(F[i,:], f[i,:]), fmt="%.16e",
header=f'{atomic_numbers[i]:d}\t{atomic_masses[i]}\t{lattice_parameters[i]}\t{crystal_structures[i]}',
comments=''
)
# write pair interactions tables
[[np.savetxt(potfile,rep[i,j,:],fmt="%.16e") for j in range(rep.shape[0]) if j <= i] for i in range(rep.shape[0])]
potfile.close()
elif kind == "eam/fs":
num_elements = len(elements)
# parameters unpacked
potheader = f"# Mixed EAM fs potential from :\n# {source} \n# \n"
# --- Writing new EAM alloy pot file --- #
potfile = open(out_file,'wb')
# write header and file parameters
np.savetxt(
potfile, elements, fmt="%s", newline=' ',
header=potheader+str(num_elements),
footer=f'\n{Nrho}\t{drho:e}\t{Nr}\t{dr:e}\t{cutoff:e}\n',
comments=''
)
# write F and f tables
for i in range(num_elements):
np.savetxt(
potfile, np.append(F[i,:], f[i,:,:].flatten()), fmt="%.16e",
header=f'{atomic_numbers[i]:d}\t{atomic_masses[i]}\t{lattice_parameters[i]}\t{crystal_structures[i]}',
comments=''
)
# write pair interactions tables
[[np.savetxt(potfile, rep[i,j,:], fmt="%.16e") for j in range(rep.shape[0]) if j <= i] for i in range(rep.shape[0])]
potfile.close()
else:
raise ValueError(f"EAM kind {kind} is not supported")
| 28,065 | 44.194847 | 169 | py |