Search is not available for this dataset
text
stringlengths 75
104k
|
---|
def delete_frames(self):
    """Remove every frame file on disk that matches :attr:`frameglob`."""
    for frame_path in glob.glob(self.frameglob):
        os.unlink(frame_path)
|
def gmx_resid(self, resid):
    """Returns resid in the Gromacs index by transforming with offset."""
    try:
        # offset acts as a mapping/sequence: look up the translated resid
        return int(self.offset[resid])
    except (TypeError, IndexError):
        # offset is a plain number: shift the resid by it
        return resid + self.offset
    except KeyError:
        raise KeyError("offset must be a dict that contains the gmx resid for {0:d}".format(resid))
|
def combine(self, name_all=None, out_ndx=None, operation='|', defaultgroups=False):
    """Combine individual groups into a single one and write output.

    :Keywords:
       name_all : string
          Name of the combined group, ``None`` generates a name. [``None``]
       out_ndx : filename
          Name of the output file that will contain the individual groups
          and the combined group. If ``None`` then default from the class
          constructor is used. [``None``]
       operation : character
          Logical operation that is used to generate the combined group from
          the individual groups: "|" (OR) or "&" (AND); if set to ``False``
          then no combined group is created and only the individual groups
          are written. ["|"]
       defaultgroups : bool
          ``True``: append everything to the default groups produced by
          :program:`make_ndx` (or rather, the groups provided in the ndx file on
          initialization --- if this was ``None`` then these are truly default groups);
          ``False``: only use the generated groups

    :Returns:
       ``(combinedgroup_name, output_ndx)``, a tuple showing the
       actual group name and the name of the file; useful when all names are autogenerated.

    .. Warning:: The order of the atom numbers in the combined group is
       *not* guaranteed to be the same as the selections on input because
       ``make_ndx`` sorts them ascending. Thus you should be careful when
       using these index files for calculations of angles and dihedrals.
       Use :class:`gromacs.formats.NDX` in these cases.

    .. SeeAlso:: :meth:`IndexBuilder.write`.
    """
    if operation not in ('|', '&', False):
        raise ValueError("Illegal operation {0!r}, only '|' (OR) and '&' (AND) or False allowed.".format(
            operation))
    if name_all is None and operation:
        name_all = self.name_all or operation.join(self.indexfiles)
    if out_ndx is None:
        out_ndx = self.output
    if defaultgroups:
        # make a default file (using the original ndx where provided!!)
        fd, default_ndx = tempfile.mkstemp(suffix='.ndx', prefix='default__')
        os.close(fd)  # only the filename is needed; close fd to avoid a descriptor leak
        try:
            self.make_ndx(o=default_ndx, input=['q'])
        except:
            utilities.unlink_gmx(default_ndx)
            raise
        ndxfiles = [default_ndx]
    else:
        ndxfiles = []
    ndxfiles.extend(self.indexfiles.values())
    if operation:
        # combine multiple selections and name them
        # create the temp file *before* the try block so that the finally
        # clause never references an unbound tmp_ndx if mkstemp fails
        fd, tmp_ndx = tempfile.mkstemp(suffix='.ndx', prefix='combined__')
        os.close(fd)  # avoid leaking the open descriptor
        try:
            # combine all selections by loading ALL temporary index files
            operation = ' '+operation.strip()+' '
            cmd = [operation.join(['"{0!s}"'.format(gname) for gname in self.indexfiles]),
                   '', 'q']
            rc,out,err = self.make_ndx(n=ndxfiles, o=tmp_ndx, input=cmd)
            if self._is_empty_group(out):
                warnings.warn("No atoms found for {cmd!r}".format(**vars()),
                              category=BadParameterWarning)
            # second pass for naming, sigh (or: use NDX ?)
            groups = parse_ndxlist(out)
            last = groups[-1]
            # name this group
            name_cmd = ["name {0:d} {1!s}".format(last['nr'], name_all), 'q']
            rc,out,err = self.make_ndx(n=tmp_ndx, o=out_ndx, input=name_cmd)
            # For debugging, look at out and err or set stdout=True, stderr=True
            # TODO: check out if at least 1 atom selected
        finally:
            utilities.unlink_gmx(tmp_ndx)
            if defaultgroups:
                utilities.unlink_gmx(default_ndx)
    else:
        # just write individual groups in one file (name_all --> None)
        rc,out,err = self.make_ndx(n=ndxfiles, o=out_ndx, input=['','q'])
    return name_all, out_ndx
|
def write(self, out_ndx=None, defaultgroups=False):
    """Write individual (named) groups to *out_ndx*."""
    # delegate to combine() with no combining operation; discard the group name
    _, out_ndx = self.combine(operation=False, out_ndx=out_ndx,
                              defaultgroups=defaultgroups)
    return out_ndx
|
def cat(self, out_ndx=None):
    """Concatenate input index files.

    Generate a new index file that contains the default Gromacs index
    groups (if a structure file was defined) and all index groups from the
    input index files.

    :Arguments:
       out_ndx : filename
          Name of the output index file; if ``None`` then use the default
          provided to the constructor. [``None``].
    """
    target = self.output if out_ndx is None else out_ndx
    self.make_ndx(o=target, input=['q'])
    return target
|
def parse_selection(self, selection, name=None):
    """Returns (groupname, filename) with index group."""
    if type(selection) is tuple:
        # a tuple denotes a residue range
        return self._process_range(selection, name)
    if selection.startswith('@'):
        # '@' marks a verbatim make_ndx command; strip the marker
        return self._process_command(selection[1:], name)
    # anything else is a single residue/atom selection
    return self._process_residue(selection, name)
|
def _process_command(self, command, name=None):
    """Process ``make_ndx`` command and return name and temp index file.

    :Arguments:
       command : string
          verbatim ``make_ndx`` selection command
       name : string
          group name; autogenerated as "CMDnnn" if ``None``
    :Returns: ``(name, ndx_filename)``
    """
    self._command_counter += 1
    if name is None:
        name = "CMD{0:03d}".format(self._command_counter)
    # Need to build it with two make_ndx calls because I cannot reliably
    # name the new group without knowing its number.
    # Create the temp file *before* the try block so that the finally
    # clause never references an unbound tmp_ndx; close the returned fd
    # immediately because only the filename is used (avoids fd leaks).
    fd, tmp_ndx = tempfile.mkstemp(suffix='.ndx', prefix='tmp_'+name+'__')
    os.close(fd)
    try:
        cmd = [command, '', 'q']   # empty command '' necessary to get list
        # This sometimes fails with 'OSError: Broken Pipe' --- hard to debug
        rc,out,err = self.make_ndx(o=tmp_ndx, input=cmd)
        self.check_output(out, "No atoms found for selection {command!r}.".format(**vars()), err=err)
        # For debugging, look at out and err or set stdout=True, stderr=True
        groups = parse_ndxlist(out)
        last = groups[-1]
        # reduce and name this group
        fd, ndx = tempfile.mkstemp(suffix='.ndx', prefix=name+'__')
        os.close(fd)  # again: only the filename matters
        name_cmd = ["keep {0:d}".format(last['nr']),
                    "name 0 {0!s}".format(name), 'q']
        rc,out,err = self.make_ndx(n=tmp_ndx, o=ndx, input=name_cmd)
    finally:
        utilities.unlink_gmx(tmp_ndx)
    return name, ndx
|
def _process_residue(self, selection, name=None):
    """Process residue/atom selection and return name and temp index file.

    :Arguments:
       selection : string
          single residue selection such as "S234" or "S234:CA"
       name : string
          group name; derived from the selection if ``None``
    :Returns: ``(name, ndx_filename)``
    """
    if name is None:
        name = selection.replace(':', '_')
    # XXX: use _translate_residue() ....
    m = self.RESIDUE.match(selection)
    if not m:
        raise ValueError("Selection {selection!r} is not valid.".format(**vars()))
    gmx_resid = self.gmx_resid(int(m.group('resid')))
    residue = m.group('aa')
    if len(residue) == 1:
        gmx_resname = utilities.convert_aa_code(residue) # only works for AA
    else:
        gmx_resname = residue                            # use 3-letter for any resname
    gmx_atomname = m.group('atom')
    if gmx_atomname is None:
        gmx_atomname = 'CA'
    #: select residue <gmx_resname><gmx_resid> atom <gmx_atomname>
    _selection = 'r {gmx_resid:d} & r {gmx_resname!s} & a {gmx_atomname!s}'.format(**vars())
    cmd = ['keep 0', 'del 0',
           _selection,
           'name 0 {name!s}'.format(**vars()),
           'q']
    fd, ndx = tempfile.mkstemp(suffix='.ndx', prefix=name+'__')
    os.close(fd)  # only the filename is used; close fd to avoid a descriptor leak
    rc,out,err = self.make_ndx(n=self.ndx, o=ndx, input=cmd)
    self.check_output(out, "No atoms found for "
                      "%(selection)r --> %(_selection)r" % vars())
    # For debugging, look at out and err or set stdout=True, stderr=True
    return name, ndx
|
def _process_range(self, selection, name=None):
    """Process a range selection.

    ("S234", "A300", "CA") --> selected all CA in this range
    ("S234", "A300") --> selected all atoms in this range

    .. Note:: Ignores residue type, only cares about the resid (but still required)
    """
    try:
        first, last, gmx_atomname = selection
    except ValueError:
        try:
            first, last = selection
            gmx_atomname = '*'
        except:
            logger.error("%r is not a valid range selection", selection)
            raise
    if name is None:
        name = "{first!s}-{last!s}_{gmx_atomname!s}".format(**vars())
    _first = self._translate_residue(first, default_atomname=gmx_atomname)
    _last = self._translate_residue(last, default_atomname=gmx_atomname)
    # BUG FIX: the original emitted 'r N - M & & a X' with a doubled '&',
    # which is not valid make_ndx boolean syntax; a single '&' is correct.
    _selection = 'r {0:d} - {1:d} & a {2!s}'.format(_first['resid'], _last['resid'], gmx_atomname)
    cmd = ['keep 0', 'del 0',
           _selection,
           'name 0 {name!s}'.format(**vars()),
           'q']
    fd, ndx = tempfile.mkstemp(suffix='.ndx', prefix=name+'__')
    os.close(fd)  # only the filename is used; close fd to avoid a descriptor leak
    rc,out,err = self.make_ndx(n=self.ndx, o=ndx, input=cmd)
    self.check_output(out, "No atoms found for "
                      "%(selection)r --> %(_selection)r" % vars())
    # For debugging, look at out and err or set stdout=True, stderr=True
    return name, ndx
|
def _translate_residue(self, selection, default_atomname='CA'):
"""Translate selection for a single res to make_ndx syntax."""
m = self.RESIDUE.match(selection)
if not m:
errmsg = "Selection {selection!r} is not valid.".format(**vars())
logger.error(errmsg)
raise ValueError(errmsg)
gmx_resid = self.gmx_resid(int(m.group('resid'))) # magic offset correction
residue = m.group('aa')
if len(residue) == 1:
gmx_resname = utilities.convert_aa_code(residue) # only works for AA
else:
gmx_resname = residue # use 3-letter for any resname
gmx_atomname = m.group('atom')
if gmx_atomname is None:
gmx_atomname = default_atomname
return {'resname':gmx_resname, 'resid':gmx_resid, 'atomname':gmx_atomname}
|
def check_output(self, make_ndx_output, message=None, err=None):
    """Simple tests to flag problems with a ``make_ndx`` run."""
    message = "" if message is None else '\n' + message
    def _boxed(output, w=60):
        # frame the diagnostic output between horizontal rules
        hrule = "====[ GromacsError (diagnostic output) ]".ljust(w,"=")
        return hrule + '\n' + str(output) + hrule
    ok = True
    if self._is_empty_group(make_ndx_output):
        warnings.warn("Selection produced empty group.{message!s}".format(**vars()),
                      category=GromacsValueWarning)
        ok = False
    if self._has_syntax_error(make_ndx_output):
        ok = False
        out_formatted = _boxed(make_ndx_output)
        raise GromacsError("make_ndx encountered a Syntax Error, "
                           "%(message)s\noutput:\n%(out_formatted)s" % vars())
    if make_ndx_output.strip() == "":
        ok = False
        out_formatted = _boxed(err)
        raise GromacsError("make_ndx produced no output, "
                           "%(message)s\nerror output:\n%(out_formatted)s" % vars())
    return ok
|
def outfile(self, p):
    """Path for an output file.

    If :attr:`outdir` is set then the path is
    ``outdir/basename(p)``, otherwise *p* is returned unchanged.
    """
    if self.outdir is None:
        return p
    return os.path.join(self.outdir, os.path.basename(p))
|
def rp(self, *args):
    """Return canonical path to file under *dirname* with components *args*.

    If *args* form an absolute path then just return it as the absolute path.
    """
    candidate = None
    try:
        candidate = os.path.join(*args)
    except TypeError:
        # non-string components (or none at all): defer to realpath below
        pass
    if candidate is not None and os.path.isabs(candidate):
        return candidate
    return utilities.realpath(self.dirname, *args)
|
def center_fit(self, **kwargs):
    """Write compact xtc that is fitted to the tpr reference structure.

    See :func:`gromacs.cbook.trj_fitandcenter` for details and
    description of *kwargs* (including *input*, *input1*, *n* and
    *n1* for how to supply custom index groups). The most important ones are listed
    here but in most cases the defaults should work.

    :Keywords:
       *s*
          Input structure (typically the default tpr file but can be set to
          some other file with a different conformation for fitting)
       *n*
          Alternative index file.
       *o*
          Name of the output trajectory.
       *xy* : Boolean
          If ``True`` then only fit in xy-plane (useful for a membrane normal
          to z). The default is ``False``.
       *force*
          - ``True``: overwrite existing trajectories
          - ``False``: throw a IOError exception
          - ``None``: skip existing and log a warning [default]

    :Returns:
       dictionary with keys *tpr*, *xtc*, which are the names of the
       the new files
    """
    kwargs.setdefault('s', self.tpr)    # fit reference defaults to the run tpr
    kwargs.setdefault('n', self.ndx)
    kwargs['f'] = self.xtc              # input trajectory is always the instance xtc
    kwargs.setdefault('o', self.outfile(self.infix_filename(None, self.xtc, '_centfit', 'xtc')))
    # 'force' is consumed here: it is not a trj_fitandcenter keyword
    force = kwargs.pop('force', self.force)
    logger.info("Centering and fitting trajectory {f!r}...".format(**kwargs))
    with utilities.in_dir(self.dirname):
        if not self.check_file_exists(kwargs['o'], resolve="indicate", force=force):
            trj_fitandcenter(**kwargs)
        # NOTE(review): this logs success even when an existing file was kept
        # (fitting skipped) — presumably intended; confirm.
        logger.info("Centered and fit trajectory: {o!r}.".format(**kwargs))
    return {'tpr': self.rp(kwargs['s']), 'xtc': self.rp(kwargs['o'])}
|
def fit(self, xy=False, **kwargs):
    """Write xtc that is fitted to the tpr reference structure.

    Runs :class:`gromacs.tools.trjconv` with appropriate arguments
    for fitting. The most important *kwargs* are listed
    here but in most cases the defaults should work.

    Note that the default settings do *not* include centering or
    periodic boundary treatment as this often does not work well
    with fitting. It is better to do this as a separate step (see
    :meth:`center_fit` or :func:`gromacs.cbook.trj_fitandcenter`)

    :Keywords:
       *s*
          Input structure (typically the default tpr file but can be set to
          some other file with a different conformation for fitting)
       *n*
          Alternative index file.
       *o*
          Name of the output trajectory. A default name is created.
          If e.g. *dt* = 100 is one of the *kwargs* then the default name includes
          "_dt100ps".
       *xy* : boolean
          If ``True`` then only do a rot+trans fit in the xy plane
          (good for membrane simulations); default is ``False``.
       *force*
          ``True``: overwrite existing trajectories
          ``False``: throw a IOError exception
          ``None``: skip existing and log a warning [default]
       *fitgroup*
          index group to fit on ["backbone"]

          .. Note:: If keyword *input* is supplied then it will override
                    *fitgroup*; *input* = ``[fitgroup, outgroup]``
       *kwargs*
          kwargs are passed to :func:`~gromacs.cbook.trj_xyfitted`

    :Returns:
       dictionary with keys *tpr*, *xtc*, which are the names of the
       the new files
    """
    kwargs.setdefault('s', self.tpr)
    kwargs.setdefault('n', self.ndx)
    kwargs['f'] = self.xtc
    force = kwargs.pop('force', self.force)
    if xy:
        fitmode = 'rotxy+transxy'
        kwargs.pop('fit', None)
        infix_default = '_fitxy'
    else:
        fitmode = kwargs.pop('fit', 'rot+trans')  # user can use 'progressive', too
        infix_default = '_fit'
    dt = kwargs.get('dt')
    if dt:
        infix_default += '_dt{0:d}ps'.format(int(dt))  # dt in ps
    kwargs.setdefault('o', self.outfile(self.infix_filename(None, self.xtc, infix_default, 'xtc')))
    fitgroup = kwargs.pop('fitgroup', 'backbone')
    kwargs.setdefault('input', [fitgroup, "system"])
    if kwargs.get('center', False):
        logger.warn("Transformer.fit(): center=%(center)r used: centering should not be combined with fitting.", kwargs)
        # BUG FIX: was kwargs['inputs'] (KeyError) and ValuError (NameError)
        if len(kwargs['input']) != 3:
            logger.error("If you insist on centering you must provide three groups in the 'input' kwarg: (center, fit, output)")
            raise ValueError("Insufficient index groups for centering,fitting,output")
    logger.info("Fitting trajectory %r to with xy=%r...", kwargs['f'], xy)
    logger.info("Fitting on index group %(fitgroup)r", vars())
    with utilities.in_dir(self.dirname):
        if self.check_file_exists(kwargs['o'], resolve="indicate", force=force):
            logger.warn("File %r exists; force regenerating it with force=True.", kwargs['o'])
        else:
            gromacs.trjconv(fit=fitmode, **kwargs)
            logger.info("Fitted trajectory (fitmode=%s): %r.", fitmode, kwargs['o'])
    return {'tpr': self.rp(kwargs['s']), 'xtc': self.rp(kwargs['o'])}
|
def strip_water(self, os=None, o=None, on=None, compact=False,
                resn="SOL", groupname="notwater", **kwargs):
    """Write xtc and tpr with water (by resname) removed.

    :Keywords:
       *os*
          Name of the output tpr file; by default use the original but
          insert "nowater" before suffix.
       *o*
          Name of the output trajectory; by default use the original name but
          insert "nowater" before suffix.
       *on*
          Name of a new index file (without water).
       *compact*
          ``True``: write a compact and centered trajectory
          ``False``: use trajectory as it is [``False``]
       *centergroup*
          Index group used for centering ["Protein"]

          .. Note:: If *input* is provided (see below under *kwargs*)
                    then *centergroup* is ignored and the group for
                    centering is taken as the first entry in *input*.
       *resn*
          Residue name of the water molecules; all these residues are excluded.
       *groupname*
          Name of the group that is generated by subtracting all waters
          from the system.
       *force* : Boolean
          - ``True``: overwrite existing trajectories
          - ``False``: throw a IOError exception
          - ``None``: skip existing and log a warning [default]
       *kwargs*
          are passed on to :func:`gromacs.cbook.trj_compact` (unless the
          values have to be set to certain values such as s, f, n, o
          keywords). The *input* keyword is always mangled: Only the first
          entry (the group to centre the trajectory on) is kept, and as a
          second group (the output group) *groupname* is used.

    :Returns:
       dictionary with keys *tpr*, *xtc*, *ndx* which are the names of the
       the new files

    .. warning:: The input tpr file should *not* have *any position restraints*;
                 otherwise Gromacs will throw a hissy-fit and say

                 *Software inconsistency error: Position restraint coordinates are
                 missing*

                 (This appears to be a bug in Gromacs 4.x.)
    """
    # NOTE: the kwarg 'os' shadows the os module inside this method;
    # that is why paths are built via self._join_dirname here.
    force = kwargs.pop('force', self.force)
    newtpr = self.outfile(self.infix_filename(os, self.tpr, '_nowater'))
    newxtc = self.outfile(self.infix_filename(o, self.xtc, '_nowater'))
    # NOTE(review): newndx is derived from the *tpr* name (with .ndx extension),
    # not from self.ndx — looks intentional but confirm.
    newndx = self.outfile(self.infix_filename(on, self.tpr, '_nowater', 'ndx'))
    nowater_ndx = self._join_dirname(newtpr, "nowater.ndx")   # refers to original tpr
    if compact:
        TRJCONV = trj_compact
        # input overrides centergroup
        if kwargs.get('centergroup') is not None and 'input' in kwargs:
            logger.warn("centergroup = %r will be superceded by input[0] = %r", kwargs['centergroup'], kwargs['input'][0])
        _input = kwargs.get('input', [kwargs.get('centergroup', 'Protein')])
        kwargs['input'] = [_input[0], groupname]    # [center group, write-out selection]
        del _input
        logger.info("Creating a compact trajectory centered on group %r", kwargs['input'][0])
        logger.info("Writing %r to the output trajectory", kwargs['input'][1])
    else:
        TRJCONV = gromacs.trjconv
        kwargs['input'] = [groupname]
        logger.info("Writing %r to the output trajectory (no centering)", kwargs['input'][0])
    # clean kwargs, only legal arguments for Gromacs tool trjconv should remain
    kwargs.pop("centergroup", None)
    NOTwater = "! r {resn!s}".format(**vars())    # make_ndx selection ("not water residues")
    with utilities.in_dir(self.dirname):
        # ugly because I cannot break from the block
        if not self.check_file_exists(newxtc, resolve="indicate", force=force):
            # make no-water index
            B = IndexBuilder(struct=self.tpr, selections=['@'+NOTwater],
                             ndx=self.ndx, out_ndx=nowater_ndx)
            B.combine(name_all=groupname, operation="|", defaultgroups=True)
            logger.debug("Index file for water removal: %r", nowater_ndx)
            logger.info("TPR file without water {newtpr!r}".format(**vars()))
            gromacs.tpbconv(s=self.tpr, o=newtpr, n=nowater_ndx, input=[groupname])
            logger.info("NDX of the new system %r", newndx)
            gromacs.make_ndx(f=newtpr, o=newndx, input=['q'], stderr=False, stdout=False)
            # PROBLEM: If self.ndx contained a custom group required for fitting then we are loosing
            #          this group here. We could try to merge only this group but it is possible that
            #          atom indices changed. The only way to solve this is to regenerate the group with
            #          a selection or only use Gromacs default groups.
            logger.info("Trajectory without water {newxtc!r}".format(**vars()))
            kwargs['s'] = self.tpr
            kwargs['f'] = self.xtc
            kwargs['n'] = nowater_ndx
            kwargs['o'] = newxtc
            TRJCONV(**kwargs)
            logger.info("pdb and gro for visualization")
            for ext in 'pdb', 'gro':
                try:
                    # see warning in doc ... so we don't use the new xtc but the old one
                    kwargs['o'] = self.filename(newtpr, ext=ext)
                    TRJCONV(dump=0, stdout=False, stderr=False, **kwargs)   # silent
                except:
                    # best-effort: visualization files are optional, so log and continue
                    logger.exception("Failed building the water-less %(ext)s. "
                                     "Position restraints in tpr file (see docs)?" % vars())
        logger.info("strip_water() complete")
    self.nowater[self.rp(newxtc)] = Transformer(dirname=self.dirname, s=newtpr,
                                                f=newxtc, n=newndx, force=force)
    return {'tpr':self.rp(newtpr), 'xtc':self.rp(newxtc), 'ndx':self.rp(newndx)}
|
def strip_fit(self, **kwargs):
    """Strip water and fit to the remaining system.

    First runs :meth:`strip_water` and then :meth:`fit`; see there
    for arguments.

    - *strip_input* is used for :meth:`strip_water` (but is only useful in
      special cases, e.g. when there is no Protein group defined. Then set
      *strip_input* = ``['Other']``.
    - *input* is passed on to :meth:`fit` and can contain the
      ``[center_group, fit_group, output_group]``
    - *fitgroup* is only passed to :meth:`fit` and just contains
      the group to fit to ("backbone" by default)

      .. warning:: *fitgroup* can only be a Gromacs default group and not
                   a custom group (because the indices change after stripping)
    - By default *fit* = "rot+trans" (and *fit* is passed to :meth:`fit`,
      together with the *xy* = ``False`` keyword)

    .. Note:: The call signature of :meth:`strip_water` is somewhat different from this one.
    """
    kwargs.setdefault('fit', 'rot+trans')
    # split off the keywords destined for fit() from those for strip_water()
    kw_fit = {key: kwargs.pop(key) for key in ('xy', 'fit', 'fitgroup', 'input')
              if key in kwargs}
    kwargs['input'] = kwargs.pop('strip_input', ['Protein'])
    kwargs['force'] = kw_fit['force'] = kwargs.pop('force', self.force)
    paths = self.strip_water(**kwargs)   # also registers the new Transformer in self.nowater
    # make sure to use the Transformer we just produced
    return self.nowater[paths['xtc']].fit(**kw_fit)
|
def _join_dirname(self, *args):
"""return os.path.join(os.path.dirname(args[0]), *args[1:])"""
# extra function because I need to use it in a method that defines
# the kwarg 'os', which collides with os.path...
return os.path.join(os.path.dirname(args[0]), *args[1:])
|
def create(logger_name, logfile='gromacs.log'):
    """Create a top level logger.

    - The file logger logs everything (including DEBUG).
    - The console logger only logs INFO and above.

    Logging to a file and the console.
    See http://docs.python.org/library/logging.html?#logging-to-multiple-destinations

    The top level logger of the library is named 'gromacs'. Note that
    we are configuring this logger with console output. If the root
    logger also does this then we will get two output lines to the
    console. We'll live with this because this is a simple
    convenience library...
    """
    new_logger = logging.getLogger(logger_name)
    new_logger.setLevel(logging.DEBUG)
    # file handler: records everything, with timestamps
    file_handler = logging.FileHandler(logfile)
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'))
    new_logger.addHandler(file_handler)
    # console handler: INFO and above to sys.stderr, simpler format
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s'))
    new_logger.addHandler(console_handler)
    return new_logger
|
def _generate_template_dict(dirname):
    """Generate a list of included files *and* extract them to a temp space.

    Templates have to be extracted from the egg because they are used
    by external code. All template filenames are stored in
    :data:`config.templates`.
    """
    # skip editor backup files ('~' suffix); map basename -> extracted path
    return {resource_basename(fn): resource_filename(__name__, dirname + '/' + fn)
            for fn in resource_listdir(__name__, dirname)
            if not fn.endswith('~')}
|
def resource_basename(resource):
    """Last component of a resource (which always uses '/' as sep)."""
    if resource.endswith('/'):
        # strip exactly one trailing slash
        resource = resource[:-1]
    return resource.rsplit('/', 1)[-1]
|
def get_template(t):
    """Find template file *t* and return its real path.

    *t* can be a single string or a list of strings. A string
    should be one of

    1. a relative or absolute path,
    2. a file in one of the directories listed in :data:`gromacs.config.path`,
    3. a filename in the package template directory (defined in the template dictionary
       :data:`gromacs.config.templates`) or
    4. a key into :data:`~gromacs.config.templates`.

    The first match (in this order) is returned. If the argument is a
    single string then a single string is returned, otherwise a list
    of strings.

    :Arguments: *t* : template file or key (string or list of strings)
    :Returns: os.path.realpath(*t*) (or a list thereof)
    :Raises: :exc:`ValueError` if no file can be located.
    """
    resolved = [_get_template(item) for item in utilities.asiterable(t)]
    # single input -> single result; otherwise the whole list
    return resolved[0] if len(resolved) == 1 else resolved
|
def _get_template(t):
    """Return a single template *t*.

    Resolution order (first match wins):

    1. an existing file path *t*,
    2. a file *t* found under one of the directories in the module-level
       :data:`path` list,
    3. a file in the package template dirs whose basename matches,
    4. *t* used as a key into the module-level :data:`templates` dict.

    :Raises: :exc:`ValueError` if nothing matches.
    """
    if os.path.exists(t):           # 1) Is it an accessible file?
        pass
    else:
        _t = t
        _t_found = False
        for d in path:              # 2) search config.path
            p = os.path.join(d, _t)
            if os.path.exists(p):
                t = p
                _t_found = True
                break
        # from here on match on the basename only
        _t = os.path.basename(t)
        if not _t_found:            # 3) try template dirs
            for p in templates.values():
                if _t == os.path.basename(p):
                    t = p
                    _t_found = True     # NOTE: in principle this could match multiple
                    break               # times if more than one template dir existed.
        if not _t_found:            # 4) try it as a key into templates
            try:
                t = templates[t]
            except KeyError:
                pass
            else:
                _t_found = True
        if not _t_found:            # 5) nothing else to try...
            raise ValueError("Failed to locate the template file {t!r}.".format(**vars()))
    return os.path.realpath(t)
|
def get_configuration(filename=CONFIGNAME):
    """Reads and parses the configuration file.

    Default values are loaded and then replaced with the values from
    ``~/.gromacswrapper.cfg`` if that file exists. The global
    configuration instance :data:`gromacswrapper.config.cfg` is updated
    as are a number of global variables such as :data:`configdir`,
    :data:`qscriptdir`, :data:`templatesdir`, :data:`logfilename`, ...

    Normally, the configuration is only loaded when the :mod:`gromacs`
    package is imported but a re-reading of the configuration can be forced
    anytime by calling :func:`get_configuration`.

    :Returns: a dict with all updated global configuration variables
    """
    global cfg, configuration    # very iffy --- most of the whole config mod should a class
    #: :data:`cfg` is the instance of :class:`GMXConfigParser` that makes all
    #: global configuration data accessible
    cfg = GMXConfigParser(filename=filename)  # update module-level cfg
    # side effect: injects configdir, templatesdir, ... into this module's namespace
    globals().update(cfg.configuration)       # update configdir, templatesdir ...
    configuration = cfg.configuration         # update module-level configuration
    return cfg
|
def setup(filename=CONFIGNAME):
    """Prepare a default GromacsWrapper global environment.

    1) Create the global config file.
    2) Create the directories in which the user can store template and config files.

    This function can be run repeatedly without harm.
    """
    # setup() must be separate and NOT run automatically when config
    # is loaded so that easy_install installations work
    # (otherwise we get a sandbox violation)
    # populate cfg with defaults (or existing data)
    get_configuration()
    if not os.path.exists(filename):
        # only write a config file if none exists yet (never clobber user edits)
        with open(filename, 'w') as configfile:
            cfg.write(configfile)  # write the default file so that user can edit
            msg = "NOTE: GromacsWrapper created the configuration file \n\t%r\n" \
                  "      for you. Edit the file to customize the package." % filename
            print(msg)
    # directories (idempotent: mkdir_p ignores existing dirs)
    for d in config_directories:
        utilities.mkdir_p(d)
|
def check_setup():
    """Check if templates directories are setup and issue a warning and help.

    Set the environment variable :envvar:`GROMACSWRAPPER_SUPPRESS_SETUP_CHECK`
    to skip the check and make it always return ``True``.

    :return: ``True`` if directories were found and ``False`` otherwise

    .. versionchanged:: 0.3.1
       Uses :envvar:`GROMACSWRAPPER_SUPPRESS_SETUP_CHECK` to suppress check
       (useful for scripts run on a server)
    """
    if "GROMACSWRAPPER_SUPPRESS_SETUP_CHECK" in os.environ:
        return True
    missing = [d for d in config_directories if not os.path.exists(d)]
    if not missing:
        return True
    print("NOTE: Some configuration directories are not set up yet: ")
    print("\t{0!s}".format('\n\t'.join(missing)))
    print("NOTE: You can create the configuration file and directories with:")
    print("\t>>> import gromacs")
    print("\t>>> gromacs.config.setup()")
    return False
|
def set_gmxrc_environment(gmxrc):
    """Set the environment from ``GMXRC`` provided in *gmxrc*.

    Runs ``GMXRC`` in a subprocess and puts environment variables loaded by it
    into this Python environment.

    If *gmxrc* evaluates to ``False`` then nothing is done. If errors occur
    then only a warning will be logged. Thus, it should be safe to just call
    this function.
    """
    if not gmxrc:
        # nothing to source: bail out before building the command line
        logger.debug("set_gmxrc_environment(): no GMXRC, nothing done.")
        return
    # only v5: 'GMXPREFIX', 'GROMACS_DIR'
    envvars = ['GMXBIN', 'GMXLDLIB', 'GMXMAN', 'GMXDATA',
               'LD_LIBRARY_PATH', 'MANPATH', 'PKG_CONFIG_PATH',
               'PATH',
               'GMXPREFIX', 'GROMACS_DIR']
    # in order to keep empty values, add ___ sentinels around result
    # (will be removed later)
    cmdargs = ['bash', '-c', ". {0} && echo {1}".format(gmxrc,
               ' '.join(['___${{{0}}}___'.format(v) for v in envvars]))]
    try:
        out = subprocess.check_output(cmdargs)
        out = out.strip().split()
        for key, value in zip(envvars, out):
            value = str(value.decode('ascii').replace('___', ''))  # remove sentinels
            os.environ[key] = value
            logger.debug("set_gmxrc_environment(): %s = %r", key, value)
    except (subprocess.CalledProcessError, OSError):
        # BUG FIX: added the missing space between "environment" and "from"
        logger.warning("Failed to automatically set the Gromacs environment "
                       "from GMXRC=%r", gmxrc)
|
def get_tool_names():
    """Get tool names from all configured groups.

    :return: list of tool names
    """
    # flatten: every group listed under [Gromacs] groups contributes its tools
    return [name
            for group in cfg.get('Gromacs', 'groups').split()
            for name in cfg.get('Gromacs', group).split()]
|
def configuration(self):
    """Dict of variables that we make available as globals in the module.

    Can be used as ::

       globals().update(GMXConfigParser.configuration) # update configdir, templatesdir ...
    """
    conf = {
        'configfilename': self.filename,
        'logfilename': self.getpath('Logging', 'logfilename'),
        'loglevel_console': self.getLogLevel('Logging', 'loglevel_console'),
        'loglevel_file': self.getLogLevel('Logging', 'loglevel_file'),
        'configdir': self.getpath('DEFAULT', 'configdir'),
        'qscriptdir': self.getpath('DEFAULT', 'qscriptdir'),
        'templatesdir': self.getpath('DEFAULT', 'templatesdir'),
    }
    # search path: current directory first, then the user's script/template dirs
    conf['path'] = [os.path.curdir, conf['qscriptdir'], conf['templatesdir']]
    return conf
|
def getpath(self, section, option):
    """Return option as an expanded path."""
    raw = self.get(section, option)
    # expand environment variables first, then a leading '~'
    return os.path.expanduser(os.path.expandvars(raw))
|
def getLogLevel(self, section, option):
    """Return the textual representation of logging level 'option' or the number.

    Note that option is always interpreted as an UPPERCASE string
    and hence integer log levels will not be recognized.

    .. SeeAlso: :mod:`logging` and :func:`logging.getLevelName`
    """
    level_name = self.get(section, option).upper()
    return logging.getLevelName(level_name)
|
def save(self, filename):
    """Pickle the whole collection to *filename*.

    If no extension is provided, ".collection" is appended.
    """
    # use a context manager so the file handle is closed even if dump() raises
    with open(self._canonicalize(filename), 'wb') as f:
        cPickle.dump(self, f, protocol=cPickle.HIGHEST_PROTOCOL)
|
def load(self, filename, append=False):
    """Load collection from pickled file *filename*.

    *append* determines if the saved collection is added to the current one
    or if it replaces the current content.

    If no extension is provided, ".collection" is appended.
    """
    # close the file deterministically instead of relying on garbage collection
    with open(self._canonicalize(filename), 'rb') as f:
        tmp = cPickle.load(f)
    if append:
        self.extend(tmp)
    else:
        self[:] = tmp[:]
    del tmp
|
def _canonicalize(self, filename):
"""Use .collection as extension unless provided"""
path, ext = os.path.splitext(filename)
if not ext:
ext = ".collection"
return path + ext
|
def register(self,flag):
    """Register a new :class:`Flag` instance with the Flags registry.

    Stores *flag* under its ``name`` attribute, calling the parent
    class implementation directly to bypass any overridden
    ``__setitem__`` in :class:`Flags`.
    """
    super(Flags,self).__setitem__(flag.name,flag)
|
def update(self,*flags):
    """Update Flags registry with a list of :class:`Flag` instances.

    Each flag is keyed by its ``name`` attribute; the parent class
    ``update`` is used directly so any overridden ``__setitem__`` of
    :class:`Flags` is bypassed.
    """
    super(Flags,self).update([(flag.name,flag) for flag in flags])
|
def scale_dihedrals(mol, dihedrals, scale, banned_lines=None):
    """Scale dihedral angles.

    :Arguments:
       *mol*
          molecule object whose ``dihedrals`` list is rebuilt and rescaled
       *dihedrals*
          mapping "a1-a2-a3-a4-func" -> list of matching dihedral types
       *scale*
          multiplicative factor applied to the force constant ``kchi``
       *banned_lines*
          parameter-file line numbers whose types must NOT be scaled [``None``]
    :Returns: *mol* (mutated in place, with the new dihedral list)
    """
    if banned_lines is None:
        banned_lines = []
    new_dihedrals = []
    for dh in mol.dihedrals:
        atypes = dh.atom1.get_atomtype(), dh.atom2.get_atomtype(), dh.atom3.get_atomtype(), dh.atom4.get_atomtype()
        atypes = [a.replace("_", "").replace("=", "") for a in atypes]
        # special-case: this is a [ dihedral ] override in molecule block, continue and don't match
        if dh.gromacs['param'] != []:
            for p in dh.gromacs['param']:
                # BUG FIX: the force-constant key is 'kchi' (as used in the
                # matched branch below and in scale_impropers), not 'kch'
                p['kchi'] *= scale
            new_dihedrals.append(dh)
            continue
        # try all 32 combinations: forward/reverse atom order x wildcarding
        # each of the four positions with "X"
        for iswitch in range(32):
            if iswitch % 2 == 0:
                a1, a2, a3, a4 = atypes
            else:
                a1, a2, a3, a4 = atypes[::-1]
            if (iswitch//2) % 2 == 1: a1 = "X"
            if (iswitch//4) % 2 == 1: a2 = "X"
            if (iswitch//8) % 2 == 1: a3 = "X"
            if (iswitch//16) % 2 == 1: a4 = "X"
            key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, dh.gromacs['func'])
            if key in dihedrals:
                for i, dt in enumerate(dihedrals[key]):
                    dhA = copy.deepcopy(dh)
                    param = copy.deepcopy(dt.gromacs['param'])
                    # Only check the first dihedral in a list
                    if dihedrals[key][0].line not in banned_lines:
                        for p in param:
                            p['kchi'] *= scale
                    dhA.gromacs['param'] = param
                    if i == 0:
                        dhA.comment = "; banned lines {0} found={1}\n".format(" ".join(
                            map(str, banned_lines)), 1 if dt.line in banned_lines else 0)
                        dhA.comment += "; parameters for types {}-{}-{}-{}-9 at LINE({})\n".format(
                            dhA.atom1.atomtype, dhA.atom2.atomtype, dhA.atom3.atomtype,
                            dhA.atom4.atomtype, dt.line).replace("_", "")
                    new_dihedrals.append(dhA)
                break
    mol.dihedrals = new_dihedrals
    return mol
|
def scale_impropers(mol, impropers, scale, banned_lines=None):
    """Scale improper dihedral force constants (``kpsi``) of *mol* by *scale*.

    Parameters given explicitly in the molecule block are scaled directly;
    otherwise the matching impropertype entry (including wildcard "X"
    variants and reversed atom order) is looked up in *impropers*.
    Entries whose first type comes from a line listed in *banned_lines*
    are copied unscaled.
    """
    if banned_lines is None:
        banned_lines = []
    rescaled = []
    for im in mol.impropers:
        types = [im.atom1.get_atomtype(), im.atom2.get_atomtype(),
                 im.atom3.get_atomtype(), im.atom4.get_atomtype()]
        types = [t.replace("_", "").replace("=", "") for t in types]
        # explicit [ dihedral ] override inside the molecule block:
        # scale in place and skip the type lookup entirely
        if im.gromacs['param'] != []:
            for p in im.gromacs['param']:
                p['kpsi'] *= scale
            rescaled.append(im)
            continue
        for switch in range(32):
            # even switches try the forward atom order, odd ones the reverse
            if switch % 2 == 0:
                names = list(types)
            else:
                names = list(reversed(types))
            # bits 1..4 of the switch replace individual atoms with wildcards
            for pos in range(4):
                if (switch >> (pos + 1)) & 1:
                    names[pos] = "X"
            key = "{0}-{1}-{2}-{3}-{4}".format(names[0], names[1], names[2],
                                               names[3], im.gromacs['func'])
            if key in impropers:
                for i, imt in enumerate(impropers[key]):
                    imA = copy.deepcopy(im)
                    param = copy.deepcopy(imt.gromacs['param'])
                    # only the first entry of the list decides whether scaling is banned
                    if not impropers[key][0].line in banned_lines:
                        for p in param:
                            p['kpsi'] *= scale
                    imA.gromacs['param'] = param
                    if i == 0:
                        imA.comment = "; banned lines {0} found={1}\n ; parameters for types {2}-{3}-{4}-{5}-9 at LINE({6})\n".format(
                            " ".join(map(str, banned_lines)),
                            1 if imt.line in banned_lines else 0,
                            imt.atype1, imt.atype2, imt.atype3, imt.atype4, imt.line)
                    rescaled.append(imA)
                break
    mol.impropers = rescaled
    return mol
|
def partial_tempering(topfile="processed.top", outfile="scaled.top", banned_lines='',
                      scale_lipids=1.0, scale_protein=1.0):
    """Set up topology for partial tempering (REST2) replica exchange.

    :Arguments:
       *topfile*
          pre-processed input topology
       *outfile*
          scaled output topology
       *banned_lines*
          whitespace-separated string of parameter-file line numbers
          that must not be scaled
       *scale_lipids*, *scale_protein*
          scaling factors for the "=" (lipid) and "_" (protein) groups

    .. versionchanged:: 0.7.0
       Use keyword arguments instead of an `args` Namespace object.
    """
    # BUG FIX: on Python 3, map() returns a one-shot iterator; repeated
    # membership tests ("line in banned_lines") would silently see an
    # exhausted iterator after the first use, so materialize a list.
    banned_lines = list(map(int, banned_lines.split()))
    top = TOP(topfile)
    groups = [("_", float(scale_protein)), ("=", float(scale_lipids))]
    #
    # CMAPTYPES
    #
    cmaptypes = []
    for ct in top.cmaptypes:
        cmaptypes.append(ct)
        for gr, scale in groups:
            ctA = copy.deepcopy(ct)
            ctA.atype1 += gr
            ctA.atype2 += gr
            ctA.atype3 += gr
            ctA.atype4 += gr
            ctA.atype8 += gr
            ctA.gromacs['param'] = [ v*scale for v in ct.gromacs['param'] ]
            cmaptypes.append(ctA)
    logger.debug("cmaptypes was {0}, is {1}".format(len(top.cmaptypes), len(cmaptypes)))
    top.cmaptypes = cmaptypes
    #
    # ATOMTYPES
    #
    atomtypes = []
    for at in top.atomtypes:
        atomtypes.append(at)
        for gr, scale in groups:
            atA = copy.deepcopy(at)
            atA.atnum = atA.atype
            atA.atype += gr
            atA.gromacs['param']['lje'] *= scale
            atomtypes.append(atA)
    top.atomtypes = atomtypes
    #
    # PAIRTYPES
    #
    pairtypes = []
    for pt in top.pairtypes:
        pairtypes.append(pt)
        for gr, scale in groups:
            ptA = copy.deepcopy(pt)
            ptA.atype1 += gr
            ptA.atype2 += gr
            ptA.gromacs['param']['lje14'] *= scale
            pairtypes.append(ptA)
    top.pairtypes = pairtypes
    #
    # BONDTYPES
    #
    bondtypes = []
    for bt in top.bondtypes:
        bondtypes.append(bt)
        for gr, scale in groups:
            btA = copy.deepcopy(bt)
            btA.atype1 += gr
            btA.atype2 += gr
            bondtypes.append(btA)
    top.bondtypes = bondtypes
    #
    # ANGLETYPES
    #
    angletypes = []
    for at in top.angletypes:
        angletypes.append(at)
        for gr, scale in groups:
            atA = copy.deepcopy(at)
            atA.atype1 += gr
            atA.atype2 += gr
            atA.atype3 += gr
            angletypes.append(atA)
    top.angletypes = angletypes
    #
    # Build dihedral dictionary
    #
    dihedraltypes = {}
    for dt in top.dihedraltypes:
        dt.disabled = True
        dt.comment = "; type={0!s}-{1!s}-{2!s}-{3!s}-9\n; LINE({4:d}) ".format(
            dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.line)
        dt.comment = dt.comment.replace("_","")
        name = "{0}-{1}-{2}-{3}-{4}".format(dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.gromacs['func'])
        if not name in dihedraltypes:
            dihedraltypes[name] = []
        dihedraltypes[name].append(dt)
    logger.debug("Build dihedraltypes dictionary with {0} entries".format(len(dihedraltypes)))
    #
    # Build improper dictionary
    #
    impropertypes = {}
    for it in top.impropertypes:
        it.disabled = True
        it.comment = "; LINE({0:d}) ".format(it.line)
        name = "{0}-{1}-{2}-{3}-{4}".format(
            it.atype1, it.atype2, it.atype3, it.atype4, it.gromacs['func'])
        if not name in impropertypes:
            impropertypes[name] = []
        impropertypes[name].append(it)
    logger.debug("Build impropertypes dictionary with {0} entries".format(len(impropertypes)))
    for molname_mol in top.dict_molname_mol:
        if not 'Protein' in molname_mol:
            continue
        mol = top.dict_molname_mol[molname_mol]
        # REST2: charges are scaled by sqrt(scale) so that q_i*q_j scales linearly
        for at in mol.atoms:
            at.charge *= math.sqrt(scale_protein)
        mol = scale_dihedrals(mol, dihedraltypes, scale_protein, banned_lines)
        # NOTE(review): impropers are scaled with factor 1.0 (i.e. left
        # unchanged) -- confirm this is the intended REST2 setup
        mol = scale_impropers(mol, impropertypes, 1.0, banned_lines)
    top.write(outfile)
|
def to_unicode(obj):
    """Return *obj* as unicode text if it can be converted.

    Conversion is only attempted for string types (as determined by
    :data:`six.string_types`); everything else is passed through untouched.

    .. versionchanged:: 0.7.0
       removed `encoding keyword argument
    """
    if isinstance(obj, six.string_types):
        try:
            return six.text_type(obj)
        except TypeError:
            # conversion failed -- hand back the original object
            return obj
    return obj
|
def besttype(x):
    """Convert string *x* to the most useful type: int, float or unicode string.

    A quoted string (single or double quotes) is stripped of its quotes
    and the enclosed text returned unconverted.

    .. Note::
       Strings will be returned as Unicode strings (using :func:`to_unicode`).
    .. versionchanged:: 0.7.0
       removed `encoding keyword argument
    """
    x = to_unicode(x)  # make unicode as soon as possible
    try:
        x = x.strip()
    except AttributeError:
        pass
    quoted = re.match(r"""['"](?P<value>.*)["']$""", x)
    if quoted is not None:
        # quoted string: strip the quotes, no type conversion
        return to_unicode(quoted.group('value'))
    # not a quoted string: try converters in increasing order of lenience
    for converter in (int, float, to_unicode):
        try:
            return converter(x)
        except ValueError:
            continue
    return x
|
def to_int64(a):
    """Return a copy of recarray *a* with every int32 field cast to int64."""
    def _widen(typestr):
        # promote 'i4' to 'i8', keeping the byte-order character
        return typestr[0] + 'i8' if typestr[1:] == 'i4' else typestr
    return a.astype([(name, _widen(typestr)) for name, typestr in a.dtype.descr])
|
def irecarray_to_py(a):
    """Slow conversion of a recarray into an iterator of records with python types.

    Field names come from :attr:`a.dtype.names`.

    :Returns: generator, so that large input arrays can be processed lazily
    """
    converters = [pyify(typestr) for _, typestr in a.dtype.descr]
    def _record_to_py(record):
        # apply the per-field converter to each value of the record
        return tuple(conv(value) for conv, value in zip(converters, record))
    return (_record_to_py(record) for record in a)
|
def _convert_fancy(self, field):
    """Convert to a list (sep != None) and convert list elements.

    If :attr:`sep` is ``False`` the whole field is converted as a single
    value; otherwise the field is split on :attr:`sep` and each piece is
    converted individually.  An empty result collapses to ``''`` and a
    single-element tuple to the bare element.
    """
    if self.sep is False:
        x = self._convert_singlet(field)
    else:
        x = tuple([self._convert_singlet(s) for s in field.split(self.sep)])
    # NOTE(review): the len() checks below assume x is sized; if
    # _convert_singlet returns a number (sep is False branch) this raises
    # TypeError -- confirm singlet results are always strings/sequences here
    if len(x) == 0:
        x = ''
    elif len(x) == 1:
        x = x[0]
    #print "%r --> %r" % (field, x)
    return x
|
def parse(self):
    """Parse the xpm file and populate :attr:`XPM.array`.

    Reads the header, the colour table (mapping pixel symbols to values)
    and the pixel rows; also collects the x-axis and y-axis tick values
    from the axis comment lines.
    """
    with utilities.openany(self.real_filename) as xpm:
        # Read in lines until we find the start of the array
        meta = [xpm.readline()]
        while not meta[-1].startswith("static char *gromacs_xpm[]"):
            meta.append(xpm.readline())
        # The next line will contain the dimensions of the array
        dim = xpm.readline()
        # There are four integers surrounded by quotes
        # nx: points along x, ny: points along y, nc: ?, nb: stride x
        nx, ny, nc, nb = [int(i) for i in self.unquote(dim).split()]
        # The next dim[2] lines contain the color definitions
        # Each pixel is encoded by dim[3] bytes, and a comment
        # at the end of the line contains the corresponding value
        colors = dict([self.col(xpm.readline()) for i in range(nc)])
        if self.autoconvert:
            autoconverter = Autoconverter(mode="singlet")
            for symbol, value in colors.items():
                colors[symbol] = autoconverter.convert(value)
            self.logger.debug("Autoconverted colours: %r", colors)
        # make an array containing all possible values and let numpy figure out the dtype
        # BUG FIX: on Python 3, numpy.array(dict_values) builds a 0-d object
        # array; materialize the values as a list first
        dtype = numpy.array(list(colors.values())).dtype
        self.logger.debug("Guessed array type: %s", dtype.name)
        # pre-allocate array
        data = numpy.zeros((int(nx/nb), ny), dtype=dtype)
        self.logger.debug("dimensions: NX=%d NY=%d strideX=%d (NC=%d) --> (%d, %d)",
                          nx, ny, nb, nc, nx/nb, ny)
        iy = 0
        xval = []
        yval = []
        autoconverter = Autoconverter(mode="singlet")
        for line in xpm:
            if line.startswith("/*"):
                # lines '/* x-axis:' ... and '/* y-axis:' contain the
                # values of x and y coordinates
                s = self.uncomment(line).strip()
                if s.startswith('x-axis:'):
                    xval.extend([autoconverter.convert(x) for x in s[7:].split()])
                elif s.startswith('y-axis:'):
                    yval.extend([autoconverter.convert(y) for y in s[7:].split()])
                continue
            s = self.unquote(line)
            # Joao M. Damas <[email protected]> suggests on gmx-users (24 Oct 2014)
            # that the next line should read:
            #
            #  data[:, iy] = [colors[j[k:k+nb]] for k in range(0,nx*nb,nb)]
            #
            # "if one is using higher -nlevels for the .xpm construction (in g_rms, for example)"
            # However, without a test case I am not eager to change it right away so in
            # case some faulty behavior is discovered with the XPM reader then this comment
            # might be helpful. --- Oliver 2014-10-25
            data[:, iy] = [colors[s[k:k+nb]] for k in range(0,nx,nb)]
            self.logger.debug("read row %d with %d columns: '%s....%s'",
                              iy, data.shape[0], s[:4], s[-4:])
            iy += 1  # for next row
    self.xvalues = numpy.array(xval)
    if self.reverse:
        self.logger.debug("reversed row order, reverse=%r", self.reverse)
        self.__array = data[:, ::-1]
        self.yvalues = numpy.array(yval)
    else:
        self.__array = data
        self.yvalues = numpy.array(yval)[::-1]
|
def col(self, c):
    """Parse an XPM colour specification line.

    :Returns: ``(symbol, value)`` extracted from the :attr:`COLOUR` regex groups.
    :Raises: :exc:`ParseError` if the line does not match.
    """
    match = self.COLOUR.search(c)
    if match is None:
        self.logger.fatal("Cannot parse colour specification %r.", c)
        raise ParseError("XPM reader: Cannot parse colour specification {0!r}.".format(c))
    value = match.group('value')
    symbol = match.group('symbol')
    self.logger.debug("%s: %s %s\n", c.strip(), symbol, value)
    return symbol, value
|
def run(self, *args, **kwargs):
    """Run the command; args/kwargs are added or replace the ones given to the constructor.

    :Returns: the results tuple from :meth:`_run_command`; the
              :class:`Popen` instance itself is discarded.
    """
    _args, _kwargs = self._combine_arglist(args, kwargs)
    results, p = self._run_command(*_args, **_kwargs)
    return results
|
def _combine_arglist(self, args, kwargs):
"""Combine the default values and the supplied values."""
_args = self.args + args
_kwargs = self.kwargs.copy()
_kwargs.update(kwargs)
return _args, _kwargs
|
def _run_command(self, *args, **kwargs):
    """Execute the command; see the docs for __call__.

    Output capturing is controlled by ``environment.flags['capture_output']``:
    ``True`` captures stdout/stderr into Python variables, ``"file"``
    redirects them into ``environment.flags['capture_output_filename']``.

    :Returns: a tuple of the *results* tuple ``(rc, stdout, stderr)`` and
              the :class:`Popen` instance.
    """
    # hack to run command WITHOUT input (-h...) even though user defined
    # input (should have named it "ignore_input" with opposite values...)
    use_input = kwargs.pop('use_input', True)
    # logic for capturing output (see docs on I/O and the flags)
    capturefile = None
    if environment.flags['capture_output'] is True:
        # capture into Python vars (see subprocess.Popen.communicate())
        kwargs.setdefault('stderr', PIPE)
        kwargs.setdefault('stdout', PIPE)
    elif environment.flags['capture_output'] == "file":
        if 'stdout' in kwargs and 'stderr' in kwargs:
            pass
        else:
            # XXX: not race or thread proof; potentially many commands write to the same file
            fn = environment.flags['capture_output_filename']
            # BUG FIX: the Python-2-only builtin file() does not exist on
            # Python 3; use open() to clobber/overwrite the capture file
            capturefile = open(fn, "w")
            if 'stdout' in kwargs and 'stderr' not in kwargs:
                # special case of stdout used by code but stderr should be captured to file
                kwargs.setdefault('stderr', capturefile)
            else:
                # merge stderr with stdout and write stdout to file
                # (stderr comes *before* stdout in capture file, could split...)
                kwargs.setdefault('stderr', STDOUT)
                kwargs.setdefault('stdout', capturefile)
    try:
        p = self.Popen(*args, **kwargs)
        out, err = p.communicate(use_input=use_input)  # special Popen knows input!
    except:
        # bare except on purpose: log a pointer to the capture file for
        # diagnosis, then re-raise whatever went wrong
        if capturefile is not None:
            logger.error("Use captured command output in %r for diagnosis.", capturefile)
        raise
    finally:
        if capturefile is not None:
            capturefile.close()
    rc = p.returncode
    return (rc, out, err), p
|
def _commandline(self, *args, **kwargs):
    """Returns the command line (without pipes) as a list.

    The executable name comes first, followed by the transformed
    options and positional arguments.
    """
    # transform_args() is a hook (used in GromacsCommand very differently!)
    return [self.command_name] + self.transform_args(*args, **kwargs)
|
def commandline(self, *args, **kwargs):
    """Returns the commandline that run() uses (without pipes).

    Useful for logging or inspecting the exact invocation.
    """
    # this mirrors the setup in run()
    _args, _kwargs = self._combine_arglist(args, kwargs)
    return self._commandline(*_args, **_kwargs)
|
def Popen(self, *args, **kwargs):
    """Returns a special Popen instance (:class:`PopenWithInput`).

    The instance has its input pre-set so that calls to
    :meth:`~PopenWithInput.communicate` will not need to supply
    input. This is necessary if one wants to chain the output from
    one command to an input from another.

    For *stderr* and *stdout*: ``False`` captures the stream (PIPE),
    ``True`` and ``None`` leave it on the terminal.

    :TODO:
    Write example.
    """
    # stream flag handling: False --> capture (PIPE); True/None --> screen
    stderr = kwargs.pop('stderr', None)     # default: print to stderr (if STDOUT then merge)
    if stderr is False:                     # False: capture it
        stderr = PIPE
    elif stderr is True:
        stderr = None                       # use stderr
    stdout = kwargs.pop('stdout', None)     # either set to PIPE for capturing output
    if stdout is False:                     # ... or to False
        stdout = PIPE
    elif stdout is True:
        stdout = None                       # for consistency, make True write to screen
    stdin = kwargs.pop('stdin', None)
    input = kwargs.pop('input', None)
    use_shell = kwargs.pop('use_shell', False)
    if input:
        # supplied input is fed through a pipe; normalize it to a single
        # '\n'-terminated string where possible
        stdin = PIPE
        if isinstance(input, six.string_types) and not input.endswith('\n'):
            # make sure that input is a simple string with \n line endings
            input = six.text_type(input) + '\n'
        else:
            try:
                # make sure that input is a simple string with \n line endings
                input = '\n'.join(map(six.text_type, input)) + '\n'
            except TypeError:
                # so maybe we are a file or something ... and hope for the best
                pass
    cmd = self._commandline(*args, **kwargs)   # lots of magic happening here
    # (cannot move out of method because filtering of stdin etc)
    try:
        p = PopenWithInput(cmd, stdin=stdin, stderr=stderr, stdout=stdout,
                           universal_newlines=True, input=input, shell=use_shell)
    except OSError as err:
        logger.error(" ".join(cmd))         # log command line
        if err.errno == errno.ENOENT:
            errmsg = "Failed to find Gromacs command {0!r}, maybe its not on PATH or GMXRC must be sourced?".format(self.command_name)
            logger.fatal(errmsg)
            raise OSError(errmsg)
        else:
            logger.exception("Setting up Gromacs command {0!r} raised an exception.".format(self.command_name))
            raise
    logger.debug(p.command_string)
    return p
|
def transform_args(self, *args, **kwargs):
    """Transform arguments and return them as a list suitable for Popen.

    Keyword arguments become command line options: single-letter keys
    turn into POSIX options (``-x value``), longer keys into GNU options
    (``--key=value``).  A ``True`` value emits a bare switch; ``False``
    is rejected as ambiguous.  Positional *args* are appended verbatim.
    """
    options = []
    for key, val in kwargs.items():
        if not key.startswith('-'):
            # heuristic for turning key=val pairs into options
            # (fails for commands such as 'find' -- then just use args)
            key = ('-' if len(key) == 1 else '--') + key
        if val is True:
            options.append(key)
            continue
        if val is False:
            raise ValueError('A False value is ambiguous for option {0!r}'.format(key))
        if key[:2] == '--':
            options.append(key + '=' + str(val))     # GNU option
        else:
            options.append(key)                      # POSIX style
            options.append(str(val))
    return options + list(args)
|
def help(self, long=False):
    """Print help; same as using ``?`` in ``ipython``. long=True also gives call signature."""
    # print the executable name, the class docstring, and optionally the
    # docstring of __call__ (which documents the call signature)
    print("\ncommand: {0!s}\n\n".format(self.command_name))
    print(self.__doc__)
    if long:
        print("\ncall method: command():\n")
        print(self.__call__.__doc__)
|
def _combine_arglist(self, args, kwargs):
    """Combine the default values and the supplied values.

    Gromacs tools only take options, so positional *args* are folded
    into the keyword dict (as boolean switches via :meth:`_combineargs`)
    and an empty tuple is returned for the positional part.
    """
    gmxargs = self.gmxargs.copy()
    gmxargs.update(self._combineargs(*args, **kwargs))
    return (), gmxargs
|
def _combineargs(self, *args, **kwargs):
"""Add switches as 'options' with value True to the options dict."""
d = {arg: True for arg in args} # switches are kwargs with value True
d.update(kwargs)
return d
|
def _build_arg_list(self, **kwargs):
"""Build list of arguments from the dict; keys must be valid gromacs flags."""
arglist = []
for flag, value in kwargs.items():
# XXX: check flag against allowed values
flag = str(flag)
if flag.startswith('_'):
flag = flag[1:] # python-illegal keywords are '_'-quoted
if not flag.startswith('-'):
flag = '-' + flag # now flag is guaranteed to start with '-'
if value is True:
arglist.append(flag) # simple command line flag
elif value is False:
if flag.startswith('-no'):
# negate a negated flag ('noX=False' --> X=True --> -X ... but who uses that?)
arglist.append('-' + flag[3:])
else:
arglist.append('-no' + flag[1:]) # gromacs switches booleans by prefixing 'no'
elif value is None:
pass # ignore flag = None
else:
try:
arglist.extend([flag] + value) # option with value list
except TypeError:
arglist.extend([flag, value]) # option with single value
return list(map(str, arglist))
|
def _run_command(self,*args,**kwargs):
    """Execute the gromacs command; see the docs for __call__.

    Delegates to the base implementation and then checks the result
    for failure via :meth:`check_failure` before returning.
    """
    result, p = super(GromacsCommand, self)._run_command(*args, **kwargs)
    self.check_failure(result, command_string=p.command_string)
    return result, p
|
def _commandline(self, *args, **kwargs):
"""Returns the command line (without pipes) as a list. Inserts driver if present"""
if(self.driver is not None):
return [self.driver, self.command_name] + self.transform_args(*args, **kwargs)
return [self.command_name] + self.transform_args(*args, **kwargs)
|
def transform_args(self,*args,**kwargs):
    """Combine arguments and turn them into gromacs tool arguments.

    Positional *args* become boolean switches; everything is rendered
    as a flat ``['-flag', value, ...]`` list by :meth:`_build_arg_list`.
    """
    newargs = self._combineargs(*args, **kwargs)
    return self._build_arg_list(**newargs)
|
def _get_gmx_docs(self):
    """Extract standard gromacs doc

    Extract by running the program and chopping the header to keep from
    'DESCRIPTION' onwards.  The result is cached in :attr:`_doc_cache`.
    """
    if self._doc_cache is not None:
        return self._doc_cache
    try:
        # temporarily silence logging so the doc probe does not spam the log
        logging.disable(logging.CRITICAL)
        rc, header, docs = self.run('h', stdout=PIPE, stderr=PIPE, use_input=False)
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit propagate
        logging.critical("Invoking command {0} failed when determining its doc string. Proceed with caution".format(self.command_name))
        self._doc_cache = "(No Gromacs documentation available)"
        return self._doc_cache
    finally:
        # ALWAYS restore logging...
        logging.disable(logging.NOTSET)
    # The header is on STDOUT and is ignored. The docs are read from STDERR in GMX 4.
    m = re.match(self.doc_pattern, docs, re.DOTALL)
    if m is None:
        # In GMX 5, the opposite is true (Grrr)
        m = re.match(self.doc_pattern, header, re.DOTALL)
        if m is None:
            self._doc_cache = "(No Gromacs documentation available)"
            return self._doc_cache
    self._doc_cache = m.group('DOCS')
    return self._doc_cache
|
def communicate(self, use_input=True):
    """Run the command, using the input that was set up on __init__ (for *use_input* = ``True``)

    With *use_input* = ``False`` the command runs without being fed the
    stored input (e.g. when only help output is wanted).
    """
    if use_input:
        return super(PopenWithInput, self).communicate(self.input)
    else:
        return super(PopenWithInput, self).communicate()
|
def autoconvert(s):
    """Convert input to a numerical type if possible.

    1. A non-string object is returned as it is
    2. Try conversion to int, float, str.

    Multi-token strings become a :class:`numpy.ndarray` of the converted
    values; single tokens are returned as scalars.
    """
    if type(s) is not str:
        return s
    # try converters in increasing order of lenience
    for converter in (int, float, str):
        try:
            values = [converter(token) for token in s.split()]
        except (ValueError, AttributeError):
            continue
        if len(values) == 1:
            return values[0]
        return numpy.array(values)
    raise ValueError("Failed to autoconvert {0!r}".format(s))
|
def openany(datasource, mode='rt', reset=True):
    """Context manager for :func:`anyopen`.

    Open the `datasource` and close it when the context of the :keyword:`with`
    statement exits.
    `datasource` can be a filename or a stream (see :func:`isstream`). A stream
    is reset to its start if possible (via :meth:`~io.IOBase.seek` or
    :meth:`~cString.StringIO.reset`).
    The advantage of this function is that very different input sources
    ("streams") can be used for a "file", ranging from files on disk (including
    compressed files) to open file objects to sockets and strings---as long as
    they have a file-like interface.

    :Arguments:
     *datasource*
        a file or a stream
     *mode*
        {'r', 'w'} (optional), open in r(ead) or w(rite) mode
     *reset*
        bool (optional) try to read (`mode` 'r') the stream from the
        start [``True``]

    **Example**
    Open a gzipped file and process it line by line::
       with openany("input.pdb.gz") as pdb:
           for line in pdb:
               if line.startswith('ATOM'):
                   print(line)
    Open a URL and read it::
       import urllib2
       with openany(urllib2.urlopen("https://www.mdanalysis.org/")) as html:
           print(html.read())

    .. SeeAlso::
       :func:`anyopen`
    """
    # NOTE(review): generator-style context manager; presumably decorated
    # with @contextlib.contextmanager at the (unseen) definition site -- confirm
    stream = anyopen(datasource, mode=mode, reset=reset)
    try:
        yield stream
    finally:
        # guarantee the stream is closed even if the with-body raised
        stream.close()
|
def anyopen(datasource, mode='rt', reset=True):
    """Open datasource (gzipped, bzipped, uncompressed) and return a stream.

    `datasource` can be a filename or a stream (see :func:`isstream`). By
    default, a stream is reset to its start if possible (via
    :meth:`~io.IOBase.seek` or :meth:`~cString.StringIO.reset`).
    If possible, the attribute ``stream.name`` is set to the filename or
    "<stream>" if no filename could be associated with the *datasource*.

    :Arguments:
     *datasource*
        a file (from :class:`file` or :func:`open`) or a stream (e.g. from
        :func:`urllib2.urlopen` or :class:`cStringIO.StringIO`)
     *mode*
        {'r', 'w', 'a'} (optional),
        Open in r(ead), w(rite) or a(ppen) mode. More complicated
        modes ('r+', 'w+', ...) are not supported; only the first letter of
        `mode` is used and thus any additional modifiers are silently ignored.
     *reset*
        bool (optional),
        try to read (`mode` 'r') the stream from the start
    :Returns:
        file-like object

    .. SeeAlso::
        :func:`openany` to be used with the :keyword:`with` statement.
    """
    # extension -> open function; the '' entry is the plain-file fallback
    handlers = {'bz2': bz2_open, 'gz': gzip.open, '': open}
    if mode.startswith('r'):
        if isstream(datasource):
            stream = datasource
            try:
                filename = str(stream.name)  # maybe that does not always work?
            except AttributeError:
                filename = "<stream>"
            if reset:
                # try reset() first (cStringIO), then seek(0); warn if neither works
                try:
                    stream.reset()
                except (AttributeError, IOError):
                    try:
                        stream.seek(0)
                    except (AttributeError, IOError):
                        warnings.warn("Stream {0}: not guaranteed to be at the beginning."
                                      "".format(filename),
                                      category=StreamWarning)
        else:
            # filename: probe each handler until one produces a usable stream;
            # plain open ('') must be tried last
            stream = None
            filename = datasource
            for ext in ('bz2', 'gz', ''):   # file == '' should be last
                openfunc = handlers[ext]
                stream = _get_stream(datasource, openfunc, mode=mode)
                if stream is not None:
                    break
            if stream is None:
                raise IOError(errno.EIO, "Cannot open file or stream in mode={mode!r}.".format(**vars()), repr(filename))
    elif mode.startswith('w') or mode.startswith('a'):  # append 'a' not tested...
        if isstream(datasource):
            stream = datasource
            try:
                filename = str(stream.name)  # maybe that does not always work?
            except AttributeError:
                filename = "<stream>"
        else:
            stream = None
            filename = datasource
            # choose the handler from the filename extension
            name, ext = os.path.splitext(filename)
            if ext.startswith('.'):
                ext = ext[1:]
            if not ext in ('bz2', 'gz'):
                ext = ''   # anything else but bz2 or gz is just a normal file
            openfunc = handlers[ext]
            stream = openfunc(datasource, mode=mode)
            if stream is None:
                raise IOError(errno.EIO, "Cannot open file or stream in mode={mode!r}.".format(**vars()), repr(filename))
    else:
        raise NotImplementedError("Sorry, mode={mode!r} is not implemented for {datasource!r}".format(**vars()))
    try:
        stream.name = filename
    except (AttributeError, TypeError):
        pass  # can't set name (e.g. cStringIO.StringIO)
    return stream
|
def _get_stream(filename, openfunction=open, mode='r'):
    """Return open stream if *filename* can be opened with *openfunction* or else ``None``.

    Used by :func:`anyopen` to probe compression handlers: a handler that
    cannot deal with the file yields ``None`` so the next one can be tried.
    """
    try:
        stream = openfunction(filename, mode=mode)
    except (IOError, OSError) as err:
        # A missing file or a permission problem is a real error that every
        # handler would hit, so re-raise it; any other failure just means
        # this particular openfunction cannot handle the file -> None.
        if errno.errorcode[err.errno] in ['ENOENT', 'EACCES']:
            six.reraise(*sys.exc_info())
        return None
    if mode.startswith('r'):
        # additional check for reading (eg can we uncompress) --- is this needed?
        try:
            stream.readline()
        except IOError:
            # e.g. gzip opened a non-gzip file: treat as "handler cannot open"
            stream.close()
            stream = None
        except:
            stream.close()
            raise
        else:
            # the probe consumed a line; reopen to hand back a fresh stream
            stream.close()
            stream = openfunction(filename, mode=mode)
    return stream
|
def hasmethod(obj, m):
    """Return ``True`` if object *obj* contains the method *m*.

    .. versionadded:: 0.7.1
    """
    # getattr with a default collapses the hasattr+callable check:
    # a missing attribute yields None, which is not callable
    return callable(getattr(obj, m, None))
|
def isstream(obj):
    """Detect if `obj` is a stream.

    We consider anything a stream that has the methods

    - ``close()``

    and either set of the following

    - ``read()``, ``readline()``, ``readlines()``
    - ``write()``, ``writeline()``, ``writelines()``

    :Arguments:
      *obj*
          stream or str
    :Returns:
      *bool*, ``True`` if `obj` is a stream, ``False`` otherwise

    .. SeeAlso:: :mod:`io`
    .. versionadded:: 0.7.1
    """
    # close() is mandatory for anything we call a stream
    if not hasmethod(obj, "close"):
        return False
    # at least one complete set (reader or writer methods) must be present
    method_sets = (
        ("read", "readline", "readlines"),
        ("write", "writeline", "writelines"))
    matches = [numpy.all([hasmethod(obj, m) for m in candidates])
               for candidates in method_sets]
    return numpy.any(matches)
|
def convert_aa_code(x):
    """Convert between 3-letter and 1-letter amino acid codes.

    A 1-letter code is looked up in the 3-letter table and vice versa;
    any other length is rejected.
    """
    size = len(x)
    if size == 1:
        return amino_acid_codes[x.upper()]
    if size == 3:
        return inverse_aa_codes[x.upper()]
    raise ValueError("Can only convert 1-letter or 3-letter amino acid codes, "
                     "not %r" % x)
|
def in_dir(directory, create=True):
    """Context manager to execute a code block in a directory.

    * The directory is created if it does not exist (unless
      create=False is set)
    * At the end or after an exception code always returns to
      the directory that was the current directory before entering
      the block.
    """
    # NOTE(review): generator-style context manager; presumably decorated
    # with @contextlib.contextmanager at the (unseen) definition site -- confirm
    startdir = os.getcwd()
    try:
        try:
            os.chdir(directory)
            logger.debug("Working in {directory!r}...".format(**vars()))
        except OSError as err:
            # only create-and-retry when the directory is missing and
            # creation was requested; any other error is fatal
            if create and err.errno == errno.ENOENT:
                os.makedirs(directory)
                os.chdir(directory)
                logger.info("Working in {directory!r} (newly created)...".format(**vars()))
            else:
                logger.exception("Failed to start working in {directory!r}.".format(**vars()))
                raise
        yield os.getcwd()
    finally:
        # always restore the original working directory
        os.chdir(startdir)
|
def realpath(*args):
    """Join all args and return the real path, rooted at /.

    Expands ``~`` and environment variables such as :envvar:`$HOME`.
    Returns ``None`` if any of the args is none.
    """
    if None in args:
        return None
    joined = os.path.join(*args)
    expanded = os.path.expandvars(os.path.expanduser(joined))
    return os.path.realpath(expanded)
|
def find_first(filename, suffices=None):
    """Find first *filename* with a suffix from *suffices*.

    :Arguments:
      *filename*
         base filename; this file name is checked first
      *suffices*
         list of suffices that are tried in turn on the root of *filename*; can contain the
         ext separator (:data:`os.path.extsep`) or not
    :Returns: The first match or ``None``.
    """
    # struct is not reliable as it depends on qscript so now we just try everything...
    root, extension = os.path.splitext(filename)
    candidates = [extension]                 # native extension is tried first
    if suffices is not None:
        candidates.extend(withextsep(suffices))
    for ext in candidates:
        candidate = root + ext
        if os.path.exists(candidate):
            return candidate
    return None
|
def withextsep(extensions):
    """Return list in which each element is guaranteed to start with :data:`os.path.extsep`."""
    sep = os.path.extsep
    # prepend the separator only where it is missing
    return [x if x.startswith(sep) else sep + x
            for x in asiterable(extensions)]
|
def iterable(obj):
    """Returns ``True`` if *obj* can be iterated over and is *not* a string."""
    if isinstance(obj, string_types):
        # strings iterate over characters, which is rarely what callers want
        return False
    if hasattr(obj, 'next'):
        # any (py2-style) iterator will do
        return True
    try:
        len(obj)          # anything else that is sized should be iterable
        return True
    except TypeError:
        return False
|
def unlink_f(path):
    """Unlink path but do not complain if file does not exist."""
    try:
        os.unlink(path)
    except OSError as err:
        # a missing file is exactly the case we want to ignore
        if err.errno == errno.ENOENT:
            return
        raise
|
def unlink_gmx_backups(*args):
    """Unlink (rm) all backup files corresponding to the listed files."""
    for path in args:
        dirname, basename = os.path.split(path)
        # gromacs backups look like '#name.ext.1#' in the same directory
        pattern = os.path.join(dirname, '#' + basename + '.*#')
        for backup in glob.glob(pattern):
            unlink_f(backup)
|
def mkdir_p(path):
    """Create a directory *path* with subdirs but do not complain if it exists.

    This is like GNU ``mkdir -p path``.
    """
    try:
        os.makedirs(path)
    except OSError as err:
        # an already-existing path is fine; anything else is a real error
        if err.errno == errno.EEXIST:
            return
        raise
|
def cat(f=None, o=None):
    """Concatenate files *f*=[...] and write to *o*

    Does nothing when either argument is ``None``.  Delegates to the
    external ``cat`` program and raises :exc:`OSError` on failure.
    """
    # need f, o to be compatible with trjcat and eneconv
    if f is None or o is None:
        return
    target = o
    infiles = asiterable(f)
    logger.debug("cat {0!s} > {1!s} ".format(" ".join(infiles), target))
    with open(target, 'w') as out:
        rc = subprocess.call(['cat'] + infiles, stdout=out)
    if rc != 0:
        msg = "failed with return code {0:d}: cat {1!r} > {2!r} ".format(rc, " ".join(infiles), target)
        # NOTE(review): logger.exception outside an except block logs no
        # useful traceback; logger.error may be what was intended -- confirm
        logger.exception(msg)
        raise OSError(errno.EIO, msg, target)
|
def activate_subplot(numPlot):
    """Make subplot *numPlot* active on the canvas.

    Use this if a simple ``subplot(numRows, numCols, numPlot)``
    overwrites the subplot instead of activating it.

    :Arguments:
       *numPlot*
          1-based subplot number, as in :func:`~matplotlib.pyplot.subplot`
    :Returns: the activated axes instance
    """
    # see http://www.mail-archive.com/[email protected]/msg07156.html
    from pylab import gcf, axes
    numPlot -= 1  # index is 0-based, plots are 1-based
    return axes(gcf().get_axes()[numPlot])
|
def remove_legend(ax=None):
    """Remove legend for axes or gca.

    See http://osdir.com/ml/python.matplotlib.general/2005-07/msg00285.html

    :Arguments:
       *ax*
          axes instance; defaults to the current axes (:func:`~pylab.gca`)
    """
    from pylab import gca, draw
    if ax is None:
        ax = gca()
    # dropping the cached legend object removes it on the next draw
    ax.legend_ = None
    draw()
|
def number_pdbs(*args, **kwargs):
    """Rename pdbs x1.pdb ... x345.pdb --> x0001.pdb ... x0345.pdb

    :Arguments:
       - *args*: filenames or glob patterns (such as "pdb/md*.pdb")
       - *format*: format string including keyword *num* ["%(num)04d"]

    Files that do not match the :data:`NUMBERED_PDB` pattern are skipped;
    rename errors are logged and do not abort the loop.
    """
    format = kwargs.pop('format', "%(num)04d")
    name_format = "%(prefix)s" + format +".%(suffix)s"
    for f in itertools.chain.from_iterable(map(glob.glob, args)):
        m = NUMBERED_PDB.search(f)
        if m is None:
            continue
        num = int(m.group('NUMBER'))
        prefix = m.group('PREFIX')
        suffix = m.group('SUFFIX')
        # vars() supplies the prefix/num/suffix locals to the template
        newname = name_format % vars()
        logger.info("Renaming {f!r} --> {newname!r}".format(**vars()))
        try:
            os.rename(f, newname)
        except OSError:
            logger.exception("renaming failed")
|
def _init_filename(self, filename=None, ext=None):
    """Initialize the current filename :attr:`FileUtils.real_filename` of the object.

    Bit of a hack.

    - The first invocation must have ``filename != None``; this will set a
      default filename with suffix :attr:`FileUtils.default_extension`
      unless another one was supplied.
    - Subsequent invocations either change the filename accordingly or
      ensure that the default filename is set with the proper suffix.
    """
    extension = ext or self.default_extension
    # delegate the default/extension bookkeeping to filename()
    filename = self.filename(filename, ext=extension, use_my_ext=True, set_default=True)
    #: Current full path of the object for reading and writing I/O.
    self.real_filename = os.path.realpath(filename)
|
def filename(self,filename=None,ext=None,set_default=False,use_my_ext=False):
    """Supply a file name for the class object.

    Typical uses::
       fn = filename()                                 ---> <default_filename>
       fn = filename('name.ext')                       ---> 'name'
       fn = filename(ext='pickle')                     ---> <default_filename>'.pickle'
       fn = filename('name.inp','pdf')                 --> 'name.pdf'
       fn = filename('foo.pdf',ext='png',use_my_ext=True) --> 'foo.pdf'

    The returned filename is stripped of the extension
    (``use_my_ext=False``) and if provided, another extension is
    appended. Chooses a default if no filename is given.
    Raises a ``ValueError`` exception if no default file name is known.
    If ``set_default=True`` then the default filename is also set.
    ``use_my_ext=True`` lets the suffix of a provided filename take
    priority over a default ``ext`` tension.

    .. versionchanged:: 0.3.1
       An empty string as *ext* = "" will suppress appending an extension.
    """
    if filename is None:
        # no name given: fall back to the stored default, if any
        if not hasattr(self,'_filename'):
            self._filename = None  # add attribute to class
        if self._filename:
            filename = self._filename
        else:
            raise ValueError("A file name is required because no default file name was defined.")
        my_ext = None
    else:
        # split off the caller-supplied extension for later use
        filename, my_ext = os.path.splitext(filename)
        if set_default:  # replaces existing default file name
            self._filename = filename
    if my_ext and use_my_ext:
        ext = my_ext
    if ext is not None:
        if ext.startswith(os.extsep):
            ext = ext[1:]  # strip a dot to avoid annoying mistakes
        if ext != "":
            filename = filename + os.extsep + ext
    return filename
|
def check_file_exists(self, filename, resolve='exception', force=None):
    """If a file exists then continue with the action specified in ``resolve``.

    ``resolve`` must be one of

    "ignore"
         always return ``False``
    "indicate"
         return ``True`` if it exists
    "warn"
         indicate and issue a :exc:`UserWarning`
    "exception"
         raise :exc:`IOError` if it exists

    Alternatively, set *force* for the following behaviour (which
    ignores *resolve*):

    ``True``
         same as *resolve* = "ignore" (will allow overwriting of files)
    ``False``
         same as *resolve* = "exception" (will prevent overwriting of files)
    ``None``
         ignored, do whatever *resolve* says

    :Raises: :exc:`IOError` with :data:`errno.EEXIST` when the file exists
             and *resolve* = "exception"/"raise" (or *force* = ``False``).
    """
    def _warn(x):
        msg = "File {0!r} already exists.".format(x)
        # logging.Logger.warn() is deprecated since Python 3.3; use warning()
        logger.warning(msg)
        warnings.warn(msg)
        return True
    def _raise(x):
        msg = "File {0!r} already exists.".format(x)
        logger.error(msg)
        raise IOError(errno.EEXIST, x, msg)
    # dispatch table: maps each resolve keyword to its handler
    solutions = {'ignore': lambda x: False,  # file exists, but we pretend that it doesn't
                 'indicate': lambda x: True,  # yes, file exists
                 'warn': _warn,
                 'warning': _warn,
                 'exception': _raise,
                 'raise': _raise,
                 }
    # *force* (when not None) overrides whatever *resolve* says
    if force is True:
        resolve = 'ignore'
    elif force is False:
        resolve = 'exception'
    if not os.path.isfile(filename):
        return False
    return solutions[resolve](filename)
|
def infix_filename(self, name, default, infix, ext=None):
    """Unless *name* is provided, insert *infix* before the extension *ext* of *default*."""
    if name is not None:
        # an explicit name always wins
        return name
    stem, default_ext = os.path.splitext(default)
    extension = default_ext if ext is None else ext
    if extension.startswith(os.extsep):
        extension = extension[1:]
    return self.filename(stem + infix, ext=extension)
|
def strftime(self, fmt="%d:%H:%M:%S"):
    """Primitive string formatter.

    The only directives understood are the following:
      ============   ==========================
      Directive      meaning
      ============   ==========================
      %d             day as integer
      %H             hour  [00-23]
      %h             hours including days
      %M             minute as integer [00-59]
      %S             second as integer [00-59]
      ============   ==========================
    """
    # directive -> replacement text; plain str.replace() in sequence,
    # so unknown directives pass through unchanged
    replacements = (
        ("%d", str(self.days)),
        ("%H", "{0:02d}".format(self.dhours)),
        ("%h", str(24 * self.days + self.dhours)),
        ("%M", "{0:02d}".format(self.dminutes)),
        ("%S", "{0:02d}".format(self.dseconds)),
    )
    result = fmt
    for directive, value in replacements:
        result = result.replace(directive, value)
    return result
|
def start_logging(logfile="gromacs.log"):
    """Start logging of messages to file and console.

    The default logfile is named ``gromacs.log`` and messages are
    logged with the tag *gromacs*.
    """
    from . import log
    log.create("gromacs", logfile=logfile)
    gmx_logger = logging.getLogger("gromacs")
    gmx_logger.info("GromacsWrapper %s STARTED logging to %r",
                    __version__, logfile)
|
def stop_logging():
    """Stop logging to logfile and console."""
    from . import log
    gmx_logger = logging.getLogger("gromacs")
    gmx_logger.info("GromacsWrapper %s STOPPED logging", get_version())
    # detach all handlers so nothing more is written
    log.clear_handlers(gmx_logger)
|
def filter_gromacs_warnings(action, categories=None):
    """Set the :meth:`warnings.simplefilter` to *action*.

    *categories* must be a list of warning classes or strings.
    ``None`` selects the defaults, :data:`gromacs.less_important_warnings`.

    :Raises: :exc:`TypeError` if a category is neither a :exc:`Warning`
             subclass nor the name of one defined in this module.
    """
    if categories is None:
        categories = less_important_warnings
    for c in categories:
        try:
            # strings name module-level warning classes
            w = globals()[c]
        except (KeyError, TypeError):
            # not a known name (or unhashable): assume it is a class itself
            w = c
        # issubclass() raises a bare TypeError for non-classes, which would
        # bypass our informative message; check isinstance(w, type) first
        if not (isinstance(w, type) and issubclass(w, Warning)):
            raise TypeError("{0!r} is neither a Warning nor the name of a Gromacs warning.".format(c))
        warnings.simplefilter(action, category=w)
|
def tool_factory(clsname, name, driver, base=GromacsCommand):
    """Factory for :class:`GromacsCommand` derived types.

    Dynamically creates a class *clsname* deriving from *base* that wraps
    the Gromacs tool *name*, executed through the *driver* binary.
    """
    attributes = {
        'command_name': name,
        'driver': driver,
        # docs are fetched lazily from the tool's own help output
        '__doc__': property(base._get_gmx_docs),
    }
    return type(clsname, (base,), attributes)
|
def find_executables(path):
    """Find executables in a path.

    Searches executables in a directory excluding some known commands
    unusable with GromacsWrapper.

    :param path: dirname to search for
    :return: list of executables
    """
    # scripts/helpers shipped with Gromacs that GromacsWrapper cannot drive;
    # hoisted to a frozenset so it is not rebuilt on every iteration
    excluded = frozenset(['GMXRC', 'GMXRC.bash', 'GMXRC.csh', 'GMXRC.zsh',
                          'demux.pl', 'xplor2gmx.pl'])
    execs = []
    for exe in os.listdir(path):
        if exe in excluded:
            continue
        fullexe = os.path.join(path, exe)
        # keep only executable regular files (X_OK alone also matches dirs)
        if os.access(fullexe, os.X_OK) and not os.path.isdir(fullexe):
            execs.append(exe)
    return execs
|
def load_v5_tools():
    """ Load Gromacs 2018/2016/5.x tools automatically using some heuristic.

    Tries to load tools (1) using the driver from configured groups (2) and
    falls back to automatic detection from ``GMXBIN`` (3) then to rough guesses.

    In all cases the command ``gmx help`` is ran to get all tools available.

    :return: dict mapping tool names to GromacsCommand classes
    :raises GromacsToolLoadingError: if no driver produced any tools
    """
    logger.debug("Loading 2018/2016/5.x tools...")
    # candidate driver binaries: (1) configuration file, (2) $GMXBIN,
    # (3) the standard gmx binary names
    drivers = config.get_tool_names()
    if len(drivers) == 0 and 'GMXBIN' in os.environ:
        drivers = find_executables(os.environ['GMXBIN'])
    # an implausibly long list means the GMXBIN scan picked up non-driver
    # executables, so fall back to the conventional names
    if len(drivers) == 0 or len(drivers) > 4:
        drivers = ['gmx', 'gmx_d', 'gmx_mpi', 'gmx_mpi_d']
    # whether to append the driver suffix (e.g. "mpi_d") to tool identifiers
    append = config.cfg.getboolean('Gromacs', 'append_suffix', fallback=True)
    tools = {}
    for driver in drivers:
        suffix = driver.partition('_')[2]  # text after first '_': 'gmx_mpi_d' -> 'mpi_d'
        try:
            out = subprocess.check_output([driver, '-quiet', 'help',
                                           'commands'])
            # NOTE(review): the parsing below assumes the layout of
            # ``gmx help commands``: 5 lines of header, one trailer line,
            # tool names starting in column 4 with continuation lines
            # indented further -- confirm if a new release changes this.
            for line in out.splitlines()[5:-1]:
                line = str(line.decode('ascii')) # Python 3: byte string -> str, Python 2: normal string
                if line[4] != ' ':
                    # name runs from column 4 up to the next space
                    name = line[4:line.index(' ', 4)]
                    fancy = make_valid_identifier(name)
                    if suffix and append:
                        fancy = '{0!s}_{1!s}'.format(fancy, suffix)
                    tools[fancy] = tool_factory(fancy, name, driver)
        except (subprocess.CalledProcessError, OSError):
            # driver missing or not runnable: silently try the next candidate
            pass
    if not tools:
        errmsg = "Failed to load 2018/2016/5.x tools (tried drivers: {})".format(drivers)
        logger.debug(errmsg)
        raise GromacsToolLoadingError(errmsg)
    logger.debug("Loaded {0} v5 tools successfully!".format(len(tools)))
    return tools
|
def load_v4_tools():
    """ Load Gromacs 4.x tools automatically using some heuristic.

    Tries to load tools (1) in configured tool groups (2) and fails back to
    automatic detection from ``GMXBIN`` (3) then to a prefilled list.

    Also load any extra tool configured in ``~/.gromacswrapper.cfg``

    :return: dict mapping tool names to GromacsCommand classes
    """
    logger.debug("Loading v4 tools...")
    # candidate tool names: config file, then $GMXBIN, then the builtin list
    names = config.get_tool_names()
    if not names and 'GMXBIN' in os.environ:
        names = find_executables(os.environ['GMXBIN'])
    if not names or len(names) > len(V4TOOLS) * 4:
        names = list(V4TOOLS)
    names.extend(config.get_extra_tool_names())

    tools = {}
    for name in names:
        identifier = make_valid_identifier(name)
        tools[identifier] = tool_factory(identifier, name, None)

    if not tools:
        errmsg = "Failed to load v4 tools"
        logger.debug(errmsg)
        raise GromacsToolLoadingError(errmsg)
    logger.debug("Loaded {0} v4 tools successfully!".format(len(tools)))
    return tools
|
def merge_ndx(*args):
    """ Takes one or more index files and optionally one structure file and
    returns a path for a new merged index file.

    :param args: index files and zero or one structure file
    :return: path for the new merged index file
    """
    # split arguments by extension: .ndx files vs (at most one) structure
    ndxs = [fname for fname in args if fname.endswith('.ndx')]
    structs = [fname for fname in args if not fname.endswith('.ndx')]
    if len(structs) > 1:
        raise ValueError("only one structure file supported")
    struct = structs[0] if structs else None

    # temporary output file, removed automatically at interpreter exit
    fd, multi_ndx = tempfile.mkstemp(suffix='.ndx', prefix='multi_')
    os.close(fd)
    atexit.register(os.unlink, multi_ndx)

    kwargs = {'n': ndxs, 'o': multi_ndx}
    if struct:
        kwargs['f'] = struct
    make_ndx = registry['Make_ndx'](**kwargs)
    # 'q' just quits make_ndx after it has merged the groups
    make_ndx(input=['q'], stdout=False, stderr=False)
    return multi_ndx
|
def read(self, filename=None):
    """Read and parse index file *filename*."""
    self._init_filename(filename)

    data = odict()
    with open(self.real_filename) as ndx:
        current_section = None
        for line in ndx:
            line = line.strip()
            if not line:
                continue
            m = self.SECTION.match(line)
            if m:
                # a "[ name ]" header starts a new group
                current_section = m.group('name')
                data[current_section] = []  # can fail if name not legal python key
            elif current_section is not None:
                # all whitespace-separated tokens are atom numbers
                data[current_section].extend(int(x) for x in line.split())

    super(NDX, self).update(odict([(name, self._transform(atomnumbers))
                                   for name, atomnumbers in data.items()]))
|
def write(self, filename=None, ncol=ncol, format=format):
    """Write index file to *filename* (or overwrite the file that the index was read from)"""
    with open(self.filename(filename, ext='ndx'), 'w') as ndx:
        for name in self:
            atomnumbers = self._getarray(name)  # allows overriding
            ndx.write('[ {0!s} ]\n'.format(name))
            # emit atom numbers in rows of at most *ncol* entries each
            for start in range(0, len(atomnumbers), ncol):
                row = atomnumbers[start:start + ncol].astype(int)
                fmt = " ".join(len(row) * [format]) + '\n'
                ndx.write(fmt % tuple(row))
            ndx.write('\n')
|
def ndxlist(self):
    """Return a list of groups in the same format as :func:`gromacs.cbook.get_ndx_groups`.

    Format:
       [ {'name': group_name, 'natoms': number_atoms, 'nr': # group_number}, ....]
    """
    groups = []
    for nr, (name, atomnumbers) in enumerate(self.items()):
        groups.append({'name': name,
                       'natoms': len(atomnumbers),
                       'nr': nr + 1})  # gromacs group numbers are 1-based
    return groups
|
def join(self, *groupnames):
    """Return an index group that contains atoms from all *groupnames*.

    The method will silently ignore any groups that are not in the
    index.

    **Example**

    Always make a solvent group from water and ions, even if not
    all ions are present in all simulations::

       I['SOLVENT'] = I.join('SOL', 'NA+', 'K+', 'CL-')
    """
    # only groups actually present in the index contribute
    present = [self[name] for name in groupnames if name in self]
    return self._sum(present)
|
def break_array(a, threshold=numpy.pi, other=None):
    """Create an array which masks jumps >= *threshold*.

    Extra points are inserted between two subsequent values whose
    absolute difference differs by more than *threshold* (default is
    pi).

    *other* can be a secondary array which is also masked according to
    *a*.

    :Returns: ``(a_masked, other_masked)`` (where *other_masked* can be
              ``None``)
    :Raises: :exc:`ValueError` if *other* does not have the same shape as *a*.
    """
    assert len(a.shape) == 1, "Only 1D arrays supported"

    if other is not None and a.shape != other.shape:
        raise ValueError("arrays must be of identical shape")

    # jump occurs after the index in breaks
    breaks = numpy.where(numpy.abs(numpy.diff(a)) >= threshold)[0]
    # insert a blank after the jump
    breaks += 1

    # new array b including insertions for all the breaks
    m = len(breaks)
    b = numpy.empty((len(a) + m))
    # calculate new indices for breaks in b, taking previous insertions into account
    b_breaks = breaks + numpy.arange(m)
    # numpy.bool was removed in NumPy 1.24 and numpy.NAN in NumPy 2.0;
    # use the builtin bool and numpy.nan instead
    mask = numpy.zeros_like(b, dtype=bool)
    mask[b_breaks] = True
    b[~mask] = a
    b[mask] = numpy.nan

    if other is not None:
        c = numpy.empty_like(b)
        c[~mask] = other
        c[mask] = numpy.nan
        ma_c = numpy.ma.array(c, mask=mask)
    else:
        ma_c = None

    return numpy.ma.array(b, mask=mask), ma_c
|
def write(self, filename=None):
    """Write array to xvg file *filename* in NXY format.

    .. Note:: Only plain files working at the moment, not compressed.
    """
    self._init_filename(filename)
    with utilities.openany(self.real_filename, 'w') as xvg:
        # header: two comment lines plus the column names
        xvg.write("# xmgrace compatible NXY data file\n"
                  "# Written by gromacs.formats.XVG()\n")
        xvg.write("# :columns: {0!r}\n".format(self.names))
        # one output row per data point (columns of self.array)
        for row in self.array.T:
            row.tofile(xvg, sep=" ", format="%-8s") # quick and dirty ascii output...--no compression!
            xvg.write('\n')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.