from django import forms
from django.core import validators
def check_for_z(val):
if val[0].lower() != 'z':
raise forms.ValidationError('man this is wrong use Z')
    print('ok, value starts with z')
class my_form(forms.Form):
name = forms.CharField(widget=forms.TextInput(attrs={'class' : 'inp'}),validators=[check_for_z])
email = forms.EmailField() # widget=forms.TextInput(attrs={'class' : 'inp'})
vrfmail = forms.EmailField(required=True)
text = forms.CharField(widget=forms.Textarea(attrs = {'class':'inp'}))
bot = forms.CharField(required=False,widget =forms.HiddenInput,validators=[
validators.MaxLengthValidator(0)
])
# def clean_bot(self):
# bot = self.cleaned_data['bot']
# if len(bot)>0 :
# raise forms.ValidationError(' hi man error ')
# return bot
    # This first approach (clean_bot) never raised an error for me at all,
    # but the second approach, with validators, does work --
    # although even then the error only shows up in the console.
    # The third approach is validating the two email fields against each other.
def clean(self):
        # We name this method clean (no field-name suffix) so the form
        # treats it as the form-wide validator.
all_clean_data = super().clean()
        # clean() runs every time is_valid() is called, so this is where
        # we validate the cleaned data as a whole.
        em = all_clean_data.get('email')
        vm = all_clean_data.get('vrfmail')
        if em != vm:
            raise forms.ValidationError("two emails must be identical")
        return all_clean_data
|
from django.core.cache import cache
import logging, sys, csv, os, math, re, string, subprocess, time, stat
import copy  # needed by DataAggregate.__div__ below
from plotty import settings
from plotty.results.Utilities import present_value, present_value_csv, scenario_hash, length_cmp, t_quantile
from plotty.results.Exceptions import LogTabulateStarted, PipelineError
from plotty.results.CSVParser import parse_csv
import tempfile
import StringIO, urllib
class Messages(object):
def __init__(self):
self.info_messages = list()
self.warn_messages = list()
def extend(self, other):
self.info_messages.extend(other.info_messages)
self.warn_messages.extend(other.warn_messages)
def info(self, text, extra=""):
self.info_messages.append((text, extra))
def warn(self, text, extra=""):
self.warn_messages.append((text, extra))
def empty(self):
return (len(self.info_messages) + len(self.warn_messages)) == 0
def infos(self):
return self.info_messages
def warnings(self):
return self.warn_messages
class DataTable:
""" The core data structure. DataTable has one property, DataTable.rows.
This is an array of DataRow objects, one per scenario in the file(s)
being used.
A DataTable is constructed by parsing the given list of CSV files.
Django's caching settings are used to try to cache the parsed CSV
data.
"""
def __init__(self, logs, wait=True):
""" Creates a new DataTable by reading each CSV file provided, or
loading them from cache if they are present. This routine will
also check whether the log files specified have been modified
since they were cached (based on last modified date), and if so,
will expire the cache.
logs: an array of paths to CSV files, relative to
settings.BM_LOG_DIR.
wait: if True, we will wait for the logs to be tabulated. if not,
depending on the size of the logs, we will spawn a subprocess
and wait.
"""
self.rows = []
self.scenarioColumns = set()
self.valueColumns = set()
self.messages = Messages()
self.lastModified = 0
for i, log in enumerate(logs):
dir_path = os.path.join(settings.BM_LOG_DIR, log)
cached_vals = cache.get("LOGFILE-" + log)
file_last_modified = os.path.getmtime(dir_path)
if cached_vals is None or cached_vals['last_modified'] < file_last_modified:
# cache is invalid, we need to reload
messages = Messages()
try:
rows, lastModified, scenarioColumns, valueColumns = self.loadLog(log, wait, messages)
except LogTabulateStarted as e:
e.index = i
e.length = len(logs)
raise e
# store the results in the cache
ret = cache.set("LOGFILE-" + log, {
'last_modified': lastModified,
'rows': rows,
'scenarioColumns': scenarioColumns,
'valueColumns': valueColumns,
'messages': messages})
logging.debug('For log %s: cache empty or expired, stored %d rows to cache.' % (log, len(rows)))
else:
lastModified = cached_vals['last_modified']
rows = cached_vals['rows']
scenarioColumns = cached_vals['scenarioColumns']
valueColumns = cached_vals['valueColumns']
messages = cached_vals['messages']
logging.debug('For log %s: loaded %d rows from cache (dir last modified: %d, cache last modified: %d)' % (log, len(rows), file_last_modified, cached_vals['last_modified']))
self.rows.extend(rows)
self.scenarioColumns |= scenarioColumns
self.valueColumns |= valueColumns
self.messages.extend(messages)
if self.lastModified < lastModified:
self.lastModified = lastModified
self.valueColumnsDisplay = dict([(x,x) for x in self.valueColumns])
def __iter__(self):
""" Lets us do `for row in datatable` instead of
`for row in datatable.rows`.
"""
return iter(self.rows)
def loadLog(self, log, wait, messages):
""" Load a log file directly (services the cache)
log: a relative path to the log file to be parsed.
wait: should we wait for the parser (true), or return immediately
while it runs in the background (false)
"""
# is this a log directory, or a plain csv file?
log_path = os.path.join(settings.BM_LOG_DIR, log)
lastModified = os.path.getmtime(log_path)
if os.path.isdir(log_path):
rows = self.loadLogDirectory(log, wait)
else:
rows = parse_csv(log_path)
# make column names safe
num_unnamed_columns = [0]
safe_chars = frozenset('_.')
def make_column_name_safe(k, tag):
if any(c.isalnum() or c in safe_chars for c in k):
newk = ''.join(c if c.isalnum() or c in safe_chars else '.' for c in k)
if newk[0].isdigit():
newk = "_" + newk
else:
newk = tag + str(num_unnamed_columns[0])
num_unnamed_columns[0] += 1
return newk
clean_rows = []
scenario_column_names = {'logfile': 'logfile'}
value_column_names = {}
duplicate_value_columns = set()
nonnumeric_value_columns = set()
for row in rows:
# validate scenario keys
for k in row.scenario.keys():
# sanitise the column name
if k not in scenario_column_names:
scenario_column_names[k] = make_column_name_safe(k, "scenario_")
newk = scenario_column_names[k]
# rename the column in the row if necessary
if k != newk:
row.scenario[newk] = row.scenario[k]
del row.scenario[k]
# add the log's name to its scenario columns; force cast from unicode
row.scenario['logfile'] = str(log)
# validate value keys
value = {}
for k, v in row.value:
# if the value isn't numeric, we're not going to use it, so
# need to do nothing for this (k, v)
try:
v = float(v)
except ValueError:
if k not in nonnumeric_value_columns:
messages.warn("Non-numeric values for value column '%s'." % k,
"For example, scenario %s has %s value '%s'." % (
row.scenario, k, v))
nonnumeric_value_columns.add(k)
continue
# sanitise the column name
if k not in value_column_names:
value_column_names[k] = make_column_name_safe(k, "value_")
newk = value_column_names[k]
# check for duplicates that are distinct (we let repeated values
# through silently)
if newk in value and v != value[newk]:
# only output a warning once per column
if newk not in duplicate_value_columns:
messages.warn("Duplicate values for value column '%s'." % k,
"For example, scenario %s has %s values %s and %s." % (
row.scenario, k, value[newk], v))
duplicate_value_columns.add(newk)
# write the value into this row; we do this regardless of
# duplicates, so the last value always wins
value[newk] = v
# add the row
clean_rows.append(DataRow(row.scenario, value))
# summarise what we've done
logging.debug('Parsed %d results from log %s' % (len(clean_rows), log))
scenario_columns = set(scenario_column_names.values())
value_columns = set(value_column_names.values())
return clean_rows, lastModified, scenario_columns, value_columns
def loadLogDirectory(self, log, wait):
""" Tabulate a log directory into the CSV cache """
# path to the log directory
log_path = os.path.join(settings.BM_LOG_DIR, log)
# the tabulation script will output a csv file into the csv directory
csv_dir = os.path.join(settings.CACHE_ROOT, "csv")
if not os.path.exists(csv_dir):
os.mkdir(csv_dir)
csv_file = os.path.join(csv_dir, log + ".csv.gz")
# we need to re-parse the log file if the csv doesn't yet exist or the
# log directory has changed since the csv was written
lastModified = os.path.getmtime(log_path)
if not os.path.exists(csv_file) or os.path.getmtime(csv_file) < lastModified:
logging.debug("Retabulating CSV for " + log_path + " since CSV was out of date or non-existent")
if not wait:
# we're not going to wait for the parser; run it in the background
pid = subprocess.Popen([settings.LOGPARSER_PYTHON, settings.TABULATE_EXECUTABLE, log_path, csv_file, settings.CACHE_ROOT]).pid
raise LogTabulateStarted(log, pid)
else:
# call the parser directly
if settings.USE_NEW_LOGPARSER:
from plotty.results.LogParser import tabulate_log_folder
tabulate_log_folder(log_path, csv_file)
else:
from plotty.results.Tabulate import extract_csv
extract_csv(log_path, csv_file)
else:
logging.debug("Valid CSV already exists for " + log_path + ", skipping retabulation.")
# parse the resulting CSV
rows = parse_csv(csv_file)
return rows
def headers(self):
""" Returns the headers that would be used to output a table of
this data as two lists - scenario headers and value headers.
"""
scenarios = set()
values = set()
values_with_ci = set()
# XXX TODO: Why do we need to loop here? Can't we just use
# self.valueColumns and self.scenarioColumns, assuming they're being
# kept up to date?
for row in self.rows:
for key in row.scenario.iterkeys():
if key not in scenarios:
scenarios.add(key)
for key,val in row.values.items():
if key not in values:
values.add(key)
if isinstance(val, DataAggregate):
values_with_ci.add(key)
s_list = list(scenarios)
s_list.sort()
v_list = list(values)
v_list.sort()
vci_list = list(values_with_ci)
vci_list.sort()
return s_list, v_list, vci_list
def selectValueColumns(self, vals, derivedVals):
""" Selects the specified set of value columns and throws away all
others from each row in the table.
vals: a list of value columns to keep.
"""
vals = set(map(lambda x: str(x), vals))
derivedVals = set(map(lambda x: str(x), derivedVals))
derived_vals = []
subst_variable = lambda i: "s%02i" % i
value_columns_lower = dict([(str.lower(s), s) for s in self.valueColumns])
# Super hack: this avoids e.g. 'time.gc' being interpreted as referring
# to the 'time' column, thereby creating an invalid expression. So, we
# sort the possible keys by length, so 'time.gc' is always tested before
# 'time'.
value_columns_keys = value_columns_lower.keys()
value_columns_keys.sort(length_cmp)
for expr in derivedVals:
# Try to compile the derived columns
val = str.lower(str(expr))
# Replace the value column tokens in the expression with a simple
# substitution key. We'll also prepare a simple exemplar row to make
# sure the expression evaluates cleanly at this point.
statement = val
subst_key = {}
exemplar_row = {}
for i,valid_val in enumerate(value_columns_keys):
if statement.find(valid_val) > -1:
var = subst_variable(i)
statement = statement.replace(valid_val, var)
subst_key[var] = value_columns_lower[valid_val]
exemplar_row[var] = 1.0
# It's a clean and reasonable expression, prepare it properly.
# This is a hack that lets us be more flexible with value column
# names. A number of column names (particularly stats outputs
# from MMTk) are invalid python identifiers.
try:
# This is safe - compile won't evaluate the code, just parse it
compiled = compile(statement, statement, 'eval')
except SyntaxError:
raise PipelineError("The expression '%s' is not a valid Python expression" % expr)
except ValueError:
raise PipelineError("The expression '%s' is not a valid Python expression" % expr)
# Now try evaluating it, without access to the standard library
try:
v = eval(compiled, {'__builtins__': None}, exemplar_row)
except:
raise PipelineError("The expression '%s' is not a valid Python expression" % expr)
derived_vals.append((expr, compiled, subst_key.copy()))
# From now on, the derived value cols should be treated exactly like
# any other value column
vals.add(expr)
for row in self.rows:
# Calculate derived cols first, since they might not be selected
# in their own right.
for name,code,subst in derived_vals:
# Calculate the substitution dictionary
evaled_subst = {}
invalid = False
for token,key in subst.items():
if key not in row.values:
invalid = True
break
else:
evaled_subst[token] = row.values[key]
if invalid:
continue
# Evaluate the code with none of the builtin functions available.
# This means none of the python builtin methods, which include the
# import statement, are available to the code. This is pretty good
# security, but does restrict us somewhat in mathematics.
try:
row.values[name] = eval(code, {'__builtins__': None}, evaled_subst)
except:
continue
# Now select the value columns we're after
for (key,val) in row.values.items():
if key not in vals:
del row.values[key]
self.valueColumns = vals
self.valueColumnsDisplay = dict([(x,x if x not in self.valueColumnsDisplay else self.valueColumnsDisplay[x]) for x in vals])
def selectScenarioColumns(self, cols):
""" Selects the specified set of scenario columns and throws away all
others from each row in the table.
cols: a list of scenario columns to keep.
"""
for row in self.rows:
for (key,val) in row.scenario.items():
if key not in cols:
del row.scenario[key]
self.scenarioColumns = set(cols)
def getScenarioValues(self):
scenarioValues = {}
for row in self.rows:
for col in row.scenario:
if col not in scenarioValues:
scenarioValues[col] = set()
scenarioValues[col].add(row.scenario[col])
for k in scenarioValues.iterkeys():
valuesList = list(scenarioValues[k])
formattedValues = []
otherValues = []
for v in valuesList:
if isinstance(v, ScenarioValue):
formattedValues.append(v)
else:
otherValues.append(v)
formattedValues.sort(key=lambda fv: fv.index)
otherValues.sort()
formattedValues.extend(otherValues)
scenarioValues[k] = formattedValues
return scenarioValues
def renderToTable(self):
""" Renders the values in this data table into a HTML table. """
scenarios, values, _ = self.headers()
output = '<table class="results"><thead>'
for name in scenarios:
output += '<th class="scenario-header">' + name + '</th>'
for name in values:
output += '<th class="value-header">' + name + '</th>'
output += '</thead><tbody>'
for row in self.rows:
s = '<tr>'
for key in scenarios:
if key in row.scenario:
if isinstance(row.scenario[key], ScenarioValue):
s+= '<td title="' + row.scenario[key].value + '">' + row.scenario[key].display + '</td>'
else:
s+= '<td>' + str(row.scenario[key]) + '</td>'
else:
s+= '<td>*</td>'
for key in values:
if key in row.values:
s += '<td>' + present_value(row.values[key]) + '</td>'
else:
s += '<td>*</td>'
s += '</tr>'
output += s
output += '</tbody></table>'
return output
def renderToCSV(self):
scenarios, values, values_with_ci = self.headers()
scenarios.sort(key=str.lower)
values.sort(key=str.lower)
output = ''
for name in scenarios:
output += '"' + name + '",'
for name in values:
output += '"' + name + '",'
if name in values_with_ci:
output += '"' + name + '.' + str(settings.CONFIDENCE_LEVEL * 100) + '%-CI.lowerBound",'
output += '"' + name + '.' + str(settings.CONFIDENCE_LEVEL * 100) + '%-CI.upperBound",'
if len(output)>0 and output[-1] == ',':
output = output[:-1]
output += "\r\n"
for row in self.rows:
for key in scenarios:
if key in row.scenario:
output += '"' + str(row.scenario[key]) + '",'
else:
output += '"",'
for key in values:
if key in row.values:
output += present_value_csv(key, row.values[key], values_with_ci) + ','
else:
if key in values_with_ci:
output += '"","",""'
else:
output += '"",'
if output[-1] == ',':
output = output[:-1]
output += "\r\n"
return output
class DataRow:
""" A simple object that holds a row of data. The data is stored in two
dictionaries - DataRow.scenario for the scenario columns, and
DataRow.values for the value columns.
"""
def __init__(self, scenario=None, values=None):
if scenario is None:
scenario = {}
if values is None:
values = {}
self.values = values
self.scenario = scenario
def __repr__(self):
return '(DataRow scenario=%s values=%s)' % (self.scenario, self.values)
class ScenarioValue:
def __init__(self, indexOrOther, value=None, display=None, group = None, color = None):
if not value is None:
self.index = indexOrOther
self.value = value
self.display = display
self.group = group
self.color = color
elif isinstance(indexOrOther, ScenarioValue):
self.index = indexOrOther.index
self.value = indexOrOther.value
self.display = indexOrOther.display
self.group = indexOrOther.group
self.color = indexOrOther.color
else:
self.index = None
self.value = str(indexOrOther)
self.display = str(indexOrOther)
self.group = None
self.color = None
    def isFormatted(self):
return not self.index is None
def __str__(self):
return str(self.display)
def __float__(self):
raise PipelineError("ScenarioValue shouldn't treated as a float")
def __ne__(self, other):
return not (self == other)
def __eq__(self, other):
if isinstance(other, ScenarioValue):
return self.value == other.value
return self.value == other
def __cmp__(self, other):
raise PipelineError("ScenarioValues shouldn't be compared directly")
def __hash__(self):
return hash(self.value)
class DataAggregate:
""" Holds an aggregate of values that were mutliple rows but have been
condensed into one as part of an Aggregate block. This object can
report the mean or geomean of those values, as well as their minimum
and maximum, and standard deviation and a confidence interval (with
confidence decided by settings.CONFIDENCE_LEVEL). It is also
possible to divide two DataAggregates (generally for normalisation),
in which case relevant statistical techniques are used to determine
the new confidence interval and standard deviation.
"""
def __init__(self, newType):
""" Create a new DataAggregate of the specified type.
newType: either 'mean' or 'geomean', the type of aggregate
reported by this object.
"""
self.type = newType
self._isValid = False
self._values = []
# Private methods
def _calculate(self):
""" Calculates the summary statistics for data in self._values. This
method only does calculations for a single variable - calculations
for a compound variable (A + B, A / B, etc, where A and B are
DataAggregates) should be handled by the appropriate operator
overload below.
"""
valMin = float('+inf')
valMax = float('-inf')
valMean = 0.0
valM2 = 0.0
valLogSum = 0.0
n = 0
allow_cis = len(self._values) > 1
for val in self._values:
# We can also aggregate sets of DataAggregates
if isinstance(val, DataAggregate):
val = val.value()
allow_cis = False
n += 1
if val < valMin:
valMin = val
if val > valMax:
valMax = val
if self.type == 'geomean':
if valLogSum is None:
continue
# If any value is zero, the geomean is also zero
if val == 0:
valLogSum = None
continue
valLogSum += math.log(val)
else:
delta = val - valMean
valMean += delta/n
valM2 += delta * (val - valMean)
self._min = valMin
self._max = valMax
if self.type == 'geomean':
if valLogSum is not None:
self._value = math.exp(valLogSum / n)
else:
self._value = 0.0
self._stdev = 0
self._ciUp = self._ciDown = float('nan')
elif self.type == 'mean':
self._value = valMean
if allow_cis:
self._stdev = math.sqrt(valM2 / (n - 1))
ciDelta = t_quantile(1 - settings.CONFIDENCE_LEVEL, n-1) * self._stdev / math.sqrt(n)
self._ciUp = self._value + ciDelta
self._ciDown = self._value - ciDelta
else:
self._stdev = 0
self._ciUp = self._ciDown = float('nan')
self._isValid = True
# Mutators
def append(self, value):
""" Push a new value into this aggregate. """
self._values.append(value)
self._isValid = False
def map(self, func):
""" Apply a function to every value in this aggregate. """
self._isValid = False
self._values = map(func, self._values)
def setType(self, newType):
""" Change the type of this aggregate.
newType : either 'mean' or 'geomean'.
"""
self.type = newType
self._isValid = False
def manual(self, value, ciUp, ciDown, newMin, newMax):
""" Set the values of this DataAggregate manually. Used by operator
overloads.
"""
self._value = value
self._ciUp = ciUp
self._ciDown = ciDown
self._min = newMin
self._max = newMax
self._isValid = True
# Getters
def value(self):
if not self._isValid:
self._calculate()
return self._value
def values(self):
return self._values
def stdev(self):
if not self._isValid:
self._calculate()
return self._stdev
def count(self):
if not self._isValid:
self._calculate()
return len(self._values)
def sem(self):
if not self._isValid:
self._calculate()
return self._stdev / math.sqrt(len(self._values))
def min(self):
if not self._isValid:
self._calculate()
return self._min
def max(self):
if not self._isValid:
self._calculate()
return self._max
def ci(self):
if not self._isValid:
self._calculate()
return self._ciDown, self._ciUp
def ciPercent(self):
if not self._isValid:
self._calculate()
if math.isnan(self._ciUp):
return self._ciDown, self._ciUp
if self._value == 0:
return float('NaN'), float('NaN')
ciDown = (self._value - self._ciDown) * 100 / self._value
ciUp = (self._ciUp - self._value) * 100 / self._value
return ciDown, ciUp
# Overloads
def __repr__(self):
if not self._isValid:
self._calculate()
if math.isnan(self._ciUp):
return "%.3f" % self._value
else:
return "%.3f CI(%.3f, %.3f) min=%.3f max=%.3f vals=%s" % (self._value, self._ciDown, self._ciUp, self._min, self._max, self._values)
def __str__(self):
return self.__repr__()
def __float__(self):
return self.value()
def __cmp__(self, other):
if float(self) > float(other):
return 1
elif float(self) < float(other):
return -1
else:
return 0
def __div__(self, other):
""" Divides this DataAggregate by some other value. If the other value
is a DataAggregate, statistical techniques are used to compute the
new value and standard error. If not, we just divide every value
in this DataAggregate by the other value, and force the summary
data to be regenerated.
"""
if isinstance(other, DataAggregate):
#logging.debug(other)
res = DataAggregate(self.type)
            if other.value() != 0:
val = self.value() / other.value()
else:
val = math.copysign(float('inf'), self.value())
# Motulsky, 'Intuitive Biostatistics', pp285-6
            if self.value() != 0 and other.value() != 0:
tinv = t_quantile(1 - settings.CONFIDENCE_LEVEL, self.count() + other.count() - 2)
g = (tinv * (other.sem() / other.value()))**2
if g >= 1.0:
ciUp = ciDown = float('nan')
else:
sem = ( val / (1-g) ) * math.sqrt((1-g) * (self.sem() / self.value())**2 + (other.sem() / other.value()) ** 2)
ciUp = ( val / (1-g) ) + tinv*sem
ciDown = ( val / (1-g) ) - tinv*sem
else:
ciUp = ciDown = float('nan')
            if other.max() != 0 and other.min() != 0:
valMin = self.min() / other.max()
valMax = self.max() / other.min()
else:
valMin = math.copysign(float('inf'), self.min())
valMax = math.copysign(float('inf'), self.max())
res.manual(value=val, ciUp=ciUp, ciDown=ciDown, newMin=valMin, newMax=valMax)
return res
else:
res = copy.copy(self)
res.map(lambda d: d / float(other))
return res
|
import numpy as np
from numpy import sqrt as sqrt
from numpy import cos as cos
from numpy import sin as sin
import matplotlib.pyplot as plt
from matplotlib import cm as cm
from matplotlib.ticker import LinearLocator as LinearLocator
from matplotlib.ticker import FormatStrFormatter as FormatStrFormatter
from numpy.fft import fftshift as fftshift
from numpy.fft import ifftshift as ifftshift
from numpy.fft import fft2 as fft2
from mpl_toolkits.mplot3d import Axes3D  # noqa: registers the '3d' projection used by fig.gca below
def apershow(obj):
obj = -abs(obj)
plt.imshow(obj)
plt.set_cmap('Greys')
plt.show()
l1 = 100
#Generate test surface matrix from a detector
x = np.linspace(-1, 1, l1)
y = np.linspace(-1, 1, l1)
[X,Y] = np.meshgrid(x,y)
r = sqrt(X**2+Y**2)
Z = sqrt(14)*(8*X**4-8*X**2*r**2+r**4)*(6*r**2-5)
for i in range(len(Z)):
for j in range(len(Z)):
if x[i]**2+y[j]**2>1:
Z[i][j]=0
fig = plt.figure(1)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.RdYlGn,
linewidth=0, antialiased=False, alpha = 0.6)
v = max(abs(Z.max()),abs(Z.min()))
ax.set_zlim(-v*5, v*5)
cset = ax.contourf(X, Y, Z, zdir='z', offset=-v*5, cmap=cm.RdYlGn)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=1, aspect=30)
plt.show()
d = 400
A = np.zeros([d,d])
A[d//2-49:d//2+51, d//2-49:d//2+51] = Z
plt.imshow(A)
plt.show()
abbe = np.exp(1j*2*np.pi*A)
for i in range(len(abbe)):
for j in range(len(abbe)):
if abbe[i][j]==1:
abbe[i][j]=0
fig = plt.figure(2)
AP = abs(fftshift(fft2(fftshift(abbe))))**2
AP = AP/AP.max()
plt.imshow(AP)
plt.show()
|
'''
Variables: Creation, Initialization, Saving, and Loading
When you train a model, you use variables to hold and update parameters. Variables are in-memory buffers containing tensors. They must be
explicitly initialized and can be saved to disk during and after training. You can later restore saved values to exercise or analyze the
model.
We'll see two classes:
The tf.Variable class. (It has tf.Variable() constructor)
The tf.train.Saver class.
'''
import tensorflow as tf  # needed by everything below (TF 1.x API)

#Creation
somevar = tf.Variable([1.0, 2.0]) #[1.0, 2.0] is also a tensor, don't you think, iit? :p
# Create two variables.
weights = tf.Variable(tf.random_normal([784, 200], stddev=0.35), name="weights")
biases = tf.Variable(tf.zeros([200]), name="biases")
#we're passing a tensor(an n-dim array) to the Variable constructor to construct a variable
'''
Calling tf.Variable() adds several ops to the graph:
- A variable op that holds the variable value.
- An initializer op that sets the variable to its initial value. This is actually a tf.assign op.
- The ops for the initial value, such as the zeros op for the biases variable in the example are also added to the graph.
The value returned by tf.Variable() is an instance of the Python class tf.Variable (obvious na! :/ )
'''
#Initialization
'''
already covered but we will revisit again! :)
'Variable Initializers' must be run explicitly before other ops in your model can be run.
'''
# Create two variables.
weights = tf.Variable(tf.random_normal([784, 200], stddev=0.35),
name="weights")
biases = tf.Variable(tf.zeros([200]), name="biases")
...
# Add an op to initialize the variables.
init_op = tf.global_variables_initializer()
# Later, when launching the model
with tf.Session() as sess:
# Run the init operation at the very start of the session
sess.run(init_op)
...
# Use the model
...
#Initialization from another Variable
'''
You sometimes need to initialize a variable from the initial value of another variable.
As the op added by tf.global_variables_initializer() initializes all variables in parallel you have to be careful
when this is needed.
To initialize a new variable from the value of another variable use the other variable's initialized_value() property.
You can use the initialized value directly as the initial value for the new variable, or you can use it as any other
tensor to compute a value for the new variable.
'''
# Create a variable with a random value.
weights = tf.Variable(tf.random_normal([784, 200], stddev=0.35),
name="weights")
# Create another variable with the same value as 'weights'.
w2 = tf.Variable(weights.initialized_value(), name="w2") #this is like w2 = weights
# Create another variable with twice the value of 'weights'
w_twice = tf.Variable(weights.initialized_value() * 2.0, name="w_twice")
#Custom Initialization
'''
The convenience function tf.global_variables_initializer() adds an op to initialize all variables in the model.
You can also pass an explicit list of variables to initialize to tf.variables_initializer.
We'll see this later.
'''
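'''
A hedged sketch (checkpoint path and op name are made up) of tf.variables_initializer
on a subset of variables, plus tf.train.Saver, which the intro mentioned for saving and
restoring variables to disk.
'''
# Initialize only `weights` and `biases`, not every variable in the graph.
init_subset_op = tf.variables_initializer([weights, biases], name="init_subset")
# A Saver writes and reads checkpoints for the variables in the graph.
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init_subset_op)
    save_path = saver.save(sess, "/tmp/model.ckpt")  # hypothetical path
    saver.restore(sess, save_path)                   # restore later or elsewhere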
|
#!//dls_sw/prod/R3.14.12.3/support/pythonSoftIoc/2-11/pythonIoc
from pkg_resources import require
require('cothread==2.13')
#require('epicsdbbuilder==1.0')
require("numpy")
import argparse
import logging
import PIStepScan
import PIController
from PIConstants import *
def parse_arguments():
parser = argparse.ArgumentParser(description="Run a calibration routine for PI E727 controller analogue output. "
"Will set up a waveform to step one axis over a specified range, "
"issuing triggers at each step that are used to trigger an external "
"measurement device (assume a Zebra) to measure the output signal "
"(at the moment you have to configure your measurement device separately, this script "
"doesn't do that).")
parser.add_argument("--address", type=str, help="IP address of controller")
parser.add_argument("--port", type=int, help="IP port number of controller", default=50000)
parser.add_argument("--axis", type=int, choices=[1, 2, 3], help="Axis number to scan", required=True)
parser.add_argument("--start", type=float, help="Start position (EGU)", required=True)
parser.add_argument("--end", type=float, help="End position (EGU)", required=True)
parser.add_argument("-s", "--number_of_steps", type=int, help="Number of steps to make", required=True)
parser.add_argument("-m", "--move_time", type=int, help="Time for each move, per step (ms)")
parser.add_argument("-t", "--time_in_position", type=int, help="Time to wait in position at each step (ms)", required=True)
parser.add_argument("-n", "--n_repeats", type=int, default=1, help="Repeat the scan n times")
parser.add_argument("-d", "--dryrun", action="store_true", help="Print commands instead of sending them")
return parser.parse_args()
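# Hedged example invocation (script name, address and numbers are made up; the flags
# required by the parser above are --axis, --start, --end, -s and -t):
#
#   ./calibrate_analogue_output.py --address 172.23.82.5 --axis 1 --start 0.0 \
#       --end 100.0 -s 50 -t 100 -m 20 -n 3 --dryrun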
def create_params(args):
# Create parameter objects to push into the PIStepScan class
param_dict = {"STATE": STATE_NOT_CONFIGRED,
"NX": args.number_of_steps,
"NY": 1,
"NZ": 1,
"DX": (args.end - args.start) / float(args.number_of_steps),
"DY": 1,
"DZ": 1,
"X0": args.start,
"Y0": 50,
"Z0": 50,
"MOVETIME": args.move_time,
"EXPOSURE": args.time_in_position,
"axis_to_scan": args.axis
}
return param_dict
def main():
args = parse_arguments()
logging.basicConfig(level=logging.DEBUG)
# Create controller object
pi_controller = PIController.PIController(args.address, args.port,
debug=args.dryrun)
step_scan = PIStepScan.PICalibrationScan(pi_controller)
step_scan.insert_params(create_params(args))
step_scan.get_scan_parameters()
step_scan.configure_scan()
step_scan.start_scan()
if __name__ == "__main__":
    main()
|
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
###############################################################################
# Name: test_ft_lag_statistics.py
#
# Description: Tests that a configured static Link Aggregation can summarize
# accurate individual interfaces statistics as they change
#
# Author: Jose Hernandez
#
# Topology: |Host| ----- |Switch| ---------------------- |Switch| ----- |Host|
# (Static LAG - 3 links)
#
# Success Criteria: PASS -> Statistics are accurate and represent the sum of
# each individual member
#
# FAILED -> Information from the statistics does not
# represent the sum of each individual member
#
###############################################################################
from time import sleep
from lacp_lib import create_lag
from lacp_lib import turn_on_interface
from lacp_lib import validate_turn_on_interfaces
from lacp_lib import associate_interface_to_lag
from lacp_lib import verify_lag_config
from lacp_lib import create_vlan
from lacp_lib import verify_vlan_full_state
from lacp_lib import check_connectivity_between_hosts
from lacp_lib import associate_vlan_to_l2_interface
from lacp_lib import associate_vlan_to_lag
from lacp_lib import verify_lag_interface_key
from lacp_lib import verify_lag_interface_priority
from lacp_lib import verify_lag_interface_id
from lacp_lib import verify_lag_interface_system_id
from lacp_lib import verify_lag_interface_system_priority
from lacp_lib import verify_lag_interface_lag_id
from lacp_lib import validate_lag_state_static
from lacp_lib import LOCAL_STATE
from lacp_lib import REMOTE_STATE
from lacp_lib import retry_wrapper
from lacp_lib import compare_lag_interface_basic_settings
import pytest
TOPOLOGY = """
# +-----------------+
# | |
# | Host 1 |
# | |
# +-----------------+
# |
# |
# +-------------------------------+
# | |
# | |
# | Switch 1 |
# | |
# +-------------------------------+
# | | |
# | | |
# | | |
# +-------------------------------+
# | |
# | |
# | Switch 2 |
# | |
# +-------------------------------+
# |
# |
# +-----------------+
# | |
# | Host 2 |
# | |
# +-----------------+
# Nodes
[type=openswitch name="Switch 1"] sw1
[type=openswitch name="Switch 2"] sw2
[type=host name="Host 1" image="openswitch/ubuntutest:latest"] hs1
[type=host name="Host 2" image="openswitch/ubuntutest:latest"] hs2
# Links
sw1:1 -- hs1:1
sw2:1 -- hs2:1
sw1:2 -- sw2:2
sw1:3 -- sw2:3
sw1:4 -- sw2:4
"""
# Global variables
SW_LBL_PORTS = ['1', '2', '3', '4']
LAG_ID = '1'
LAG_VLAN = 900
NETWORK = '10.90.0.'
NETMASK = '24'
NUMBER_PINGS = 5
BASE_IPERF_PORT = 5000
IPERF_TIME = 30
IPERF_BW = '10m'
SW_COUNTERS_DELAY = 20
def compare_values(value1, value2, error=0.02):
value2_max = value2 * (1 + error)
value2_min = value2 * (1 - error)
assert value1 <= value2_max, ' '.join(
['Value of {} is more than {}'.format(value1, error * 100),
'percent higher than {}'.format(value2)]
)
assert value1 >= value2_min, ' '.join(
['Value of {} is more than {}'.format(value1, error * 100),
'percent lower than {}'.format(value2)]
)
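# A short illustration of the default 2% tolerance above (numbers are hypothetical):
#   compare_values(101, 100) passes, since 98.0 <= 101 <= 102.0
#   compare_values(103, 100) raises, since 103 > 102.0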
def compare_switches_counters(sw_list, stats):
# Verify switches counters match
for port in SW_LBL_PORTS:
print('Compare port {} between switches'.format(port))
print('rx from sw1 vs tx from sw2')
compare_values(
int(stats[sw_list[0]][port]['{}_packets'.format('rx')]),
int(stats[sw_list[1]][port]['{}_packets'.format('tx')])
)
print('tx from sw1 vs rx from sw2')
compare_values(
int(stats[sw_list[0]][port]['{}_packets'.format('tx')]),
int(stats[sw_list[1]][port]['{}_packets'.format('rx')])
)
def compare_lag_to_switches_counters(sw_stats, lag_stats, ports):
for param in [
'rx_bytes',
'rx_packets',
'rx_crc_fcs',
'rx_dropped',
'rx_error',
'tx_packets',
'tx_bytes',
'tx_collisions',
'tx_dropped',
'tx_errors',
'speed'
]:
total = 0
for port in ports:
total += int(sw_stats[port][param])
print('Verifying LAG interface value for {}'.format(param))
compare_values(int(lag_stats[param]), total)
for port in ports:
assert lag_stats['speed_unit'] == sw_stats[port]['speed_unit'],\
'Unexpected change in speed unit {}, Expected {}'.format(
lag_stats['speed_unit'],
sw_stats[port]['speed_unit']
)
def verify_lacp_state(
sw1,
sw2
):
lacp_def_value = ''
lacp_def_priority = ''
sys_id = ''
print('Verify LACP state on LAG members')
for port in SW_LBL_PORTS[1:]:
sw1_lacp_state = sw1.libs.vtysh.show_lacp_interface(port)
sw2_lacp_state = sw2.libs.vtysh.show_lacp_interface(port)
sw_lacp_states = [sw1_lacp_state, sw2_lacp_state]
for (
sw_lacp_state,
rev_sw_lacp_state
) in zip(
sw_lacp_states,
reversed(sw_lacp_states)
):
verify_lag_interface_lag_id(sw_lacp_state)
verify_lag_interface_key(
sw_lacp_state,
rev_sw_lacp_state,
key=lacp_def_value,
value_check=True,
cross_check=True
)
verify_lag_interface_priority(
sw_lacp_state,
rev_sw_lacp_state,
priority=lacp_def_value,
value_check=True,
cross_check=True
)
verify_lag_interface_system_priority(
sw_lacp_state,
sw2_int_map_lacp=rev_sw_lacp_state,
system_priority=lacp_def_priority,
value_check=True,
cross_check=True
)
verify_lag_interface_system_id(
sw_lacp_state,
sw2_int_map_lacp=rev_sw_lacp_state,
system_id=sys_id,
value_check=True,
cross_check=True
)
verify_lag_interface_id(
sw_lacp_state,
rev_sw_lacp_state,
id='',
value_check=True,
cross_check=True
)
validate_lag_state_static(sw_lacp_state, LOCAL_STATE)
validate_lag_state_static(sw_lacp_state, REMOTE_STATE)
def enable_switches_interfaces(sw_list, step):
step('Enable switches interfaces')
for sw in sw_list:
for port in SW_LBL_PORTS:
turn_on_interface(sw, port)
# Defining internal method to use decorator
@retry_wrapper(
'Ensure interfaces are turned on',
'Interfaces not yet ready',
5,
60)
def internal_check_interfaces(sw_list):
for sw in sw_list:
validate_turn_on_interfaces(sw, SW_LBL_PORTS)
internal_check_interfaces(sw_list)
def configure_lags(sw_list, sw_real_ports, step):
step('Create LAGs')
for sw in sw_list:
create_lag(sw, LAG_ID, 'off')
for port in sw_real_ports[sw][1:]:
associate_interface_to_lag(sw, port, LAG_ID)
verify_lag_config(
sw,
LAG_ID,
sw_real_ports[sw][1:]
)
check_func = retry_wrapper(
'Verify LACP status on both devices',
'Configuration not yet applied',
2,
4
)(verify_lacp_state)
check_func(
sw_list[0],
sw_list[1]
)
def configure_vlans(sw_list, sw_real_ports, step):
step('Configure VLANs on devices')
for sw in sw_list:
# Create VLAN
create_vlan(sw, LAG_VLAN)
# Associate VLAN to LAG
associate_vlan_to_lag(sw, str(LAG_VLAN), LAG_ID)
# Associate VLAN to host interface
associate_vlan_to_l2_interface(
sw,
str(LAG_VLAN),
sw_real_ports[sw][0]
)
# Verify VLAN configuration was successfully applied
verify_vlan_full_state(
sw,
LAG_VLAN, interfaces=[
sw_real_ports[sw][0],
'lag{}'.format(LAG_ID)
]
)
def configure_workstations(hs_list, step):
step('Configure workstations')
for hs_num, hs in enumerate(hs_list):
hs.libs.ip.interface(
SW_LBL_PORTS[0],
addr='{}{}/{}'.format(NETWORK, hs_num + 1, NETMASK),
up=True
)
def validate_connectivity(hs_list, wait, step):
step('Check workstations connectivity')
if wait is False:
check_connectivity_between_hosts(
hs_list[0],
'{}{}'.format(NETWORK, 1),
hs_list[1],
'{}{}'.format(NETWORK, 2),
NUMBER_PINGS,
True
)
else:
check_func = retry_wrapper(
'Verifying workstations connectivity',
'Configuration not yet applied',
5,
15
)(check_connectivity_between_hosts)
check_func(
hs_list[0],
'{}{}'.format(NETWORK, 1),
hs_list[1],
'{}{}'.format(NETWORK, 2),
NUMBER_PINGS,
True
)
def verify_lag_statistics(sw_list, hs_list, sw_real_ports, step):
sw_stats_before = {}
sw_stats_after = {}
step('Verify LAG statistics')
print('Collect all switch ports statistics')
for sw in sw_list:
sw_stats_before[sw] = {}
for port in SW_LBL_PORTS:
sw_stats_before[sw][port] = sw.libs.vtysh.show_interface(port)
sw_stats_before[sw]['lag{}'.format(LAG_ID)] =\
sw.libs.vtysh.show_interface('lag{}'.format(LAG_ID))
print('Verify LAG statistic interface basic settings')
compare_lag_interface_basic_settings(
sw_stats_before[sw]['lag{}'.format(LAG_ID)],
LAG_ID,
sw_real_ports[sw][1:]
)
print('Compare LAG counters vs switches interfaces')
compare_lag_to_switches_counters(
sw_stats_before[sw],
sw_stats_before[sw]['lag{}'.format(LAG_ID)],
SW_LBL_PORTS[1:]
)
print('Start iperf servers')
for i, hs in enumerate(hs_list):
hs.libs.iperf.server_start(BASE_IPERF_PORT + i, udp=True)
print('Start traffic transmission')
for hs, other_base in zip(hs_list, [2, 1]):
hs.libs.iperf.client_start(
'{}{}'.format(NETWORK, other_base),
BASE_IPERF_PORT + other_base - 1,
time=IPERF_TIME,
udp=True,
bandwidth=IPERF_BW
)
print('Wait for traffic to finish in {} seconds'.format(IPERF_TIME))
sleep(IPERF_TIME)
print('Stop iperf')
for hs in hs_list:
hs.libs.iperf.server_stop()
hs.libs.iperf.client_stop()
@retry_wrapper(
'Obtain interfaces information and verify their consistency',
'Information provided by counters is not yet reliable',
5,
SW_COUNTERS_DELAY)
def internal_check():
print('Get statistics from switches')
for sw in sw_list:
sw_stats_after[sw] = {}
for port in SW_LBL_PORTS:
sw_stats_after[sw][port] = sw.libs.vtysh.show_interface(port)
sw_stats_after[sw]['lag{}'.format(LAG_ID)] =\
sw.libs.vtysh.show_interface('lag{}'.format(LAG_ID))
print('Verify obtained information is consistent')
        print('Compare counters between switches')
compare_switches_counters(sw_list, sw_stats_after)
internal_check()
print('Compare switch counters individually')
for sw in sw_list:
print('Verify LAG statistic interface basic settings')
compare_lag_interface_basic_settings(
sw_stats_after[sw]['lag{}'.format(LAG_ID)],
LAG_ID,
sw_real_ports[sw][1:]
)
print('Compare LAG counters vs switches interfaces')
compare_lag_to_switches_counters(
sw_stats_after[sw],
sw_stats_after[sw]['lag{}'.format(LAG_ID)],
SW_LBL_PORTS[1:]
)
@pytest.mark.skipif(True, reason="Skipping due to instability")
def test_ft_lag_statistics(topology, step):
hs1 = topology.get('hs1')
hs2 = topology.get('hs2')
sw1 = topology.get('sw1')
sw2 = topology.get('sw2')
assert hs1 is not None, 'hs1 was not initialized'
assert hs2 is not None, 'hs2 was not initialized'
assert sw1 is not None, 'sw1 was not initialized'
assert sw2 is not None, 'sw2 was not initialized'
sw_real_ports = {
sw1: [sw1.ports[port] for port in SW_LBL_PORTS],
sw2: [sw2.ports[port] for port in SW_LBL_PORTS]
}
# Enable switches interfaces
enable_switches_interfaces([sw1, sw2], step)
# Configure static LAGs with members
configure_lags([sw1, sw2], sw_real_ports, step)
# Add VLAN configuration to LAGs and workstation interfaces
configure_vlans([sw1, sw2], sw_real_ports, step)
# Configure workstations
configure_workstations([hs1, hs2], step)
# Validate workstations can communicate
validate_connectivity([hs1, hs2], True, step)
# Obtain and validate LAG statistics
verify_lag_statistics([sw1, sw2], [hs1, hs2], sw_real_ports, step)
|
from django.urls import path
from . import views
urlpatterns = [
path('',views.add_show,name="addandshow"),
path('delete/<int:id>/',views.delete_data,name="deletedata"),
path('<int:id>/',views.update_data,name="updatedata"),
]
|
from flask import request
from flask_restplus import Resource, Namespace, fields
ns_user = Namespace("users", description="Users related operations")
user_model = ns_user.model(
"user",
{
"email": fields.String(required=True, description="user email address"),
"username": fields.String(required=True, description="user username"),
},
)
@ns_user.route("/ping")
class UsersPing(Resource):
def get(self):
return {"status": "success", "message": "pong!"}
@ns_user.route("/")
class UsersList(Resource):
@ns_user.doc("create a new user")
@ns_user.expect(user_model, validate=True)
def post(self):
from project.api.service.storage.users import store_user
post_data = request.get_json()
username = post_data.get("username")
email = post_data.get("email")
store_user(username, email)
response_object = {
"status": "success",
"message": "{} was added!".format(email),
}
return response_object, 201
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-06-15 20:55
import logging
from typing import Union, List
import torch
from torch import nn
from torch.utils.data import DataLoader
from elit.common.dataset import PadSequenceDataLoader, SamplerBuilder, TransformableDataset
from elit.common.structure import History
from elit.common.transform import FieldLength, TransformList
from elit.common.vocab import Vocab
from elit.components.classifiers.transformer_classifier import TransformerComponent
from elit.components.taggers.tagger import Tagger
from elit.datasets.ner.tsv import TSVTaggingDataset
from elit.layers.crf.crf import CRF
from elit.layers.transformers.encoder import TransformerEncoder
from elit.transform.transformer_tokenizer import TransformerSequenceTokenizer
from elit.utils.time_util import CountdownTimer
from elit.utils.torch_util import clip_grad_norm
from elit.common.util import merge_locals_kwargs
from alnlp.modules.util import lengths_to_mask
# noinspection PyAbstractClass
class TransformerTaggingModel(nn.Module):
def __init__(self,
encoder: TransformerEncoder,
num_labels,
crf=False,
secondary_encoder=None) -> None:
"""
        A shallow tagging model that uses a transformer as the encoder.
Args:
encoder: A pretrained transformer.
num_labels: Size of tagset.
crf: True to enable CRF.
crf_constraints: The allowed transitions (from_label_id, to_label_id).
"""
super().__init__()
self.encoder = encoder
self.secondary_encoder = secondary_encoder
# noinspection PyUnresolvedReferences
self.classifier = nn.Linear(encoder.transformer.config.hidden_size, num_labels)
self.crf = CRF(num_labels) if crf else None
def forward(self, lens: torch.LongTensor, input_ids, token_span, token_type_ids=None):
mask = lengths_to_mask(lens)
x = self.encoder(input_ids, token_span=token_span, token_type_ids=token_type_ids)
if self.secondary_encoder:
x = self.secondary_encoder(x, mask=mask)
x = self.classifier(x)
return x, mask
class TransformerTagger(TransformerComponent, Tagger):
def __init__(self, **kwargs) -> None:
"""A simple tagger using a linear layer with an optional CRF (:cite:`lafferty2001conditional`) layer for
any tagging tasks including PoS tagging and many others.
Args:
**kwargs: Not used.
"""
super().__init__(**kwargs)
self._tokenizer_transform = None
self.model: TransformerTaggingModel = None
# noinspection PyMethodOverriding
def fit_dataloader(self,
trn: DataLoader,
criterion,
optimizer,
metric,
logger: logging.Logger,
history: History,
gradient_accumulation=1,
grad_norm=None,
transformer_grad_norm=None,
teacher: Tagger = None,
kd_criterion=None,
temperature_scheduler=None,
ratio_width=None,
**kwargs):
optimizer, scheduler = optimizer
if teacher:
scheduler, lambda_scheduler = scheduler
else:
lambda_scheduler = None
self.model.train()
timer = CountdownTimer(history.num_training_steps(len(trn), gradient_accumulation=gradient_accumulation))
total_loss = 0
for idx, batch in enumerate(trn):
out, mask = self.feed_batch(batch)
y = batch['tag_id']
loss = self.compute_loss(criterion, out, y, mask)
if gradient_accumulation and gradient_accumulation > 1:
loss /= gradient_accumulation
if teacher:
with torch.no_grad():
out_T, _ = teacher.feed_batch(batch)
# noinspection PyNoneFunctionAssignment
kd_loss = self.compute_distill_loss(kd_criterion, out, out_T, mask, temperature_scheduler)
_lambda = float(lambda_scheduler)
loss = _lambda * loss + (1 - _lambda) * kd_loss
loss.backward()
total_loss += loss.item()
prediction = self.decode_output(out, mask, batch)
self.update_metrics(metric, out, y, mask, batch, prediction)
if history.step(gradient_accumulation):
self._step(optimizer, scheduler, grad_norm, transformer_grad_norm, lambda_scheduler)
report = f'loss: {total_loss / (idx + 1):.4f} {metric}'
timer.log(report, logger=logger, ratio_percentage=False, ratio_width=ratio_width)
del loss
del out
del mask
def _step(self, optimizer, scheduler, grad_norm, transformer_grad_norm, lambda_scheduler):
clip_grad_norm(self.model, grad_norm, self.model.encoder.transformer, transformer_grad_norm)
optimizer.step()
scheduler.step()
if lambda_scheduler:
lambda_scheduler.step()
optimizer.zero_grad()
def compute_distill_loss(self, kd_criterion, out_S, out_T, mask, temperature_scheduler):
logits_S = out_S[mask]
logits_T = out_T[mask]
temperature = temperature_scheduler(logits_S, logits_T)
return kd_criterion(logits_S, logits_T, temperature)
def build_model(self, **kwargs) -> torch.nn.Module:
model = TransformerTaggingModel(self.build_transformer(),
len(self.vocabs.tag),
self.config.crf,
self.config.get('secondary_encoder', None),
)
return model
# noinspection PyMethodOverriding
def build_dataloader(self, data, batch_size, shuffle, device, logger: logging.Logger = None,
sampler_builder: SamplerBuilder = None, gradient_accumulation=1, **kwargs) -> DataLoader:
if isinstance(data, TransformableDataset):
dataset = data
else:
args = dict((k, self.config.get(k, None)) for k in
['delimiter', 'max_seq_len', 'sent_delimiter', 'char_level', 'hard_constraint'])
dataset = self.build_dataset(data, **args)
if self.config.token_key is None:
self.config.token_key = next(iter(dataset[0]))
logger.info(
f'Guess [bold][blue]token_key={self.config.token_key}[/blue][/bold] according to the '
f'training dataset: [blue]{dataset}[/blue]')
dataset.append_transform(self.tokenizer_transform)
dataset.append_transform(self.last_transform())
if not isinstance(data, list):
dataset.purge_cache()
if self.vocabs.mutable:
self.build_vocabs(dataset, logger)
if sampler_builder is not None:
sampler = sampler_builder.build([len(x[f'{self.config.token_key}_input_ids']) for x in dataset], shuffle,
gradient_accumulation=gradient_accumulation if shuffle else 1)
else:
sampler = None
return PadSequenceDataLoader(dataset, batch_size, shuffle, device=device, batch_sampler=sampler)
def build_dataset(self, data, transform=None, **kwargs):
return TSVTaggingDataset(data, transform=transform, **kwargs)
def last_transform(self):
return TransformList(self.vocabs, FieldLength(self.config.token_key))
@property
def tokenizer_transform(self) -> TransformerSequenceTokenizer:
if not self._tokenizer_transform:
self._tokenizer_transform = TransformerSequenceTokenizer(self.transformer_tokenizer,
self.config.token_key,
ret_token_span=True)
return self._tokenizer_transform
def build_vocabs(self, trn, logger, **kwargs):
self.vocabs.tag = Vocab(pad_token=None, unk_token=None)
timer = CountdownTimer(len(trn))
max_seq_len = 0
token_key = self.config.token_key
for each in trn:
max_seq_len = max(max_seq_len, len(each[token_key]))
timer.log(f'Building vocab [blink][yellow]...[/yellow][/blink] (longest sequence: {max_seq_len})')
self.vocabs.tag.set_unk_as_safe_unk()
self.vocabs.lock()
self.vocabs.summary(logger)
# noinspection PyMethodOverriding
def fit(self,
trn_data,
dev_data,
save_dir,
transformer,
average_subwords=False,
word_dropout: float = 0.2,
hidden_dropout=None,
layer_dropout=0,
scalar_mix=None,
mix_embedding: int = 0,
grad_norm=5.0,
transformer_grad_norm=None,
lr=5e-5,
transformer_lr=None,
transformer_layers=None,
gradient_accumulation=1,
adam_epsilon=1e-6,
weight_decay=0,
warmup_steps=0.1,
secondary_encoder=None,
crf=False,
reduction='sum',
batch_size=32,
sampler_builder: SamplerBuilder = None,
epochs=3,
patience=5,
token_key=None,
max_seq_len=None, sent_delimiter=None, char_level=False, hard_constraint=False,
transform=None,
logger=None,
devices: Union[float, int, List[int]] = None,
**kwargs):
return super().fit(**merge_locals_kwargs(locals(), kwargs))
def feed_batch(self, batch: dict):
features = [batch[k] for k in self.tokenizer_transform.output_key]
if len(features) == 2:
input_ids, token_span = features
else:
input_ids, token_span = features[0], None
lens = batch[f'{self.config.token_key}_length']
x, mask = self.model(lens, input_ids, token_span, batch.get(f'{self.config.token_key}_token_type_ids'))
return x, mask
# noinspection PyMethodOverriding
def distill(self,
teacher: str,
trn_data,
dev_data,
save_dir,
transformer: str,
batch_size=None,
temperature_scheduler='flsw',
epochs=None,
devices=None,
logger=None,
seed=None,
**kwargs):
return super().distill(**merge_locals_kwargs(locals(), kwargs))
|
import unittest
from unittest import mock
import dbt.flags as flags
from dbt.adapters.mysql import MySQLAdapter
from .utils import config_from_parts_or_dicts, mock_connection
class TestMySQLAdapter(unittest.TestCase):
def setUp(self):
pass
flags.STRICT_MODE = True
profile_cfg = {
'outputs': {
'test': {
'type': 'mysql',
'server': 'thishostshouldnotexist',
'port': 3306,
'schema': 'dbt_test_schema',
'username': 'dbt',
'password': 'dbt',
}
},
'target': 'test'
}
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
'quoting': {
'identifier': False,
'schema': True,
},
'config-version': 2
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self._adapter = None
@property
def adapter(self):
if self._adapter is None:
self._adapter = MySQLAdapter(self.config)
return self._adapter
@mock.patch('dbt.adapters.mysql.connections.mysql.connector')
def test_acquire_connection(self, connector):
connection = self.adapter.acquire_connection('dummy')
connector.connect.assert_not_called()
connection.handle
self.assertEqual(connection.state, 'open')
self.assertNotEqual(connection.handle, None)
connector.connect.assert_called_once()
def test_cancel_open_connections_empty(self):
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
def test_cancel_open_connections_main(self):
key = self.adapter.connections.get_thread_identifier()
self.adapter.connections.thread_connections[key] = mock_connection(
'main')
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
def test_placeholder(self):
pass
|
"""Tests for evohomeclient package"""
from datetime import datetime
import requests_mock
from . import EvohomeClient
UNAUTH_RESPONSE = """[{
"code": "Unauthorized",
"message": "Unauthorized"
}]"""
TASK_ACCEPTED_LIST = """[{"id": "123"}]"""
TASK_ACCEPTED = """{"id": "123"}"""
VALID_SESSION_RESPONSE = """{
"sessionId": "EE32E3A8-1C09-4A5C-9572-24A088197A38",
"userInfo": {
"userID": 123456,
"username": "username",
"firstname": "Forename",
"lastname": "Surname",
"streetAddress": "Street Address",
"city": "City",
"state": "",
"zipcode": "AB1 2CD",
"country": "GB",
"telephone": "",
"userLanguage": "en-GB",
"isActivated": true,
"deviceCount": 0,
"tenantID": 5,
"securityQuestion1": "NotUsed",
"securityQuestion2": "NotUsed",
"securityQuestion3": "NotUsed",
"latestEulaAccepted": false
}
}"""
VALID_ZONE_RESPONSE = """[
{
"locationID": 23456,
"name": "Home",
"streetAddress": "Street Address",
"city": "City",
"state": "",
"country": "GB",
"zipcode": "AB1 2CD",
"type": "Residential",
"hasStation": true,
"devices": [
{
"gatewayId": 333444,
"deviceID": 131313,
"thermostatModelType": "DOMESTIC_HOT_WATER",
"deviceType": 96,
"name": "",
"scheduleCapable": false,
"holdUntilCapable": false,
"thermostat": {
"units": "Celsius",
"indoorTemperature": 24.0100,
"outdoorTemperature": 128.0000,
"outdoorTemperatureAvailable": false,
"outdoorHumidity": 128.0000,
"outdootHumidityAvailable": false,
"indoorHumidity": 128.0000,
"indoorTemperatureStatus": "Measured",
"indoorHumidityStatus": "NotAvailable",
"outdoorTemperatureStatus": "NotAvailable",
"outdoorHumidityStatus": "NotAvailable",
"isCommercial": false,
"allowedModes": [
"DHWOn",
"DHWOff"
],
"deadband": 0.0000,
"minHeatSetpoint": 5.0000,
"maxHeatSetpoint": 30.0000,
"minCoolSetpoint": 50.0000,
"maxCoolSetpoint": 90.0000,
"changeableValues": {
"mode": "DHWOff",
"status": "Scheduled"
},
"scheduleCapable": false,
"vacationHoldChangeable": false,
"vacationHoldCancelable": false,
"scheduleHeatSp": 0.0000,
"scheduleCoolSp": 0.0000
},
"alertSettings": {
"deviceID": 131313,
"tempHigherThanActive": false,
"tempHigherThan": 100.0000,
"tempHigherThanMinutes": 0,
"tempLowerThanActive": false,
"tempLowerThan": -50.0000,
"tempLowerThanMinutes": 0,
"faultConditionExistsActive": false,
"faultConditionExistsHours": 0,
"normalConditionsActive": true,
"communicationLostActive": false,
"communicationLostHours": 0,
"thermostatAlertActive": false,
"communicationFailureActive": true,
"communicationFailureMinutes": 15,
"deviceLostActive": false,
"deviceLostHours": 0
},
"isUpgrading": false,
"isAlive": true,
"thermostatVersion": "03.00.10.06",
"macID": "001122334455",
"locationID": 23456,
"domainID": 28111,
"instance": 250
},
{
"gatewayId": 333444,
"deviceID": 121212,
"thermostatModelType": "EMEA_ZONE",
"deviceType": 96,
"name": "RoomName",
"scheduleCapable": false,
"holdUntilCapable": false,
"thermostat": {
"units": "Celsius",
"indoorTemperature": 17.5400,
"outdoorTemperature": 128.0000,
"outdoorTemperatureAvailable": false,
"outdoorHumidity": 128.0000,
"outdootHumidityAvailable": false,
"indoorHumidity": 128.0000,
"indoorTemperatureStatus": "Measured",
"indoorHumidityStatus": "NotAvailable",
"outdoorTemperatureStatus": "NotAvailable",
"outdoorHumidityStatus": "NotAvailable",
"isCommercial": false,
"allowedModes": [
"Heat",
"Off"
],
"deadband": 0.0000,
"minHeatSetpoint": 5.0000,
"maxHeatSetpoint": 35.0000,
"minCoolSetpoint": 50.0000,
"maxCoolSetpoint": 90.0000,
"changeableValues": {
"mode": "Off",
"heatSetpoint": {
"value": 15.0,
"status": "Scheduled"
},
"vacationHoldDays": 0
},
"scheduleCapable": false,
"vacationHoldChangeable": false,
"vacationHoldCancelable": false,
"scheduleHeatSp": 0.0000,
"scheduleCoolSp": 0.0000
},
"alertSettings": {
"deviceID": 121212,
"tempHigherThanActive": true,
"tempHigherThan": 30.0000,
"tempHigherThanMinutes": 0,
"tempLowerThanActive": true,
"tempLowerThan": 5.0000,
"tempLowerThanMinutes": 0,
"faultConditionExistsActive": false,
"faultConditionExistsHours": 0,
"normalConditionsActive": true,
"communicationLostActive": false,
"communicationLostHours": 0,
"communicationFailureActive": true,
"communicationFailureMinutes": 15,
"deviceLostActive": false,
"deviceLostHours": 0
},
"isUpgrading": false,
"isAlive": true,
"thermostatVersion": "03.00.10.06",
"macID": "001122334455",
"locationID": 23456,
"domainID": 28111,
"instance": 10
}
]
}
]"""
VALID_ZONE_RESPONSE_NO_DHW = """[
{
"locationID": 23456,
"name": "Home",
"streetAddress": "Street Address",
"city": "City",
"state": "",
"country": "GB",
"zipcode": "AB1 2CD",
"type": "Residential",
"hasStation": true,
"devices": [
{
"gatewayId": 333444,
"deviceID": 121212,
"thermostatModelType": "EMEA_ZONE",
"deviceType": 96,
"name": "RoomName",
"scheduleCapable": false,
"holdUntilCapable": false,
"thermostat": {
"units": "Celsius",
"indoorTemperature": 17.5400,
"outdoorTemperature": 128.0000,
"outdoorTemperatureAvailable": false,
"outdoorHumidity": 128.0000,
"outdootHumidityAvailable": false,
"indoorHumidity": 128.0000,
"indoorTemperatureStatus": "Measured",
"indoorHumidityStatus": "NotAvailable",
"outdoorTemperatureStatus": "NotAvailable",
"outdoorHumidityStatus": "NotAvailable",
"isCommercial": false,
"allowedModes": [
"Heat",
"Off"
],
"deadband": 0.0000,
"minHeatSetpoint": 5.0000,
"maxHeatSetpoint": 35.0000,
"minCoolSetpoint": 50.0000,
"maxCoolSetpoint": 90.0000,
"changeableValues": {
"mode": "Off",
"heatSetpoint": {
"value": 15.0,
"status": "Scheduled"
},
"vacationHoldDays": 0
},
"scheduleCapable": false,
"vacationHoldChangeable": false,
"vacationHoldCancelable": false,
"scheduleHeatSp": 0.0000,
"scheduleCoolSp": 0.0000
},
"alertSettings": {
"deviceID": 121212,
"tempHigherThanActive": true,
"tempHigherThan": 30.0000,
"tempHigherThanMinutes": 0,
"tempLowerThanActive": true,
"tempLowerThan": 5.0000,
"tempLowerThanMinutes": 0,
"faultConditionExistsActive": false,
"faultConditionExistsHours": 0,
"normalConditionsActive": true,
"communicationLostActive": false,
"communicationLostHours": 0,
"communicationFailureActive": true,
"communicationFailureMinutes": 15,
"deviceLostActive": false,
"deviceLostHours": 0
},
"isUpgrading": false,
"isAlive": true,
"thermostatVersion": "03.00.10.06",
"macID": "001122334455",
"locationID": 23456,
"domainID": 28111,
"instance": 10
}
]
}
]"""
@requests_mock.Mocker()
def test_429_returned_raises_exception(mock): # pylint: disable=invalid-name
"""test that exception is raised for a 429 error"""
mock.post(
"http://localhost:5050/WebAPI/api/Session",
status_code=429,
text="""[
{
"code": "TooManyRequests",
"message": "Request count limitation exceeded, please try again later."
}
]""",
)
try:
client = EvohomeClient("username", "password", hostname="http://localhost:5050")
list(client.temperatures())
# Shouldn't get here
assert False
# pylint: disable=bare-except
except:
assert True
@requests_mock.Mocker()
def test_valid_login(mock):
"""test valid path"""
mock.post("http://localhost:5050/WebAPI/api/Session", text=VALID_SESSION_RESPONSE)
mock.get(
"http://localhost:5050/WebAPI/api/locations?userId=123456&allData=True",
text=VALID_ZONE_RESPONSE,
)
client = EvohomeClient("username", "password", hostname="http://localhost:5050")
data = list(client.temperatures())
assert len(data) == 2
# assert x[1].name == "RoomName"
assert data == [
{
"thermostat": "DOMESTIC_HOT_WATER",
"id": 131313,
"name": "",
"temp": 24.01,
"setpoint": 0,
"status": "Scheduled",
"mode": "DHWOff",
},
{
"thermostat": "EMEA_ZONE",
"id": 121212,
"name": "RoomName",
"temp": 17.54,
"setpoint": 15.0,
"status": "Scheduled",
"mode": "Off",
},
]
@requests_mock.Mocker()
def test_expired_sessionid(mock):
"""test expired sessionid"""
mock.post("http://localhost:5050/WebAPI/api/Session", text=VALID_SESSION_RESPONSE)
mock.get(
"http://localhost:5050/WebAPI/api/locations?userId=123456&allData=True",
[
{"text": UNAUTH_RESPONSE, "status_code": 401},
{"text": VALID_ZONE_RESPONSE, "status_code": 200},
],
)
client = EvohomeClient(
"username",
"password",
hostname="http://localhost:5050",
user_data={"userInfo": {"userID": "123456"}, "sessionId": "sessionhere"},
)
data = list(client.temperatures())
assert len(data) == 2
# assert x[1].name == "RoomName"
assert data == [
{
"thermostat": "DOMESTIC_HOT_WATER",
"id": 131313,
"name": "",
"temp": 24.01,
"setpoint": 0,
"status": "Scheduled",
"mode": "DHWOff",
},
{
"thermostat": "EMEA_ZONE",
"id": 121212,
"name": "RoomName",
"temp": 17.54,
"setpoint": 15.0,
"status": "Scheduled",
"mode": "Off",
},
]
@requests_mock.Mocker()
def test_get_zone_modes(mock):
"""test get zone modes"""
mock.get(
"http://localhost:5050/WebAPI/api/locations?userId=123456&allData=True",
text=VALID_ZONE_RESPONSE,
)
client = EvohomeClient(
"username",
"password",
hostname="http://localhost:5050",
user_data={"userInfo": {"userID": "123456"}, "sessionId": "sessionhere"},
)
modes = client.get_modes("RoomName")
modes2 = client.get_modes(121212)
assert modes == modes2 == ["Heat", "Off"]
@requests_mock.Mocker()
def test_set_status(mock):
"""test that statuses can be set correctly"""
mock.get(
"http://localhost:5050/WebAPI/api/locations?userId=123456&allData=True",
text=VALID_ZONE_RESPONSE,
)
mock.put(
"http://localhost:5050/WebAPI/api/evoTouchSystems?locationId=23456",
text=TASK_ACCEPTED,
)
mock.get(
"http://localhost:5050/WebAPI/api/commTasks?commTaskId=123",
[
{"status_code": 200, "text": """{"state":"pending"}"""},
{"status_code": 200, "text": """{"state":"Succeeded"}"""},
],
)
client = EvohomeClient(
"username",
"password",
hostname="http://localhost:5050",
user_data={"userInfo": {"userID": "123456"}, "sessionId": "sessionhere"},
)
client.set_status_normal()
client.set_status_custom()
client.set_status_eco()
client.set_status_away()
client.set_status_dayoff()
client.set_status_heatingoff(datetime(2019, 10, 10, 10, 10, 10))
@requests_mock.Mocker()
def test_zone_temp(mock):
"""test that zone temps can be set correctly"""
mock.get(
"http://localhost:5050/WebAPI/api/locations?userId=123456&allData=True",
text=VALID_ZONE_RESPONSE,
)
mock.put(
"http://localhost:5050/WebAPI/api/devices/121212/thermostat/changeableValues/heatSetpoint",
text=TASK_ACCEPTED,
)
mock.put(
"http://localhost:5050/WebAPI/api/devices/131313/thermostat/changeableValues",
text=TASK_ACCEPTED_LIST,
)
mock.get(
"http://localhost:5050/WebAPI/api/commTasks?commTaskId=123",
[
{"status_code": 200, "text": """{"state":"pending"}"""},
{"status_code": 200, "text": """{"state":"Succeeded"}"""},
],
)
client = EvohomeClient(
"username",
"password",
hostname="http://localhost:5050",
user_data={"userInfo": {"userID": "123456"}, "sessionId": "sessionhere"},
)
client.set_temperature("RoomName", 25)
client.set_temperature("RoomName", 25, until=datetime(2019, 10, 10, 10, 10, 10))
client.cancel_temp_override("RoomName")
mock.get(
"http://localhost:5050/WebAPI/api/commTasks?commTaskId=123",
[
{"status_code": 200, "text": """{"state":"pending"}"""},
{"status_code": 200, "text": """{"state":"Succeeded"}"""},
],
)
client.set_dhw_on()
client.set_dhw_off(datetime(2019, 10, 10, 10, 10, 10))
client.set_dhw_auto()
@requests_mock.Mocker()
def test_zone_temp_no_dhw(mock):
"""test that zone temps can be set correctly"""
mock.get(
"http://localhost:5050/WebAPI/api/locations?userId=123456&allData=True",
text=VALID_ZONE_RESPONSE_NO_DHW,
)
mock.put(
"http://localhost:5050/WebAPI/api/devices/121212/thermostat/changeableValues/heatSetpoint",
text=TASK_ACCEPTED_LIST,
)
mock.put(
"http://localhost:5050/WebAPI/api/devices/131313/thermostat/changeableValues",
text=TASK_ACCEPTED_LIST,
)
mock.get(
"http://localhost:5050/WebAPI/api/commTasks?commTaskId=123",
[
{"status_code": 200, "text": """{"state":"pending"}"""},
{"status_code": 200, "text": """{"state":"Succeeded"}"""},
],
)
client = EvohomeClient(
"username",
"password",
hostname="http://localhost:5050",
user_data={"userInfo": {"userID": "123456"}, "sessionId": "sessionhere"},
debug=True,
)
try:
client.set_dhw_on()
assert False
except: # pylint: disable=bare-except
pass
|
# -*- coding: utf-8 -*-
#Need to import this library to support array type
from array import array
# Returns the container type name ("tuple", "list" or "array") if the object is supported.
# Returns an empty string otherwise.
def supported_object(obj):
if type(obj) not in (tuple, list, array):
return ""
if(type(obj) == tuple):
return "tuple"
if(type(obj) == list):
return "list"
if(type(obj) == array):
return "array"
#Function that generates the string if the object is supported
def generate_string(obj, cont):
output = supported_object(obj)
output += " of "
output += str(len(obj))
if(len(obj) < 2):
output += " element "
else:
output += " elements "
output += "containing"
if cont != []:
output += " " + str(set(cont))
else:
output += " : "
return output
def supertype(obj, indent=0):
    if supported_object(obj) != "":
        cont = []
        i = 0
        # Find the first element that is itself a supported container (if any).
        while i < len(obj) and supported_object(obj[i]) == "":
            i += 1
        if i >= len(obj):
            # No nested containers: summarise the element types on one line.
            for j in obj:
                cont.append(supertype(j, indent + 1))
            cont = generate_string(obj, cont)
        else:
            # Nested containers: start from the header string, then describe
            # each element on its own indented line.
            cont = generate_string(obj, [])
            for j in obj:
                cont += '\n' + ' ' * (indent + 1) + '-' + str(supertype(j, indent + 1))
        if indent == 0:
            print(cont)
        else:
            return cont
# elif type(obj)==numpy.ndarray:
# return('numpy array of shape '+str(obj.shape))
# #If we want to add specific info about some types (as shape of nunmpy array) we can add elif right here
else:
try:
cont = (str(type(obj))[8:-2]+' of shape '+str(obj.shape))
except:
try:
cont = (str(type(obj))[8:-2]+' of '+str(len(obj))+' element')
if len(obj)>1:
cont+='s'
except:
cont = (str(type(obj))[8:-2])
if(indent==0):
print(cont)
else:
return(cont)
#The [8:-2] allows us to make "<class 'str'>" into "str"
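# A minimal usage sketch (assumes the functions above; the output shown in the
# comments is approximate, since set ordering may vary):
if __name__ == "__main__":
    supertype([1, 2.0, "x"])
    # list of 3 elements containing {'int', 'float', 'str'}
    supertype([1, [2, 3]])
    # list of 2 elements containing :
    #  -int
    #  -list of 2 elements containing {'int'}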
|
# import h5py
# import json
# from abc import ABC, abstractmethod
# from .utils import *
# class BaseClientDataLoader(ABC):
# @abstractmethod
# def __init__(self, data_path, partition_path, client_idx, partition_method, tokenize, data_fields):
# self.data_path = data_path
# self.partition_path = partition_path
# self.client_idx = client_idx
# self.partition_method = partition_method
# self.tokenize = tokenize
# self.data_fields = data_fields
# self.train_data = None
# self.test_data = None
# self.attributes = None
# self.load_data()
# if self.tokenize:
# self.spacy_tokenizer = SpacyTokenizer()
# def get_train_batch_data(self, batch_size=None):
# if batch_size is None:
# return self.train_data
# else:
# batch_data_list = list()
# start = 0
# length = len(self.train_data["Y"])
# while start < length:
# end = start + batch_size if start + batch_size < length else length
# batch_data = dict()
# for field in self.data_fields:
# batch_data[field] = self.train_data[field][start: end]
# batch_data_list.append(batch_data)
# start = end
# return batch_data_list
# def get_test_batch_data(self, batch_size=None):
# if batch_size is None:
# return self.test_data
# else:
# batch_data_list = list()
# start = 0
# length = len(self.test_data["Y"])
# while start < length:
# end = start + batch_size if start + batch_size < length else length
# batch_data = dict()
# for field in self.data_fields:
# batch_data[field] = self.test_data[field][start: end]
# batch_data_list.append(batch_data)
# start = end
# return batch_data_list
# def get_train_data_num(self):
# if "X" in self.train_data:
# return len(self.train_data["X"])
# elif "context_X" in self.train_data:
# return len(self.train_data["context_X"])
# else:
# print(self.train_data.keys())
# return None
# def get_test_data_num(self):
# if "X" in self.test_data:
# return len(self.test_data["X"])
# elif "context_X" in self.test_data:
# return len(self.test_data["context_X"])
# else:
# return None
# def get_attributes(self):
# return self.attributes
# def load_data(self):
# data_dict = h5py.File(self.data_path, "r")
# partition_dict = h5py.File(self.partition_path, "r")
# def generate_client_data(data_dict, index_list):
# data = dict()
# for field in self.data_fields:
# data[field] = [decode_data_from_h5(data_dict[field][str(idx)][()]) for idx in index_list]
# return data
# if self.client_idx is None:
# train_index_list = []
# test_index_list = []
# for client_idx in partition_dict[self.partition_method]["partition_data"].keys():
# train_index_list.extend(decode_data_from_h5(partition_dict[self.partition_method]["partition_data"][client_idx]["train"][()]))
# test_index_list.extend(decode_data_from_h5(partition_dict[self.partition_method]["partition_data"][client_idx]["test"][()]))
# self.train_data = generate_client_data(data_dict, train_index_list)
# self.test_data = generate_client_data(data_dict, test_index_list)
# else:
# client_idx = str(self.client_idx)
# train_index_list = decode_data_from_h5(partition_dict[self.partition_method]["partition_data"][client_idx]["train"][()])
# test_index_list = decode_data_from_h5(partition_dict[self.partition_method]["partition_data"][client_idx]["test"][()])
# self.train_data = generate_client_data(data_dict, train_index_list)
# self.test_data = generate_client_data(data_dict, test_index_list)
# self.attributes = json.loads(data_dict["attributes"][()])
# self.attributes["n_clients"] = decode_data_from_h5(partition_dict[self.partition_method]["n_clients"][()])
# data_dict.close()
# partition_dict.close()
|
import tensorflow as tf
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Input, InputLayer, Conv2D, MaxPooling2D, Reshape, Flatten
from keras.models import load_model
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
def create_sequential_model():
single_output_model = Sequential([
InputLayer(input_shape=(28, 28)),
Reshape((28,28,1),input_shape=(28,28,)),
Conv2D(filters=6, kernel_size=5, strides=1, input_shape=(28, 28, 1), name="Conv2D_1"),
MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="MaxPooling2D_1"),
Conv2D(filters=16, kernel_size=5, strides=1, name="Conv2D_2"),
MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="MaxPooling2D_2"),
Flatten(),
Dense(120, activation="relu", name="Dense_1"),
Dense(84, activation="relu", name="Dense_2"),
Dense(10, activation="softmax", name="Softmax")
])
return single_output_model
def create_model():
input_tensor = Input(shape=(28, 28))
reshape_layer = Reshape((28, 28, 1), input_shape=(28, 28,))(input_tensor)
conv2D_layer_1 = Conv2D(filters=6, kernel_size=5, strides=1, input_shape=(28, 28, 1))(reshape_layer)
maxpool2D_layer_1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv2D_layer_1)
conv2D_layer_2 = Conv2D(filters=16, kernel_size=5, strides=1)(maxpool2D_layer_1)
maxpool2D_layer_2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv2D_layer_2)
flatten_layer = Flatten()(maxpool2D_layer_2)
dense_layer_1 = Dense(120, activation="relu")(flatten_layer)
dense_layer_2 = Dense(84, activation="relu")(dense_layer_1)
dense_layer_3 = Dense(10, activation="softmax")(dense_layer_2)
classic_model =Model(
inputs=input_tensor,
outputs=dense_layer_3
)
return classic_model
def train_model():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = create_sequential_model()
# model = create_model()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, batch_size=32)
# model.evaluate(x_test, y_test)
return model
def save_model(model, name_path):
model.save(name_path)
def load_from_model(name_path):
model = load_model(name_path)
# Load model from json+weights
# from keras.models import model_from_json
# json_path = "./saved_models/gan/generator.json"
# weight_path = "./saved_models/gan/generator_weights.hdf5"
#
# structure = open(json_path, "r")
# model = model_from_json(
# structure
# )
# model.load_weights(weight_path)
return model
def generate_encapsulate_model_with_output_layer_names(model, output_layer_names):
enc_model = Model(
inputs=model.input,
outputs=list(map(lambda oln: model.get_layer(oln).output, output_layer_names))
)
return enc_model
def generate_encapsulate_model(model):
enc_model = Model(
inputs=model.input,
# ignore 1st layer (input), since some old models do not have 1st layer as Keras layer
outputs=list(map(lambda layer: layer.output, model.layers[1:]))
)
return enc_model
# model = train_model()
# save_model(model, "../models/keras_model.h5")
model = load_from_model("../models/keras_model.h5")
# model = load_from_model("../models/enc_keras_model.h5")
input_sample = np.ndarray(shape=(28,28), buffer=np.random.rand(28,28))
input_sample = np.expand_dims(input_sample, axis=0)
print(model.predict(input_sample))
# model.summary()
#
# for l in model.layers:
# print(l.name)
output_layer_names = ["Conv2D_1", "MaxPooling2D_1", "Conv2D_2", "MaxPooling2D_2", "Dense_1", "Dense_2", "Softmax"]
# enc_model = generate_encapsulate_model_with_output_layer_names(model, output_layer_names)
# enc_model = generate_encapsulate_model(model)
# print(enc_model.predict(input_sample))
# enc_model.compile(optimizer='adam',
# loss='sparse_categorical_crossentropy',
# metrics=['accuracy'])
# save_model(enc_model, "../models/enc_keras_model.h5")
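# A hedged sketch of inspecting the encapsulated model's per-layer outputs
# (assumes the enc_model lines above are enabled; the layer names come from
# create_sequential_model):
#
#     enc_model = generate_encapsulate_model_with_output_layer_names(model, output_layer_names)
#     activations = enc_model.predict(input_sample)  # one array per named layer
#     for layer_name, act in zip(output_layer_names, activations):
#         print(layer_name, act.shape)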
|
# 2017 John Shell
from datetime import datetime, timezone
from random import randint as rand
from typing import Optional
import discord
import pytz
from .grasslands import Peacock
log = Peacock()
def m2id(mem):
"""
Convert member object to id str
:param mem: discord.Member or id str
:return: str id
"""
if isinstance(mem, discord.Member) or isinstance(mem, discord.User):
mid = mem.id
elif isinstance(mem, discord.TextChannel) or isinstance(mem, discord.Guild):
print(mem.name)
mid = None
else:
mid = mem
return str(mid)
def ts(dt):
"""
    Generate a timestamp from a datetime object; it's just cleaner.
:param dt: datetime object
:return: seconds since epoch
"""
if not isinstance(dt, datetime):
return dt
try:
dt = dt.replace(tzinfo=pytz.utc)
return (dt - datetime(1970, 1, 1, tzinfo=timezone.utc)).total_seconds()
except TypeError:
return dt
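# Worked example: ts(datetime(1970, 1, 2)) == 86400.0, i.e. one day past the
# Unix epoch, with the naive datetime treated as UTC.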
class DBHandler(object):
"""
    Handle connections between leaf and the database. If config.yml has
    databasing turned off, the handler's only job is to let the user know
    that it is off.
"""
def __init__(self, config):
if config.get("dbconf") is None:
log.f(
"DBHandler",
"Could not find database config entry "
"in config.yml. "
"Certain features will be disabled",
)
self.useDB = False
return
self.useDB = True
self.config = config
db_conf = self.config.get("dbconf")
from pymongo import MongoClient
if "remote_uri" in db_conf:
client = MongoClient(db_conf["remote_uri"])
else:
client = MongoClient("localhost", db_conf["port"])
if "name" not in db_conf:
self.db = client["petal"]
else:
self.db = client[db_conf["name"]]
self.members = self.db["members"]
self.reminders = self.db["reminders"]
self.motd = self.db["motd"]
self.void = self.db["void"]
self.ac = self.db["ac"]
self.subs = self.db["subs"]
self.emoji = self.db["emoji"]
self.dinos = self.db["dinos"]
log.f("DBHandler", "Database system ready")
def member_exists(self, member):
"""
:param member: id of member to look up
:return: bool member id is in the member collection
"""
if not self.useDB:
return False
if self.members.find_one({"uid": m2id(member)}) is not None:
return True
return False
def add_member(self, member, verbose=False):
if not self.useDB:
return False
if self.member_exists(member):
if verbose:
log.f(
"DBhandler",
"Member already exists in database, "
"use update_member to update them",
)
return False
else:
data = {
"name": member.name,
"uid": member.id,
"discord_date": ts(member.created_at),
"local_date": ts(datetime.utcnow()),
"aliases": [],
"discriminator": member.discriminator,
"isBot": member.bot,
"avatar_url": str(member.avatar_url),
"location": "Brisbane, Australia",
"osu": "",
"banned": False,
"subreddit": "aww",
"message_count": 0,
"last_active": ts(datetime.utcnow()),
"last_message": 0,
"last_message_channel": "0",
"strikes": [],
"subscriptions": [],
"commands_count": 0,
}
try:
data["guilds"] = [member.guild.id]
except AttributeError:
log.f(
"dbhandler",
f"{member.name} is a User type object, cannot add server attribute",
)
if isinstance(member, discord.Member):
data["server_date"] = ts(member.joined_at)
data["joins"] = [ts(member.joined_at)]
if member.display_name != member.name:
data["aliases"].append(member.display_name)
pid = self.members.insert_one(data).inserted_id
log.f("DBhandler", f"New member added to DB! (_id: {pid})")
return True
def get_member(self, member):
"""
Retrieves a Dictionary representation of a member
:param member: discord.Member or str id of member
:return: dict member
"""
if not self.useDB:
return None
r = self.members.find_one({"uid": m2id(member)})
if r is not None:
return r
return None
def get_attribute(self, member, key, verbose=True):
"""
Retrieves a specific field from a stored member object
:param member: discord.Member or str id of member
:param key: field to return
:param verbose:
:return: member[key] or None if none
"""
if not self.useDB:
return False
mem = self.get_member(member)
if mem is None:
if verbose:
log.f("DBHandler", f"{member.name} {m2id(member)} not found in db")
return None
if key in mem:
return mem[key]
else:
if verbose:
log.f("DBHandler", f"{m2id(member)} has no field: {key}")
return None
def update_member(self, member, data=None, type=0, subdict=""):
"""
        Updates the database with the keys and values provided in the data dict
:param member: member to update
:param data: dictionary containing data to update
:param type: 0 = None, 1 = Message, 2 = Command
        :param subdict: Optional name of a sub-dict of the member document to update
:return: str response
"""
if not self.useDB:
return False
if data is None:
log.f("DBhandler", "Please provide data first!")
return False
self.add_member(member)
# mem: MONGO DOCUMENT (as python dict)
mem = self.get_member(member)
if mem is None:
log.f("DBhandler", "Member doesn't exist")
return False
# TODO: get member dict first then query over. Update finally
count = 0
if subdict:
# This operation is running in Subdict mode; Update the dict provided
            if subdict not in mem:
mem[subdict] = data
count += 1
else:
mem[subdict].update(data)
else:
for key in data:
# data: DICT
# key: STR (probably)
# mem[key]: CURRENT VALUE
# data[key]: NEW VALUE
if isinstance(data[key], dict):
print(str(key) + "\n" + str(data) + "\n")
mem[key] = data[key]
print(str(mem[key]))
for vk in mem[key]:
mem[key][vk] = ts(mem[key][vk])
elif key in mem:
if isinstance(mem[key], list):
if isinstance(data[key], list):
for item in data[key]:
# log.f("DBHandler", "Item: " + item)
if item not in mem[key]:
# log.f("DBHandler", "ON key: " + key + " added " + item + " to " + str(mem[key]))
mem[key].append(item)
count += 1
else:
if data[key] not in mem[key]:
mem[key].append(data[key])
log.f("DBHandler", f"added { data[key]} to {key}")
count += 1
else:
# log.f("DBHandler", "replace key: " + key + " -> "
# + str(mem[key]) + " with "
# + str(data[key]))
# log.f("Replaced " + key + ": " + str(mem[key]) + " -> " + str(ts(data[key])))
mem[key] = ts(data[key])
else:
# log.f("Added " + key)
mem[key] = ts(data[key])
count += 1
if type == 1:
mem["message_count"] += 1
elif type == 2:
mem["commands_count"] += 1
if count > 0:
log.f("DBHandler", "Added {count} fields to {mem['name']}")
self.members.replace_one({"uid": m2id(member)}, mem, upsert=False)
return True
def get_void(self):
void_size = self.void.count()
if void_size == 0:
return "Nothing in void storage"
response = None
while response is None:
index = rand(0, void_size - 1)
response = self.void.find_one({"number": index})
return response
def save_void(self, content, name, id):
if self.void.count({"content": content}) > 0:
return None
self.void.insert(
{"content": content, "number": self.void.count(), "author": name + " " + id}
)
return self.void.count()
def delete_void(self, number):
return self.void.delete_one({"number": number})
def get_reminders(self, timestamp):
timestamp = ts(timestamp)
        # Reminders due at or before the given timestamp.
        return self.reminders.find({"ts": {"$lte": timestamp}})
def add_reminder(self, author, content, timestamp):
timestamp = ts(timestamp)
return self.reminders.insert_one(
{"ts": timestamp, "author": author.id, "content": content}
)
def get_motd_entry(self, update=False):
response = self.motd.find_one({"used": False, "approved": True})
if response is None:
return None
if not update:
return response
self.motd.update(
{"_id": response["_id"]},
{"$set": {"used": True}},
upsert=False,
multi=False,
)
return response
def get_motd_max(self):
return self.motd.find_one(sort=[("num", -1)])
def submit_motd(self, author, content):
num = self.get_motd_max()
if num is None:
idx: int = 2000
else:
idx: int = num["num"] + 1
entry = {
"author": author,
"num": idx,
"content": content,
"approved": False,
"used": False,
}
# print(str(entry))
return self.motd.find_one({"_id": self.motd.insert_one(entry).inserted_id})
def update_motd(self, num, approve=True):
if approve:
self.motd.update_one(
{"num": num}, {"$set": {"approved": True, "used": False}}, upsert=False
)
else:
self.motd.update_one(
{"num": num}, {"$set": {"approved": False, "used": False}}, upsert=False
)
return self.motd.find_one({"num": num})
    def read_cmd_image(self, invoker: str) -> Optional[bytes]:
# TODO: `invoker` is a string key in the DB with Base64 image data. Find and return it.
# Should return a bytes class object if the data is in the DB, or `None` if not.
pass
def write_cmd_image(self, invoker: str, img: bytes):
# TODO: `img` is a bstring of Base64 data. Write it into the DB under the key `invoker`.
# Should return `True` if the image was written, or `False` if it was not.
pass
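# A hedged usage sketch (assumes a reachable MongoDB instance and a config
# dict shaped like the "dbconf" entry in config.yml; values are illustrative):
#
#     db = DBHandler({"dbconf": {"port": 27017, "name": "petal"}})
#     if not db.member_exists(member):
#         db.add_member(member)
#     db.update_member(member, data={"osu": "some_username"}, type=2)
#     db.add_reminder(member, "water the plants", datetime.utcnow())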
|
from anvil import *
import anvil.facebook.auth
import anvil.google.auth, anvil.google.drive
from anvil.google.drive import app_files
import anvil.microsoft.auth
import anvil.users
import anvil.server
if not get_url_hash():
while not anvil.users.login_with_form():
pass
# disabling report module
# open_form('landing.main')
open_form('landing.select_action_survey')
else:
schema=anvil.server.call('get_form', get_url_hash())
open_form('form.main', schema)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
load(":shape.bzl", "shape")
# Define shapes for systemd units. This is not intended to be an exhaustive
# list of every systemd unit setting from the start, but should be added to as
# more use cases generate units with these shapes.
unit_t = shape.shape(
description = str,
requires = shape.list(str, default = []),
after = shape.list(str, default = []),
before = shape.list(str, default = []),
)
fstype_t = shape.enum("btrfs", "9p")
mount_t = shape.shape(
unit = unit_t,
what = str,
where = shape.path,
# add more filesystem types here as required
type = shape.field(fstype_t, optional = True),
options = shape.list(str, default = []),
)
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: model.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='model.proto',
package='modeltest',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0bmodel.proto\x12\tmodeltest\"\x11\n\x02hi\x12\x0b\n\x03msg\x18\x01 \x01(\t\"/\n\x05input\x12\x11\n\tinputType\x18\x01 \x01(\t\x12\x13\n\x0binputStream\x18\x02 \x01(\t\"2\n\x06output\x12\x12\n\noutputType\x18\x01 \x01(\t\x12\x14\n\x0coutputStream\x18\x02 \x01(\t\"1\n\tproxyinfo\x12\x11\n\tproxyName\x18\x01 \x01(\t\x12\x11\n\tproxyPort\x18\x02 \x01(\t\"\x1a\n\x08response\x12\x0e\n\x06status\x18\x01 \x01(\t2\xa9\x01\n\x0ePredictService\x12\x30\n\x07Predict\x12\x10.modeltest.input\x1a\x11.modeltest.output\"\x00\x12\x37\n\x08SetProxy\x12\x14.modeltest.proxyinfo\x1a\x13.modeltest.response\"\x00\x12,\n\x04Ping\x12\r.modeltest.hi\x1a\x13.modeltest.response\"\x00\x62\x06proto3')
)
_HI = _descriptor.Descriptor(
name='hi',
full_name='modeltest.hi',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='msg', full_name='modeltest.hi.msg', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=26,
serialized_end=43,
)
_INPUT = _descriptor.Descriptor(
name='input',
full_name='modeltest.input',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='inputType', full_name='modeltest.input.inputType', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inputStream', full_name='modeltest.input.inputStream', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=45,
serialized_end=92,
)
_OUTPUT = _descriptor.Descriptor(
name='output',
full_name='modeltest.output',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='outputType', full_name='modeltest.output.outputType', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outputStream', full_name='modeltest.output.outputStream', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=94,
serialized_end=144,
)
_PROXYINFO = _descriptor.Descriptor(
name='proxyinfo',
full_name='modeltest.proxyinfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proxyName', full_name='modeltest.proxyinfo.proxyName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='proxyPort', full_name='modeltest.proxyinfo.proxyPort', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=146,
serialized_end=195,
)
_RESPONSE = _descriptor.Descriptor(
name='response',
full_name='modeltest.response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='modeltest.response.status', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=197,
serialized_end=223,
)
DESCRIPTOR.message_types_by_name['hi'] = _HI
DESCRIPTOR.message_types_by_name['input'] = _INPUT
DESCRIPTOR.message_types_by_name['output'] = _OUTPUT
DESCRIPTOR.message_types_by_name['proxyinfo'] = _PROXYINFO
DESCRIPTOR.message_types_by_name['response'] = _RESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
hi = _reflection.GeneratedProtocolMessageType('hi', (_message.Message,), dict(
DESCRIPTOR = _HI,
__module__ = 'model_pb2'
# @@protoc_insertion_point(class_scope:modeltest.hi)
))
_sym_db.RegisterMessage(hi)
input = _reflection.GeneratedProtocolMessageType('input', (_message.Message,), dict(
DESCRIPTOR = _INPUT,
__module__ = 'model_pb2'
# @@protoc_insertion_point(class_scope:modeltest.input)
))
_sym_db.RegisterMessage(input)
output = _reflection.GeneratedProtocolMessageType('output', (_message.Message,), dict(
DESCRIPTOR = _OUTPUT,
__module__ = 'model_pb2'
# @@protoc_insertion_point(class_scope:modeltest.output)
))
_sym_db.RegisterMessage(output)
proxyinfo = _reflection.GeneratedProtocolMessageType('proxyinfo', (_message.Message,), dict(
DESCRIPTOR = _PROXYINFO,
__module__ = 'model_pb2'
# @@protoc_insertion_point(class_scope:modeltest.proxyinfo)
))
_sym_db.RegisterMessage(proxyinfo)
response = _reflection.GeneratedProtocolMessageType('response', (_message.Message,), dict(
DESCRIPTOR = _RESPONSE,
__module__ = 'model_pb2'
# @@protoc_insertion_point(class_scope:modeltest.response)
))
_sym_db.RegisterMessage(response)
_PREDICTSERVICE = _descriptor.ServiceDescriptor(
name='PredictService',
full_name='modeltest.PredictService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=226,
serialized_end=395,
methods=[
_descriptor.MethodDescriptor(
name='Predict',
full_name='modeltest.PredictService.Predict',
index=0,
containing_service=None,
input_type=_INPUT,
output_type=_OUTPUT,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='SetProxy',
full_name='modeltest.PredictService.SetProxy',
index=1,
containing_service=None,
input_type=_PROXYINFO,
output_type=_RESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='Ping',
full_name='modeltest.PredictService.Ping',
index=2,
containing_service=None,
input_type=_HI,
output_type=_RESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_PREDICTSERVICE)
DESCRIPTOR.services_by_name['PredictService'] = _PREDICTSERVICE
# @@protoc_insertion_point(module_scope)
|
from unittest import TestCase
from currencies import server
from currencies import repository
class TestService(TestCase):
def setUp(self):
app = server.start(repository)
self.client = app.test_client()
def test_root_returns_200(self):
response = self.client.get("/")
assert response.status_code == 200
def test_root_returns_a_list(self):
response = self.client.get("/")
assert isinstance(response.json, list)
def test_root_returns_a_non_empty_list(self):
response = self.client.get("/")
assert len(response.json) > 0
|
from django.shortcuts import render, reverse
from django.http import HttpResponse, HttpResponseRedirect
from batchthis.models import Batch, Fermenter, BatchTestType, BatchNoteType
from django.shortcuts import get_object_or_404
from .forms import BatchTestForm, BatchNoteForm, BatchAdditionForm, RefractometerCorrectionForm
from batchthis.utils import Utils
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
top_batches = Batch.objects.all()[:5]
total_batch_count = Batch.objects.all().count()
active_batches = Batch.objects.filter(active=True)
active_batch_count = len(active_batches)
active_fermenters = Fermenter.objects.filter(status=Fermenter.STATUS_ACTIVE)
active_fermenters_count = len(active_fermenters)
total_volume = 0
for batch in active_batches:
total_volume += batch.size
fermenter_detail = {}
for fermenter in active_fermenters:
fermenter_batch = fermenter.batch.filter(active=True)
if not fermenter.name in fermenter_detail.keys():
fermenter_detail[fermenter.name] = {'batch': fermenter_batch[0].name, 'size': fermenter_batch[0].size}
context = {
'active_batches': active_batches,
'active_fermenters': active_fermenters,
'top_batches': top_batches,
'total_batch_count': total_batch_count,
'active_batch_count': active_batch_count,
'active_fermenters_count': active_fermenters_count,
'total_volume': total_volume,
'fermenter_detail': fermenter_detail
}
return render(request,'batchthis/index.html',context=context)
@login_required
def batchListing(request):
batches = Batch.objects.all()
context = {
'batches': batches
}
return render(request,'batchthis/batches.html', context=context)
def batch(request, pk):
batch = get_object_or_404(Batch,pk=pk)
testTypes = BatchTestType.objects.all()
fermenters = batch.fermenter.all()
gravity_tests = batch.tests.filter(type__shortid='specific-gravity')
current_gravity = batch.startingGravity
if len(gravity_tests) > 0:
# We have gravity tests. Get the latest
current_gravity = gravity_tests[len(gravity_tests)-1].value
percent_complete = round((batch.startingGravity-current_gravity)/(batch.startingGravity-batch.estimatedEndGravity)*100)
thirdSugarBreak = round(batch.startingGravity-((batch.startingGravity-batch.estimatedEndGravity)/3),3)
thirdSugarBreakPercent = round((batch.startingGravity-thirdSugarBreak)/(batch.startingGravity-batch.estimatedEndGravity)*100)
ferm_notes = batch.notes.filter(notetype__name='Fermentation Note')
gen_notes = batch.notes.filter(notetype__name='General Note')
taste_notes = batch.notes.filter(notetype__name='Tasting Note')
gravityChart = {}
for test in gravity_tests:
if not "dates" in gravityChart.keys():
gravityChart["shortid"] = "specific-gravity"
gravityChart["dates"] = []
gravityChart["values"] = []
strfmt = "%m/%d/%y"
gravityChart["dates"].append(test.datetime.strftime(strfmt))
gravityChart["values"].append(test.value)
context = {
"batch": batch,
"percentComplete": percent_complete,
"gravityChart": gravityChart,
"gravityTests": gravity_tests,
"testTypes": testTypes,
"fermenters": fermenters,
"thirdSugarBreak": thirdSugarBreak,
"thirdSugarBreakPercent": thirdSugarBreakPercent,
"startingGravity": batch.startingGravity,
"endingGravity": batch.estimatedEndGravity,
"gennotes": gen_notes,
"fermnotes": ferm_notes,
"tastenotes": taste_notes
}
return render(request, 'batchthis/batch.html', context=context)
def batchTest(request, pk=None):
if request.method == 'GET':
if pk:
form = BatchTestForm()
form.fields['batch'].queryset = Batch.objects.filter(pk=pk)
form.initial = {'batch':pk}
            # We have a batch ID: auto-assign the batch to this test
else:
form = BatchTestForm()
# We don't have a batchID. Only show active batches
form.fields['batch'].queryset = Batch.objects.filter(active=True)
else:
        form = BatchTestForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('batch', kwargs={'pk': pk}))
return render(request,"batchthis/addTest.html", {'form':form})
def batchAddition(request, pk=None):
if request.method == 'GET':
form = BatchAdditionForm()
if pk:
form.fields['batch'].queryset = Batch.objects.filter(pk=pk)
form.initial = {'batch':pk}
else:
form.fields['batch'].queryset = Batch.objects.filter(active=True)
else:
        form = BatchAdditionForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('batch', kwargs={'pk': pk}))
return render(request, "batchthis/addAddon.html", {'form': form})
def batchNote(request, pk=None, noteType=None):
if request.method == 'GET':
form = BatchNoteForm()
form.initial = {}
if pk:
form.fields['batch'].queryset = Batch.objects.filter(pk=pk)
form.initial['batch'] = pk
if noteType:
noteTypes = BatchNoteType.objects.filter(name=noteType)
form.fields['notetype'].queryset = noteTypes
form.initial['notetype'] = noteTypes[0].pk
else:
form.fields['batch'].queryset = Batch.objects.all()
else:
        form = BatchNoteForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('batch', kwargs={'pk':pk}))
return render(request, "batchthis/addNote.html", {'form':form})
def activity(request, pk=None):
print("Getting activity for batch " + str(pk))
batch = Batch.objects.get(pk=pk)
activity = batch.activity.all().order_by('datetime')
print("Found " + str(len(activity)) + " entries in activity")
context = {
'activity': activity
}
return render(request, "batchthis/activity.html", context=context)
def refractometerCorrection(request):
form = RefractometerCorrectionForm(initial={'startUnit': 'bx','currentUnit': 'bx'})
result = (0,0)
if request.method == "POST":
form = RefractometerCorrectionForm(request.POST)
params = {}
if form.is_valid():
# Calculate Correction
startData = form.cleaned_data['startData']
startUnit = form.cleaned_data['startUnit']
currentData = form.cleaned_data['currentData']
currentUnit = form.cleaned_data['currentUnit']
if startUnit == 'sg':
params['startSG'] = startData
else:
params['startBrix'] = startData
if currentUnit == 'sg':
params['currentSG'] = currentData
else:
params['currentBrix'] = currentData
result = Utils.refractometerCorrection(**params)
context = {
'form': form,
'sg': '%.3f' % result[0], # Format SG to normal readable notation
'abv': round(result[1],1)
}
return render(request, 'batchthis/util.refractometer.html', context)
def batchGraphs(request, pk):
# Capture each type of test. No need to show graphs on tests not performed
batch = Batch.objects.get(pk=pk)
tests = batch.tests.all()
# Build data var for chart data
testGroup = {}
for test in tests:
if not test.type.name in testGroup.keys():
testGroup[test.type.name] = {}
testGroup[test.type.name]['shortid'] = test.type.shortid
testGroup[test.type.name]['dates'] = []
testGroup[test.type.name]['values'] = []
date_format = "%m/%d/%y"
strdate = test.datetime.strftime(date_format)
testGroup[test.type.name]['dates'].append(strdate)
testGroup[test.type.name]['values'].append(test.value)
context = {"tests": testGroup,
"testTypes": testGroup.keys()
}
return render(request, "batchthis/batchGraphs.html", context) |
"""
Decorators for exposing function arguments / returns
"""
from io import StringIO
import sys
import functools as ftl # , pprint
from .. import pprint as pp
from .base import Decorator
def get_inner(func, args=(), kws=None):
""""""
kws = kws or {}
while isinstance(func, ftl.partial):
kws.update(func.keywords)
args += func.args
func = func.func
return func, args, kws
class show(Decorator):
"""
    Decorator to print function call details: parameter names and effective
    values. Optional arguments specify text to print before and after the call,
    as well as specific pretty-printing options for `show_func`.
Examples
--------
>>> from recipes.decor import expose
>>> @expose.show()
... def foo(a, b, c, **kw):
... return a
...
... foo('aaa', 42, id, gr=8, bar=...)
foo(a = aaa,
b = 42,
c = <built-in function id>,
kwargs = {'bar': Ellipsis, 'gr': 8} )
Out[43]: 'aaa'
"""
def __init__(self, pre='', post='', **options):
self.pre = pre
self.post = post
self.options = options
def wrapper(self, *args, **kws):
print(self.pre)
print(pp.caller(self.func, args, kws, **self.options))
result = self.func(*args, **kws)
print(self.post)
sys.stdout.flush()
return result
args = show
def returns(func):
"""Decorator to print function return details"""
@ftl.wraps(func)
def wrapper(*args, **kw):
r = func(*args, **kw)
print('%s\nreturn %s' % (func.__name__, r))
return r
return wrapper
def suppress(func):
"""Suppress all print statements in a function call"""
@ftl.wraps(func)
def wrapper(*args, **kws):
# shadow stdout temporarily
actualstdout = sys.stdout
sys.stdout = StringIO()
# call the actual function
r = func(*args, **kws)
# restore stdout
sys.stdout = actualstdout
sys.stdout.flush()
return r
return wrapper
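# A short usage sketch for the decorators above (illustrative only):
#
#     @returns
#     def add(a, b):
#         return a + b
#
#     @suppress
#     def noisy():
#         print("you will not see this")
#
#     add(1, 2)   # prints "add" / "return 3" and returns 3
#     noisy()     # prints nothing; stdout is swallowed for the duration of the call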
# class InfoPrintWrapper(DecoratorBase):
# def setup(self, pre='', post=''):
# self.pre = pre
# self.post = post
# def __call__(self)
# # def make_wrapper(self, func):
# # @ftl.wraps(func)
# # def wrapper(*args, **kw):
# # print(self.pre)
# # r = func(*args, **kw)
# # print(self.post)
# # return r
# # return wrapper
# class SameLineDone(InfoPrintWrapper):
# def setup(self, pre='', post='', **kws):
# self.pre = pre
# up = '\033[1A'
# right = '\033[%iC' % (len(pre) + 3)
# self.post = up + right + post
|
def resolve():
'''
code here
'''
import collections
import itertools
N = int(input())
Ss = [input()[0] for _ in range(N)]
march_letter = [item for item in Ss if item in ['M', 'A', 'R', 'C', 'H']]
march_cnt = collections.Counter(march_letter)
if len(march_cnt) < 3:
res = 0
else:
res_list = itertools.combinations(march_cnt.values(),3)
res = 0
for element in res_list:
res += element[0]*element[1]*element[2]
print(res)
if __name__ == "__main__":
resolve()
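# Worked example: if the counted first letters are {'M': 2, 'A': 1, 'R': 3},
# each choice of 3 distinct MARCH initials contributes the product of their
# counts, here 2 * 1 * 3 = 6 valid triples.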
|
import ast
import inspect
import textwrap
from .base import TohuBaseGenerator
from .ipython_support import get_ast_node_for_classes_defined_interactively_in_ipython
__all__ = ["Placeholder", "placeholder", "foreach"]
class Placeholder:
def __init__(self, name):
self.name = name
placeholder = Placeholder("<generic>")
def get_ast_node_for_classes_defined_in_source_files(cls):
orig_cls_source = textwrap.dedent(inspect.getsource(cls))
orig_cls_ast_node = ast.parse(orig_cls_source)
return orig_cls_ast_node
def get_cls_compiled_ast_node(cls):
try:
orig_cls_ast_node = get_ast_node_for_classes_defined_in_source_files(cls)
except TypeError as exc:
if exc.args[0] == "<module '__main__'> is a built-in class":
orig_cls_ast_node = get_ast_node_for_classes_defined_interactively_in_ipython(cls)
else:
# unexpected error; re-raise the exception
raise
orig_cls_compiled = compile(orig_cls_ast_node, "<string>", "exec")
return orig_cls_compiled
def reevaluate_class_definition(
orig_cls_compiled_ast_node, *, orig_cls_name, global_vars, local_vars, **custom_var_defs
):
my_global_vars = global_vars.copy()
my_global_vars.update(custom_var_defs)
my_global_vars.update(local_vars)
my_local_vars = {}
exec(orig_cls_compiled_ast_node, my_global_vars, my_local_vars)
# Sanity check to ensure the code only evaluated the expected class definition
assert list(my_local_vars.keys()) == [orig_cls_name], "Unexpected object(s) found during code evaluation."
reevaluated_cls = my_local_vars[orig_cls_name]
return reevaluated_cls
def restore_globals(global_vars, names, clashes):
for name in names:
if name in clashes:
# restore items that were previously defined
global_vars[name] = clashes[name]
else:
# remove items which didn't exist before
global_vars.pop(name)
def foreach(**var_defs):
new_names = var_defs.keys()
parent_frame = inspect.currentframe().f_back
global_vars = parent_frame.f_globals
local_vars = parent_frame.f_locals
clashes = {name: global_vars[name] for name in new_names if name in global_vars}
global_vars.update(var_defs)
def make_foreach_closure(cls):
if not inspect.isclass(cls):
raise TypeError(
f"Foreach decorator must be applied to a tohu generator class, not an object of type {type(cls)}."
)
if not issubclass(cls, TohuBaseGenerator):
raise TypeError("Decorated class must be a subclass of TohuBaseGenerator.")
orig_cls_compiled_ast_node = get_cls_compiled_ast_node(cls)
orig_cls_name = cls.__name__
class ForeachWrapper:
def __init__(self, *args, **kwargs):
self.init_args = args
self.init_kwargs = kwargs
def foreach(self, **custom_var_defs):
custom_var_names = list(custom_var_defs.keys())
missing_params = list(set(new_names).difference(custom_var_names))
extra_params = list(set(custom_var_names).difference(new_names))
if missing_params:
raise ValueError(f"Missing parameter(s): {', '.join(missing_params)!r}")
if extra_params:
raise ValueError(f"Extra parameter(s) provided: {', '.join(extra_params)!r}")
# Re-evaluate the class definition, including the previously missing
# variable values to replace the placeholders.
rewritten_cls = reevaluate_class_definition(
orig_cls_compiled_ast_node,
orig_cls_name=orig_cls_name,
global_vars=global_vars,
local_vars=local_vars,
**custom_var_defs,
)
return rewritten_cls(*self.init_args, **self.init_kwargs)
restore_globals(global_vars, new_names, clashes)
return ForeachWrapper
return make_foreach_closure
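# A hedged usage sketch (the generator and field names below are hypothetical;
# the decorated class must subclass TohuBaseGenerator and its body must refer
# to the names injected by the decorator, which are bound later via .foreach()):
#
#     @foreach(country=placeholder)
#     class AddressGenerator(CustomGenerator):
#         city = CityGenerator(country)
#
#     gen = AddressGenerator().foreach(country="DE")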
|
def generateTraverse(content, numberOfAssertions):
with open("traverse_tests/main_model_" + str(numberOfAssertions) + ".c", 'w+') as f:
for i,line in enumerate(content):
if "int action_run;" in line:
for x in range(0, numberOfAssertions):
f.write('\nint path_' + str(x) + " = 0;\n")
elif "standard_metadata.egress_spec = port;" in line:
for x in range(0, numberOfAssertions):
f.write('\n\tpath_' + str(x) + " = 1;\n")
elif "//Emit hdr.ethernet" in line:
for x in range(0, numberOfAssertions):
f.write('\tif(!path_' + str(x) + ") { printf(\"assertion error: traverse_path\"); }\n")
f.write(line)
def generateEquality(content, numberOfAssertions):
with open("equality_tests/main_model_" + str(numberOfAssertions) + ".c", 'w+') as f:
for i,line in enumerate(content):
if "standard_metadata.egress_spec = port;" in line:
for x in range(0, numberOfAssertions):
f.write("\tif(!(hdr.ipv4.ttl != 0)) { printf(\"assertion error: hdr.ipv4.ttl != 0\"); }\n")
f.write(line)
def generateFakeEquality(content, numberOfAssertions):
with open("symbolic_equality_tests/main_model_fake_vars" + str(numberOfAssertions) + ".c", 'w+') as f:
for i,line in enumerate(content):
if "standard_metadata.egress_spec = port;" in line:
for x in range(0, numberOfAssertions):
varname = "hdr.ipv4.fake_var_" + str(x)
f.write("\tif(!(" + varname + " != 0)) { printf(\"assertion error: " + varname + " != 0\"); }\n")
f.write(line)
def generateFakeVarsFile(content, numberOfAssertions):
with open("main_model_fake_vars.c", 'w+') as f:
for i,line in enumerate(content):
if "} ipv4_t;" in line:
for x in range(0, numberOfAssertions):
f.write("\tuint32_t fake_var_" + str(x) + " : 32;\n")
f.write(line)
numberOfAssertions = 200
with open("main_model.c") as f:
content = f.readlines()
generateFakeVarsFile(content, numberOfAssertions)
with open("main_model_fake_vars.c") as f:
contentFake = f.readlines()
for x in range(0, numberOfAssertions):
generateTraverse(content, x)
generateEquality(content, x)
generateFakeEquality(contentFake, x) |
from flask import Flask, render_template, request
from flask_socketio import SocketIO, emit, disconnect
from streamer import Stream, auth, preprocessor, clf_path
app = Flask(__name__)
socketio = SocketIO(app)
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@socketio.on('connect', namespace='/topic')
def connect():
topic = request.args.get('topic')
print(topic)
@socketio.on('disconnect', namespace='/topic')
def disconnect_cleanup():
stream.disconnect()
disconnect()
if __name__ == '__main__':
# Create stream object with given credentials
global stream
stream = Stream(auth, preprocessor, clf_path, socketio)
# Streaming filter
stream_thread = stream.filter(
track=["jakarta"],
threaded=True
)
socketio.run(app, host='0.0.0.0', port=8000)
|
from typing import List, Optional, Tuple
from distil.primitives.ensemble_forest import EnsembleForestPrimitive
from common_primitives.extract_columns_semantic_types import (
ExtractColumnsBySemanticTypesPrimitive,
)
from common_primitives.grouping_field_compose import GroupingFieldComposePrimitive
from common_primitives.column_parser import ColumnParserPrimitive
from common_primitives.dataset_to_dataframe import DatasetToDataFramePrimitive
from common_primitives.simple_profiler import SimpleProfilerPrimitive
from common_primitives.xgboost_gbtree import XGBoostGBTreeClassifierPrimitive
from common_primitives.xgboost_regressor import XGBoostGBTreeRegressorPrimitive
from common_primitives.construct_predictions import ConstructPredictionsPrimitive
from processing.metrics import (
regression_metrics,
classification_metrics,
confidence_metrics,
)
from d3m.metadata.pipeline import Pipeline, PrimitiveStep, Resolver
from d3m.metadata.base import ArgumentType
def create_pipeline(
metric: str,
use_boost: bool = False,
grid_search=False,
n_jobs: int = -1,
compute_confidences=False,
resolver: Optional[Resolver] = None,
) -> Tuple[Pipeline, List[int]]:
previous_step = 0
tune_steps = []
input_val = "steps.{}.produce"
ts_tabular_pipeline = Pipeline()
ts_tabular_pipeline.add_input(name="inputs")
# step 0 - Extract dataframe from dataset
step = PrimitiveStep(
primitive_description=DatasetToDataFramePrimitive.metadata.query(),
resolver=resolver,
)
step.add_argument(
name="inputs", argument_type=ArgumentType.CONTAINER, data_reference="inputs.0"
)
step.add_output("produce")
ts_tabular_pipeline.add_step(step)
step = PrimitiveStep(
primitive_description=SimpleProfilerPrimitive.metadata.query(),
resolver=resolver,
)
step.add_argument(
name="inputs",
argument_type=ArgumentType.CONTAINER,
data_reference=input_val.format(previous_step),
)
step.add_output("produce")
ts_tabular_pipeline.add_step(step)
previous_step += 1
# Parse columns.
step = PrimitiveStep(
primitive_description=ColumnParserPrimitive.metadata.query(), resolver=resolver
)
step.add_argument(
name="inputs",
argument_type=ArgumentType.CONTAINER,
data_reference=input_val.format(previous_step),
)
step.add_output("produce")
semantic_types = (
"http://schema.org/Boolean",
"http://schema.org/Integer",
"http://schema.org/Float",
"https://metadata.datadrivendiscovery.org/types/FloatVector",
"http://schema.org/DateTime",
)
step.add_hyperparameter("parse_semantic_types", ArgumentType.VALUE, semantic_types)
ts_tabular_pipeline.add_step(step)
previous_step += 1
parse_step = previous_step
# Extract attributes
step = PrimitiveStep(
primitive_description=ExtractColumnsBySemanticTypesPrimitive.metadata.query(),
resolver=resolver,
)
step.add_argument(
name="inputs",
argument_type=ArgumentType.CONTAINER,
data_reference=input_val.format(parse_step),
)
step.add_output("produce")
step.add_hyperparameter(
"semantic_types",
ArgumentType.VALUE,
(
"https://metadata.datadrivendiscovery.org/types/Attribute",
"https://metadata.datadrivendiscovery.org/types/PrimaryKey",
),
)
ts_tabular_pipeline.add_step(step)
previous_step += 1
attributes_step = previous_step
# Extract targets
step = PrimitiveStep(
primitive_description=ExtractColumnsBySemanticTypesPrimitive.metadata.query(),
resolver=resolver,
)
step.add_argument(
name="inputs",
argument_type=ArgumentType.CONTAINER,
data_reference=input_val.format(parse_step),
)
step.add_output("produce")
target_types = (
"https://metadata.datadrivendiscovery.org/types/Target",
"https://metadata.datadrivendiscovery.org/types/TrueTarget",
)
step.add_hyperparameter("semantic_types", ArgumentType.VALUE, target_types)
ts_tabular_pipeline.add_step(step)
previous_step += 1
target_step = previous_step
if use_boost:
if metric in regression_metrics:
step = PrimitiveStep(
primitive_description=XGBoostGBTreeRegressorPrimitive.metadata.query(),
resolver=resolver,
)
elif metric in classification_metrics and metric not in confidence_metrics:
# xgboost classifier doesn't support probability generation so no support for confidence-based metrics
step = PrimitiveStep(
primitive_description=XGBoostGBTreeClassifierPrimitive.metadata.query(),
resolver=resolver,
)
else:
step = PrimitiveStep(
primitive_description=EnsembleForestPrimitive.metadata.query(),
resolver=resolver,
)
step.add_hyperparameter("grid_search", ArgumentType.VALUE, grid_search)
step.add_hyperparameter("small_dataset_fits", ArgumentType.VALUE, 1)
step.add_hyperparameter(
"compute_confidences", ArgumentType.VALUE, compute_confidences
)
step.add_hyperparameter("n_jobs", ArgumentType.VALUE, n_jobs)
step.add_argument(
name="inputs",
argument_type=ArgumentType.CONTAINER,
data_reference=input_val.format(attributes_step),
)
step.add_argument(
name="outputs",
argument_type=ArgumentType.CONTAINER,
data_reference=input_val.format(target_step),
)
step.add_output("produce")
if not use_boost:
step.add_hyperparameter("metric", ArgumentType.VALUE, metric)
ts_tabular_pipeline.add_step(step)
previous_step += 1
tune_steps.append(previous_step)
step = PrimitiveStep(
primitive_description=ConstructPredictionsPrimitive.metadata.query(),
resolver=resolver,
)
step.add_argument(
name="inputs",
argument_type=ArgumentType.CONTAINER,
data_reference=input_val.format(previous_step),
)
step.add_argument(
name="reference",
argument_type=ArgumentType.CONTAINER,
data_reference=input_val.format(parse_step),
)
step.add_output("produce")
ts_tabular_pipeline.add_step(step)
previous_step += 1
# Adding output step to the pipeline
ts_tabular_pipeline.add_output(
name="output", data_reference=input_val.format(previous_step)
)
return (ts_tabular_pipeline, tune_steps)
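# A hedged usage sketch (the metric name must be one recognised by
# processing.metrics; the second return value lists the tunable step indices):
#
#     pipeline, tune_steps = create_pipeline("f1Macro", use_boost=False, n_jobs=4)
#     print(len(pipeline.steps), tune_steps)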
|
from __future__ import absolute_import, division, print_function
import iotbx.phil
import iotbx.pdb
import iotbx.mrcfile
from cctbx import crystal
from cctbx import maptbx
from libtbx.utils import Sorry
import sys, os
from cctbx.array_family import flex
from scitbx.math import matrix
from copy import deepcopy
from libtbx.utils import null_out
import libtbx.callbacks # import dependency
from libtbx import group_args
from six.moves import range
from six.moves import zip
master_phil = iotbx.phil.parse("""
input_files {
seq_file = None
.type = path
.short_caption = Sequence file
.help = Sequence file (unique chains only, \
1-letter code, chains separated by \
blank line or greater-than sign.) \
Can have chains that are DNA/RNA/protein and\
all can be present in one file. \
If not supplied, must supply molecular mass or \
solvent content.
map_file = None
.type = path
.help = File with CCP4-style map
.short_caption = Map file
half_map_file = None
.type = path
.multiple = True
.short_caption = Half map
.help = Half map (two should be supplied) for FSC calculation. Must \
have grid identical to map_file
ncs_file = None
.type = path
.help = File with symmetry information (typically point-group NCS with \
the center specified). Typically in PDB format. \
Can also be a .ncs_spec file from phenix. \
Created automatically if symmetry is specified.
.short_caption = symmetry file
pdb_file = None
.type = path
.help = Optional PDB file matching map_file to be offset
pdb_to_restore = None
.type = path
.help = Optional PDB file to restore to position matching original \
map_file. Used in combination with info_file=xxx.pkl \
and restored_pdb=yyyy.pdb
.short_caption = PDB to restore
info_file = None
.type = path
.help = Optional pickle file with information from a previous run.\
Can be used with pdb_to_restore to restore a PDB file to \
to position matching original \
map_file.
.short_caption = Info file
target_ncs_au_file = None
.type = path
.help = Optional PDB file to partially define the ncs asymmetric \
unit of the map. The coordinates in this file will be used \
to mark part of the ncs au and all points nearby that are \
not part of another ncs au will be added.
input_weight_map_pickle_file = None
.type = path
.short_caption = Input weight map pickle file
.help = Weight map pickle file
}
output_files {
magnification_map_file = magnification_map.ccp4
.type = path
.help = Input map file with magnification applied. Only written if\
magnification is applied.
.short_caption = Magnification map file
magnification_ncs_file = magnification_ncs.ncs_spec
.type = path
.help = Input NCS with magnification applied. Only written if\
magnification is applied.
.short_caption = Magnification NCS file
shifted_map_file = shifted_map.ccp4
.type = path
.help = Input map file shifted to new origin.
.short_caption = Shifted map file
shifted_sharpened_map_file = shifted_sharpened_map.ccp4
.type = path
.help = Input map file shifted to new origin and sharpened.
.short_caption = Shifted sharpened map file
sharpened_map_file = sharpened_map.ccp4
.type = str
.short_caption = Sharpened map file
.help = Output sharpened map file, superimposed on the original map.
.input_size = 400
shifted_pdb_file = shifted_pdb.pdb
.type = path
.help = Input pdb file shifted to new origin.
.short_caption = Shifted pdb file
shifted_ncs_file = shifted_ncs.ncs_spec
.type = path
.help = NCS information shifted to new origin.
.short_caption = Output NCS info file
shifted_used_ncs_file = shifted_used_ncs.ncs_spec
.type = path
.help = NCS information (just the part that is used) shifted \
to new origin.
.short_caption = Output used NCS info file
output_directory = segmented_maps
.type = path
.help = Directory where output files are to be written
.short_caption = Output directory
box_map_file = box_map_au.ccp4
.type = path
.help = Output map file with one NCS asymmetric unit, cut out box
.short_caption = Box NCS map file
box_mask_file = box_mask_au.ccp4
.type = path
.help = Output mask file with one NCS asymmetric unit, cut out box
.short_caption = Box NCS mask file
box_buffer = 5
.type = int
.help = Buffer (grid units) around NCS asymmetric unit in box_mask and map
.short_caption = Box buffer size
au_output_file_stem = shifted_au
.type = str
.help = File stem for output map files with one NCS asymmetric unit
.short_caption = Output au file stem
write_intermediate_maps = False
.type = bool
.help = Write out intermediate maps and masks for visualization
.short_caption = Write intermediate maps
write_output_maps = True
.type = bool
.help = Write out maps
.short_caption = Write maps
remainder_map_file = remainder_map.ccp4
.type = path
.help = output map file with remainder after initial regions identified
.short_caption = Output remainder map file
output_info_file = segment_and_split_map_info.pkl
.type = path
.help = Output pickle file with information about map and masks
.short_caption = Output pickle file
restored_pdb = None
.type = path
.help = Output name of PDB restored to position matching original \
map_file. Used in combination with info_file=xxx.pkl \
and pdb_to_restore=xxxx.pdb
.short_caption = Restored PDB file
output_weight_map_pickle_file = weight_map_pickle_file.pkl
.type = path
.short_caption = Output weight map pickle file
.help = Output weight map pickle file
}
crystal_info {
chain_type = *None PROTEIN RNA DNA
.type = choice
.short_caption = Chain type
.help = Chain type. Determined automatically from sequence file if \
not given. Mixed chain types are fine (leave blank if so).
sequence = None
.type = str
.short_caption = Sequence
.help = Sequence as string
is_crystal = None
.type = bool
.short_caption = Is a crystal
.help = Defines whether this is a crystal (or cryo-EM).\
Default is True if use_sg_symmetry=True and False otherwise.
use_sg_symmetry = False
.type = bool
.short_caption = Use space-group symmetry
.help = If you set use_sg_symmetry=True then the symmetry of the space\
group will be used. For example in P1 a point at one end of \
the \
unit cell is next to a point on the other end. Normally for \
cryo-EM data this should be set to False and for crystal data \
it should be set to True. This will normally also set the \
value of is_crystal (same value as use_sg_symmetry) and \
restrict_map_size (False if use_sg_symmetry=True).
resolution = None
.type = float
.short_caption = resolution
.help = Nominal resolution of the map. This is used later to decide on\
resolution cutoffs for Fourier inversion of the map. Note: \
the resolution is not cut at this value, it is cut at \
resolution*d_min_ratio if at all.
space_group = None
.type = space_group
.help = Space group (used for boxed maps)
.style = hidden
unit_cell = None
.type = unit_cell
.help = Unit Cell (used for boxed maps)
.style = hidden
original_unit_cell = None
.type = unit_cell
.help = Original unit cell (of input map). Used internally
.style = hidden
original_unit_cell_grid = None
.type = ints
.help = Original unit cell grid (of input map). Used internally
.style = hidden
molecular_mass = None
.type = float
.help = Molecular mass of molecule in Da. Used as alternative method \
of specifying solvent content.
.short_caption = Molecular mass in Da
solvent_content = None
.type = float
.help = Solvent fraction of the cell. Used for ID of \
solvent content in boxed maps.
.short_caption = Solvent content
solvent_content_iterations = 3
.type = int
.help = Iterations of solvent fraction estimation. Used for ID of \
solvent content in boxed maps.
.short_caption = Solvent fraction iterations
.style = hidden
wang_radius = None
.type = float
.help = Wang radius for solvent identification. \
Default is 1.5* resolution
.short_caption = Wang radius
buffer_radius = None
.type = float
.help = Buffer radius for mask smoothing. \
Default is resolution
.short_caption = Buffer radius
pseudo_likelihood = None
.type = bool
.help = Use pseudo-likelihood method for half-map sharpening. \
(In development)
.short_caption = Pseudo-likelihood
.style = hidden
}
reconstruction_symmetry {
symmetry = None
.type = str
.short_caption = Symmetry type
.help = Symmetry used in reconstruction. For example D7, C3, C2\
I (icosahedral),T (tetrahedral), or ANY (try everything and \
use the highest symmetry found). Not needed if ncs_file is supplied.
include_helical_symmetry = True
.type = bool
.short_caption = Include helical symmetry
.help = You can include or exclude searches for helical symmetry
symmetry_center = None
.type = floats
.short_caption = symmetry center
.help = Center (in A) for symmetry operators (if symmetry is found \
automatically). \
If set to None, first guess is the center of the cell and then \
if that fails, found automatically as the center of the \
density in the map.
optimize_center = False
.type = bool
.short_caption = Optimize symmetry center
.help = Optimize position of symmetry center. Also checks for center \
at (0,0,0) vs center of map
helical_rot_deg = None
.type = float
.short_caption = helical rotation
.help = helical rotation about z in degrees
helical_trans_z_angstrom = None
.type = float
.short_caption = helical translation
.help = helical translation along z in Angstrom units
max_helical_optimizations = 2
.type = int
.short_caption = Max helical optimizations
.help = Number of optimizations of helical parameters\
when finding symmetry
max_helical_ops_to_check = 5
.type = int
.short_caption = Max helical ops to check
.help = Number of helical operations in each direction to check \
when finding symmetry
max_helical_rotations_to_check = None
.type = int
.short_caption = Max helical rotations
.help = Number of helical rotations to check \
when finding symmetry
two_fold_along_x = None
.type = bool
.short_caption = D two-fold along x
.help = Specifies if D or I two-fold is along x (True) or y (False). \
If None, both are tried.
smallest_object = None
.type = float
.short_caption = Smallest object to consider
.help = Dimension of smallest object to consider\
when finding symmetry. Default is 5 * resolution
score_basis = ncs_score cc *None
.type = choice
.short_caption = Symmetry score basis
.help = Symmetry score basis. Normally ncs_score (sqrt(n)* cc) is \
used except for identification of helical symmetry
scale_weight_fractional_translation = 1.05
.type = float
.short_caption = Scale on fractional translation
.help = Give slight increase in weighting in helical symmetry \
search to translations that are a fraction (1/2, 1/3) of \
the d-spacing of the peak of intensity in the Fourier \
transform of the density.
random_points = 100
.type = int
.short_caption = Random points
.help = Number of random points in map to examine in finding symmetry
identify_ncs_id = True
.type = bool
.short_caption = Identify NCS ID
.help = If symmetry is not point-group symmetry, try each possible \
operator when evaluating symmetry and choose the one that \
results in the most uniform density at symmetry-related points.
min_ncs_cc = 0.75
.type = float
.short_caption = Minimum symmetry CC to keep it
.help = Minimum symmetry CC to keep operators when identifying \
automatically
n_rescore = 5
.type = int
.short_caption = symmetry operators to rescore
.help = Number of symmetry operators to rescore
op_max = 14
.type = int
.short_caption = Max operators to try
.help = If symmetry is ANY, try up to op_max-fold symmetries
tol_r = 0.02
.type = float
.help = tolerance in rotations for point group or helical symmetry
.short_caption = Rotation tolerance
abs_tol_t = 2
.type = float
.help = tolerance in translations (A) for point group or helical symmetry
.short_caption = Translation tolerance absolute
max_helical_operators = None
.type = int
.help = Maximum helical operators (if extending existing helical\
operators)
.short_caption = Maximum helical operators
rel_tol_t = .05
.type = float
.help = tolerance in translations (fractional) for point group or \
helical symmetry
.short_caption = Translation tolerance fractional
require_helical_or_point_group_symmetry = False
.type = bool
.help = normally helical or point-group symmetry (or none) is expected. \
However in some cases (helical + rotational symmetry for \
example) this is not needed and is not the case.
.short_caption = Require helical or point-group or no symmetry
}
map_modification {
magnification = None
.type = float
.short_caption = Magnification
.help = Magnification to apply to input map. Input map grid will be \
scaled by magnification factor before anything else is done.
b_iso = None
.type = float
.short_caption = Target b_iso
.help = Target B-value for map (sharpening will be applied to yield \
this value of b_iso). If sharpening method is not supplied, \
default is to use b_iso_to_d_cut sharpening.
b_sharpen = None
.type = float
.short_caption = Sharpening
.help = Sharpen with this b-value. Contrast with b_iso that yields a \
targeted value of b_iso. B_sharpen greater than zero is sharpening.\
Less than zero is blurring.
b_blur_hires = 200
.type = float
.short_caption = high_resolution blurring
.help = Blur high_resolution data (higher than d_cut) with \
this b-value. Contrast with b_sharpen applied to data up to\
d_cut. \
Note on defaults: If None and b_sharpen is positive (sharpening) \
then high-resolution data is left as is (not sharpened). \
If None and b_sharpen is negative (blurring) high-resolution data\
is also blurred.
resolution_dependent_b = None
.type = floats
.short_caption = resolution_dependent b
.help = If set, apply resolution_dependent_b (b0 b1 b2). \
Log10(amplitudes) will start at 1, change to b0 at half \
of resolution specified, changing linearly, \
change to b1/2 at resolution specified, \
and change to b1/2+b2 at d_min_ratio*resolution
normalize_amplitudes_in_resdep = False
.type = bool
.short_caption = Normalize amplitudes in resdep
.help = Normalize amplitudes in resolution-dependent sharpening
d_min_ratio = 0.833
.type = float
.short_caption = Sharpen d_min ratio
.help = Sharpening will be applied using d_min equal to \
d_min_ratio times resolution. Default is 0.833
scale_max = 100000
.type = float
.short_caption = Scale_max
.help = Scale amplitudes from inverse FFT to yield maximum of this value
input_d_cut = None
.type = float
.short_caption = d_cut
.help = High-resolution limit for sharpening
rmsd = None
.type = float
.short_caption = RMSD of model
.help = RMSD of model to true model (if supplied). Used to \
estimate expected fall-off with resolution of correct part \
of model-based map. If None, assumed to be resolution \
times rmsd_resolution_factor.
rmsd_resolution_factor = 0.25
.type = float
.short_caption = rmsd resolution factor
.help = default RMSD is resolution times resolution factor
fraction_complete = None
.type = float
.short_caption = Completeness model
.help = Completeness of model (if supplied). Used to \
estimate correct part \
of model-based map. If None, estimated from max(FSC).
regions_to_keep = None
.type = int
.short_caption = Regions to keep
.help = You can specify a limit to the number of regions to keep\
when generating the asymmetric unit of density.
auto_sharpen = True
.type = bool
.short_caption = Automatically determine sharpening
.help = Automatically determine sharpening using kurtosis maximization\
or adjusted surface area
auto_sharpen_methods = no_sharpening b_iso *b_iso_to_d_cut \
resolution_dependent model_sharpening \
half_map_sharpening target_b_iso_to_d_cut None
.type = choice(multi=True)
.short_caption = Sharpening methods
.help = Methods to use in sharpening. b_iso searches for b_iso to \
maximize sharpening target (kurtosis or adjusted_sa). \
b_iso_to_d_cut applies b_iso only up to resolution specified, with \
fall-off of k_sharpen. Resolution dependent adjusts 3 parameters \
to sharpen variably over resolution range. Default is \
b_iso_to_d_cut. target_b_iso_to_d_cut uses target_b_iso_ratio \
to set b_iso.
box_in_auto_sharpen = False
.type = bool
.short_caption = Use box for auto_sharpening
.help = Use a representative box of density for initial \
auto-sharpening instead of the entire map.
density_select_in_auto_sharpen = True
.type = bool
.short_caption = density_select to choose box
.help = Choose representative box of density for initial \
auto-sharpening with density_select method \
(choose region where there is high density). \
Normally use this as well as density_select=True which \
carries out density_select at start of segmentation.
density_select_threshold_in_auto_sharpen = None
.type = float
.short_caption = density_select threshold to choose box
.help = Threshold for density select choice of box. Default is 0.05. \
If your map has low overall contrast you might need to make this\
bigger such as 0.2.
allow_box_if_b_iso_set = False
.type = bool
.short_caption = Allow box if b_iso set
.help = Allow box_in_auto_sharpen (if set to True) even if \
b_iso is set. Default is to set box_in_auto_sharpen=False \
if b_iso is set.
soft_mask = True
.type = bool
.help = Use soft mask (smooth change from inside to outside with radius\
based on resolution of map). Required if you use half-map \
sharpening without a model, otherwise optional.
.short_caption = Soft mask
use_weak_density = False
.type = bool
.short_caption = Use box with poor density
.help = When choosing box of representative density, use poor \
density (to get optimized map for weaker density)
discard_if_worse = None
.type = bool
.short_caption = Discard sharpening if worse
.help = Discard sharpening if worse
local_sharpening = None
.type = bool
.short_caption = Local sharpening
.help = Sharpen locally using overlapping regions. \
NOTE: Best to turn off local_aniso_in_local_sharpening \
if symmetry is present.\
If local_aniso_in_local_sharpening is True and symmetry is \
present this can distort the map for some symmetry copies \
because an anisotropy correction is applied\
based on local density in one copy and is transferred without \
rotation to other copies.
local_aniso_in_local_sharpening = None
.type = bool
.short_caption = Local anisotropy
.help = Use local anisotropy in local sharpening. \
Default is True unless symmetry is present.
overall_before_local = True
.type = bool
.short_caption = Overall before local
.help = Apply overall scaling before local scaling
select_sharpened_map = None
.type = int
.short_caption = Sharpened map to use
.help = Select a single sharpened map to use
read_sharpened_maps = None
.type = bool
.short_caption = Read sharpened maps
.help = Read in previously-calculated sharpened maps
write_sharpened_maps = None
.type = bool
.short_caption = Write sharpened maps
.help = Write out local sharpened maps
smoothing_radius = None
.type = float
.short_caption = Smoothing radius
.help = Sharpen locally using smoothing_radius. Default is 2/3 of \
mean distance between centers for sharpening
box_center = None
.type = floats
.short_caption = Center of box
.help = You can specify the center of the box (A units)
box_size = 40 40 40
.type = ints
.short_caption = Size of box
.help = You can specify the size of the boxes to use (grid units)
target_n_overlap = 10
.type = int
.short_caption = Target overlap of boxes
.help = You can specify the targeted overlap of boxes in local \
sharpening
restrict_map_size = None
.type = bool
.short_caption = Restrict box map size
.help = Restrict box map to be inside full map (required for cryo-EM data).\
Default is True if use_sg_symmetry=False.
restrict_z_turns_for_helical_symmetry = 1
.type = float
.short_caption = Restrict Z turns for helical symmetry
.help = Restrict Z turns for helical symmetry. Number of \
turns of helix going each direction in Z is specified.
restrict_z_distance_for_helical_symmetry = None
.type = float
.short_caption = Restrict Z distance for helical symmetry
.help = Restrict Z distance (+/- this distance from center) \
for helical symmetry.
remove_aniso = True
.type = bool
.short_caption = Remove aniso
.help = You can remove anisotropy (overall and locally) during sharpening
cc_cut = 0.2
.type = float
.short_caption = Min reliable CC in half-maps
.help = Estimate of minimum highly reliable CC in half-map FSC. Used\
to decide at what CC value to smooth the remaining CC values.
max_cc_for_rescale = 0.2
.type = float
.short_caption = Max CC for rescale
.help = Used along with cc_cut and scale_using_last to correct for \
small errors in FSC estimation at high resolution. If the \
value of FSC near the high-resolution limit is above \
max_cc_for_rescale, assume these values are correct and do not \
correct them.
scale_using_last = 3
.type = int
.short_caption = Last N bins in FSC assumed to be about zero
.help = If set, assume that the last scale_using_last bins in the FSC \
for half-map or model sharpening are about zero (corrects for \
errors in the half-map process).
max_box_fraction = 0.5
.type = float
.short_caption = Max size of box for auto_sharpening
.help = If box is greater than this fraction of entire map, use \
entire map.
density_select_max_box_fraction = 0.95
.type = float
.short_caption = Max size of box for density_select
.help = If box is greater than this fraction of entire map, use \
entire map for density_select. Default is 0.95
mask_atoms = True
.type = bool
.short_caption = Mask atoms
.help = Mask atoms when using model sharpening
mask_atoms_atom_radius = 3
.type = float
.short_caption = Mask radius
.help = Mask used for mask_atoms will have radius mask_atoms_atom_radius
value_outside_atoms = None
.type = str
.short_caption = Value outside atoms
.help = Value of map outside atoms (set to 'mean' to have mean \
value inside and outside mask be equal)
k_sharpen = 10
.type = float
.short_caption = sharpening transition
.help = Steepness of transition between sharpening (up to resolution \
) and not sharpening (d < resolution). Note: for blurring, \
all data are blurred (regardless of resolution), while for \
sharpening, only data with d about resolution or lower are \
sharpened. This prevents making very high-resolution data too \
strong. Note 2: if k_sharpen is zero, then no \
transition is applied and all data is sharpened or blurred. \
Note 3: only used if b_iso is set.
iterate = False
.type = bool
.short_caption = Iterate auto-sharpening
.help = You can iterate auto-sharpening. This is useful in cases where \
you do not specify the solvent content and it is not \
accurately estimated until sharpening is optimized.
optimize_b_blur_hires = False
.type = bool
.short_caption = Optimize value of b_blur_hires
.help = Optimize value of b_blur_hires. \
Only applies for auto_sharpen_methods b_iso_to_d_cut and \
b_iso. This is normally carried out and helps prevent \
over-blurring at high resolution if the same map is \
sharpened more than once.
optimize_d_cut = None
.type = bool
.short_caption = Optimize value of d_cut
.help = Optimize value of d_cut. \
Only applies for auto_sharpen_methods b_iso_to_d_cut and \
b_iso. Not normally carried out.
adjust_region_weight = True
.type = bool
.short_caption = Adjust region weight
.help = Adjust region_weight to make overall change in surface area \
equal to overall change in normalized regions over the range \
of search_b_min to search_b_max using b_iso_to_d_cut.
region_weight_method = initial_ratio *delta_ratio b_iso
.type = choice
.short_caption = Region weight method
.help = Method for choosing region_weights. Initial_ratio uses \
ratio of surface area to regions at low B value. Delta \
ratio uses change in this ratio from low to high B. B_iso \
uses resolution-dependent b_iso (not weights) with the \
formula b_iso=5.9*d_min**2
region_weight_factor = 1.0
.type = float
.short_caption = Region weight factor
.help = Multiplies region_weight after calculation with \
region_weight_method above
region_weight_buffer = 0.1
.type = float
.short_caption = Region weight factor buffer
.help = Region_weight adjusted to be region_weight_buffer \
away from minimum or maximum values
region_weight_default = 30.
.type = float
.short_caption = Region weight default
.help = Region_weight adjusted to be region_weight_default\
if no information available
target_b_iso_ratio = 5.9
.type = float
.short_caption = Target b_iso ratio
.help = Target b_iso ratio : b_iso is estimated as \
target_b_iso_ratio * resolution**2
signal_min = 3.0
.type = float
.short_caption = Minimum signal
.help = Minimum signal in estimation of optimal b_iso. If\
not achieved, use any other method chosen.
target_b_iso_model_scale = 0.
.type = float
.short_caption = scale on target b_iso ratio for model
.help = For model sharpening, the target_biso is scaled \
(normally zero).
search_b_min = -100
.type = float
.short_caption = Low bound for b_iso search
.help = Low bound for b_iso search.
search_b_max = 300
.type = float
.short_caption = High bound for b_iso search
.help = High bound for b_iso search.
search_b_n = 21
.type = int
.short_caption = Number of b_iso values to search
.help = Number of b_iso values to search.
residual_target = 'adjusted_sa'
.type = str
.short_caption = Residual target
.help = Target for maximization steps in sharpening. \
Can be kurtosis or adjusted_sa (adjusted surface area)
sharpening_target = 'adjusted_sa'
.type = str
.short_caption = Overall sharpening target
.help = Overall target for sharpening. Can be kurtosis or adjusted_sa \
(adjusted surface area). Used to decide which sharpening approach \
is used. Note that during optimization, residual_target is used \
(they can be the same.)
region_weight = 40
.type = float
.short_caption = Region weighting
.help = Region weighting in adjusted surface area calculation.\
Score is surface area minus region_weight times number of regions.\
Default is 40. A smaller value will give more sharpening.
sa_percent = 30.
.type = float
.short_caption = Percent of target regions in adjusted_sa
.help = Percent of target regions used in calculation of adjusted \
surface area. Default is 30.
fraction_occupied = 0.20
.type = float
.short_caption = Fraction of molecular volume inside contours
.help = Fraction of molecular volume targeted to be inside contours. \
Used to set contour level. Default is 0.20
n_bins = 20
.type = int
.short_caption = Resolution bins
.help = Number of resolution bins for sharpening. Default is 20.
max_regions_to_test = 30
.type = int
.short_caption = Max regions to test
.help = Number of regions to test for surface area in adjusted_sa \
scoring of sharpening
eps = None
.type = float
.short_caption = Derivative shift for sharpening maximization
.help = Shift used in calculation of derivatives for \
sharpening maximization. Default is 0.01 for kurtosis and 0.5 for \
adjusted_sa.
k_sol = 0.35
.type = float
.help = k_sol value for model map calculation. IGNORED (Not applied)
.short_caption = k_sol IGNORED
.style = hidden
b_sol = 50
.type = float
.help = b_sol value for model map calculation. IGNORED (Not applied)
.short_caption = b_sol IGNORED
.style = hidden
}
segmentation {
select_au_box = None
.type = bool
.help = Select box containing at least one representative region of \
the map. Also select just symmetry operators relevant to that box. \
Default is true if number of operators is at least \
n_ops_to_use_au_box
.short_caption = select au box
n_ops_to_use_au_box = 25
.type = int
.help = If number of operators is this big or more and \
select_au_box is None, set it to True.
.short_caption = N ops to use au_box
n_au_box = 5
.type = int
.help = Number of symmetry copies to try and get inside au_box
.short_caption = N au box
lower_bounds = None
.type = ints
.help = You can select a part of your map for analysis with \
lower_bounds and upper_bounds.
.short_caption = Lower bounds
upper_bounds = None
.type = ints
.help = You can select a part of your map for analysis with \
lower_bounds and upper_bounds.
.short_caption = Upper bounds
density_select = True
.type = bool
.help = Run map_box with density_select=True to cut out the region \
in the input map that contains density. Useful if the input map \
is much larger than the structure. Done before segmentation is\
carried out.
.short_caption = Trim map to density
density_select_threshold = 0.05
.type = float
.help = Choose region where density is this fraction of maximum or greater
.short_caption = threshold for density_select
get_half_height_width = None
.type = bool
.help = Use 4 times half-width at half-height as estimate of max size
.short_caption = Half-height width estimation
box_ncs_au = True
.type = bool
.help = Box the map containing just the au of the map
.short_caption = Box NCS au
cell_cutoff_for_solvent_from_mask = 150
.type = float
.help = For cells with average edge over this cutoff, use the\
low resolution mask (backup) method for solvent estimation
.short_caption = Cell cutoff for solvent_from_mask
mask_padding_fraction = 0.025
.type = float
.help = Adjust threshold of standard deviation map in low resolution \
mask identification of solvent content to give this much more \
inside mask than would be obtained with the value of\
fraction_of_max_mask_threshold.
.short_caption = Mask padding fraction
fraction_of_max_mask_threshold = .05
.type = float
.help = threshold of standard deviation map in low resolution mask \
identification of solvent content.
.short_caption = Fraction of max mask_threshold
mask_threshold = None
.type = float
.help = threshold in identification of overall mask. If None, guess \
volume of molecule from sequence and symmetry copies.
.short_caption = Mask threshold
grid_spacing_for_au = 3
.type = int
.help = Grid spacing for asymmetric unit when constructing asymmetric unit.
.short_caption = Grid spacing for constructing asymmetric unit
radius = None
.type = float
.help = Radius for constructing asymmetric unit.
.short_caption = Radius for constructing asymmetric unit
value_outside_mask = 0.0
.type = float
.help = Value to assign to density outside masks
.short_caption = Value outside mask
density_threshold = None
.type = float
.short_caption = Density threshold
.help = Threshold density for identifying regions of density. \
Applied after normalizing the density in the region of \
the molecule to an rms of 1 and mean of zero.
starting_density_threshold = None
.type = float
.short_caption = Starting density threshold
.help = Optional guess of threshold density
iteration_fraction = 0.2
.type = float
.short_caption = Iteration fraction
.help = On iteration of finding regions, assume target volume is \
this fraction of the value on previous iteration
max_overlap_fraction = 0.05
.type = float
.short_caption = Max overlap
.help = Maximum fractional overlap allowed to density in another \
asymmetric unit. Definition of a bad region.
remove_bad_regions_percent = 1
.type = float
.short_caption = Remove worst overlapping regions
.help = Remove the worst regions that are part of more than one NCS \
asymmetric unit, up to remove_bad_regions_percent of the total
require_complete = True
.type = bool
.short_caption = Require all symmetry copies to be represented for a region
.help = Require all symmetry copies to be represented for a region
split_if_possible = True
.type = bool
.short_caption = Split regions if mixed
.help = Split regions that are split in some symmetry copies.\
If None, split if most copies are split.
write_all_regions = False
.type = bool
.short_caption = Write all regions
.help = Write all regions to ccp4 map files.
max_per_au = None
.type = int
.short_caption = Max regions in au
.help = Maximum number of regions to be kept in the NCS asymmetric unit
max_per_au_ratio = 5.
.type = int
.short_caption = Max ratio of regions to expected
.help = Maximum ratio of number of regions to be kept in the \
NCS asymmetric unit to those expected
min_ratio_of_ncs_copy_to_first = 0.5
.type = float
.short_caption = Minimum ratio of ncs copy to first
.help = Minimum ratio of the last ncs_copy region size to maximum
min_ratio = 0.1
.type = float
.short_caption = Minimum ratio to keep
.help = Minimum ratio of region size to maximum to keep it
max_ratio_to_target = 3
.type = float
.help = Maximum ratio of grid points in top region to target
.short_caption = Max ratio to target
min_ratio_to_target = 0.3
.type = float
.help = Minimum ratio of grid points in top region to target
.short_caption = Min ratio to target
min_volume = 10
.type = int
.help = Minimum region size to consider (in grid points)
.short_caption = Minimum region size
residues_per_region = 50
.type = float
.help = Target number of residues per region
.short_caption = Residues per region
seeds_to_try = 10
.type = int
.help = Number of regions to try as centers
.short_caption = Seeds to try
iterate_with_remainder = True
.type = bool
.short_caption = Iterate
.help = Iterate looking for regions based on remainder from first analysis
weight_rad_gyr = 0.1
.type = float
.short_caption = Weight on radius of gyration
.help = Weight on radius of gyration of group of regions in NCS AU \
relative to weight on closeness to neighbors. Normalized to\
largest cell dimension with weight=weight_rad_gyr*300/cell_max
expand_size = None
.type = int
.help = Grid points to expand size of regions when excluding for next \
round. If None, set to approx number of grid points to get \
expand_target below
.short_caption = Expand size
expand_target = 1.5
.type = float
.help = Target expansion of regions (A)
.short_caption = Expand target
mask_additional_expand_size = 1
.type = int
.help = Mask expansion in addition to expand_size for final map
.short_caption = Mask additional expansion
mask_expand_ratio = 1
.type = int
.help = Mask expansion relative to resolution for save_box_map_ncs_au
.short_caption = Mask expand ratio
exclude_points_in_ncs_copies = True
.type = bool
.help = Exclude points that are in symmetry copies when creating NCS au. \
Does not apply if add_neighbors=True
.short_caption = Exclude points in symmetry copies
add_neighbors = True
.type = bool
.help = Add neighboring regions around the au. Turns off \
exclude_points_in_ncs_copies also.
.short_caption = Add neighbors
add_neighbors_dist = 1.
.type = float
.help = Max increase in radius of gyration by adding region to keep it.
.short_caption = Add neighbors dist
}
control {
verbose = False
.type = bool
.help = '''Verbose output'''
.short_caption = Verbose output
shift_only = None
.type = bool
.short_caption = Shift only
.help = Shift map and half_maps and stop
sharpen_only = None
.type = bool
.short_caption = Sharpen only
.help = Sharpen map and stop
check_ncs = None
.type = bool
.short_caption = Check NCS
.help = Check the NCS symmetry by estimating NCS correlation and stop
resolve_size = None
.type = int
.help = "Size of resolve to use. "
.style = hidden
quick = True
.type = bool
.help = Run quickly if possible
.short_caption = Quick run
memory_check = True
.type = bool
.help = Map-to-model checks to make sure you have enough memory on \
your machine to run. You can disable this by setting this \
keyword to False. The estimates are approximate so it is \
possible your job could run even if the check fails. Note \
the check does not take any other uses of the memory on \
your machine into account.
.short_caption = Memory check
save_box_map_ncs_au = False
.type = bool
.help = Controls whether the map_box ncs_au is saved. Internal use only
.style = hidden
write_files = True
.type = bool
.help = Controls whether files are written
.short_caption = Write files
multiprocessing = *multiprocessing sge lsf pbs condor pbspro slurm
.type = choice
.short_caption = multiprocessing type
.help = Choices are multiprocessing (single machine) or queuing systems
queue_run_command = None
.type = str
.short_caption = Queue run command
.help = run command for queue jobs. For example qsub.
nproc = 1
.type = int
.short_caption = Number of processors
.help = Number of processors to use
.style = renderer:draw_nproc_widget bold
}
""", process_includes=True)
master_params = master_phil
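# --- Illustrative sketch (editorial addition): typical use of the PHIL scope ---
# Callers normally merge user-supplied assignments into master_phil and extract
# a params object whose attributes mirror the scopes defined above
# (e.g. params.crystal_info.resolution). This assumes the standard
# iotbx.phil fetch/extract interface; the actual argument handling used by this
# module may differ.
def _example_extract_params(arg_strings=("crystal_info.resolution=3.0",)):
    user_phil = [iotbx.phil.parse(s) for s in arg_strings]
    working_phil = master_phil.fetch(sources=user_phil)
    params = working_phil.extract()
    return params  # e.g. params.crystal_info.resolution == 3.0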
class map_and_b_object:
def __init__(self,
map_data=None,
starting_b_iso=None,
final_b_iso=None):
from libtbx import adopt_init_args
adopt_init_args(self, locals())
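# (editorial note) adopt_init_args(self, locals()) copies every constructor
# keyword argument onto the instance, so for example:
#   mb = map_and_b_object(starting_b_iso=100., final_b_iso=50.)
#   mb.final_b_iso   # -> 50.
# The same idiom is used by the other small holder/info classes below.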
class pdb_info_object:
def __init__(self,
file_name=None,
n_residues=None,
):
from libtbx import adopt_init_args
adopt_init_args(self, locals())
import time
self.init_asctime=time.asctime()
def show_summary(self,out=sys.stdout):
print("PDB file:%s" %(self.file_name), end=' ', file=out)
if self.n_residues:
print(" Residues: %d" %(self.n_residues), file=out)
else:
print(file=out)
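# (editorial note) Example show_summary() output, with a hypothetical file name:
#   PDB file:model.pdb  Residues: 300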
class seq_info_object:
def __init__(self,
file_name=None,
sequence=None,
n_residues=None,
):
from libtbx import adopt_init_args
adopt_init_args(self, locals())
import time
self.init_asctime=time.asctime()
def show_summary(self,out=sys.stdout):
if self.file_name:
print("Sequence file:%s" %(self.file_name), end=' ', file=out)
if self.n_residues:
print(" Residues: %d" %(self.n_residues), file=out)
else:
print(file=out)
class ncs_info_object:
def __init__(self,
file_name=None,
number_of_operators=None,
is_helical_symmetry=None,
original_number_of_operators=None,
):
from libtbx import adopt_init_args
adopt_init_args(self, locals())
import time
self.init_asctime=time.asctime()
if original_number_of_operators is None:
self.original_number_of_operators=number_of_operators
self._has_updated_operators=False
def show_summary(self,out=sys.stdout):
print("NCS file:%s Operators: %d" %(self.file_name,
self.number_of_operators), file=out)
if self.is_helical_symmetry:
print("Helical symmetry is present", file=out)
def has_updated_operators(self):
return self._has_updated_operators
def update_number_of_operators(self,number_of_operators=None):
self.number_of_operators=number_of_operators
self._has_updated_operators=True
def update_is_helical_symmetry(self,is_helical_symmetry=None):
self.is_helical_symmetry=is_helical_symmetry
self._has_updated_operators=True
class map_info_object:
def __init__(self,
file_name=None,
origin=None,
all=None,
crystal_symmetry=None,
is_map=None,
map_id=None,
b_sharpen=None,
id=None,
):
from libtbx import adopt_init_args
adopt_init_args(self, locals())
import time
self.init_asctime=time.asctime()
def show_summary(self,out=sys.stdout):
if self.is_map:
print("Map file:%s" %(self.file_name), end=' ', file=out)
else:
print("Mask file:%s" %(self.file_name), end=' ', file=out)
if self.id is not None:
print("ID: %d" %(self.id), end=' ', file=out)
if self.b_sharpen is not None:
print("B-sharpen: %7.2f" %(self.b_sharpen), end=' ', file=out)
if self.map_id is not None:
print("Map ID: %s" %(self.map_id), file=out)
else:
print(file=out)
if self.origin and self.all:
print(" Origin: %d %d %d Extent: %d %d %d" %(
tuple(self.origin)+tuple(self.all)), file=out)
if self.crystal_symmetry:
print(" Map unit cell: %.1f %.1f %.1f %.1f %.1f %.1f " %(
self.crystal_symmetry.unit_cell().parameters()), file=out)
def lower_upper_bounds(self):
lower_bounds=self.origin
upper_bounds=[]
for a,b in zip(self.origin,self.all):
upper_bounds.append(a+b)
return list(self.origin),list(upper_bounds)
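# (editorial note) Example: with origin=(10,10,10) and all=(100,120,140),
# lower_upper_bounds() returns ([10,10,10], [110,130,150]); that is,
# upper bound = origin + extent along each axis.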
class info_object:
def __init__(self,
acc=None,
ncs_obj=None,
min_b=None,
max_b=None,
b_sharpen=None, # b_sharpen applied to map
ncs_group_list=None,
origin_shift=None,
crystal_symmetry=None, # after density_select
original_crystal_symmetry=None, # before density_select
full_crystal_symmetry=None, # from real_map object
full_unit_cell_grid=None, # from real_map object
edited_volume_list=None,
region_range_dict=None,
selected_regions=None,
ncs_related_regions=None,
self_and_ncs_related_regions=None,
map_files_written=None,
bad_region_list=None,
region_centroid_dict=None,
original_id_from_id=None,
remainder_id_dict=None, # dict relating regions in a remainder object to
params=None, # input params
input_pdb_info=None,
input_map_info=None,
input_ncs_info=None,
input_seq_info=None,
shifted_pdb_info=None,
shifted_map_info=None,
shifted_ncs_info=None,
shifted_used_ncs_info=None,
n_residues=None,
solvent_fraction=None,
output_ncs_au_map_info=None,
output_ncs_au_mask_info=None,
output_ncs_au_pdb_info=None,
output_box_map_info=None,
output_box_mask_info=None,
output_region_map_info_list=None,
output_region_pdb_info_list=None,
sharpening_info_obj=None,
box_map_bounds_first=None,
box_map_bounds_last=None,
final_output_sharpened_map_file=None,
box_map_ncs_au=None,
box_map_ncs_au_crystal_symmetry=None,
):
if not selected_regions: selected_regions=[]
if not ncs_related_regions: ncs_related_regions=[]
if not self_and_ncs_related_regions: self_and_ncs_related_regions=[]
if not map_files_written: map_files_written=[]
if not output_region_map_info_list: output_region_map_info_list=[]
if not output_region_pdb_info_list: output_region_pdb_info_list=[]
from libtbx import adopt_init_args
adopt_init_args(self, locals())
self.object_type="segmentation_info"
import time
self.init_asctime=time.asctime()
def set_box_map_ncs_au_map_data(self,box_map_ncs_au_map_data=None,
box_map_ncs_au_half_map_data_list=None,
box_map_ncs_au_crystal_symmetry=None):
self.box_map_ncs_au_map_data=box_map_ncs_au_map_data.deep_copy()
self.box_map_ncs_au_half_map_data_list=[]
for hm in box_map_ncs_au_half_map_data_list:
self.box_map_ncs_au_half_map_data_list.append(hm.deep_copy())
self.box_map_ncs_au_crystal_symmetry=box_map_ncs_au_crystal_symmetry
if self.origin_shift and self.origin_shift != (0,0,0):
self.box_map_ncs_au_map_data=self.shift_map_back(
map_data=self.box_map_ncs_au_map_data,
crystal_symmetry=self.box_map_ncs_au_crystal_symmetry,
shift_cart=self.origin_shift)
new_hm_list=[]
for hm in self.box_map_ncs_au_half_map_data_list:
hm=self.shift_map_back(
map_data=hm,
crystal_symmetry=self.box_map_ncs_au_crystal_symmetry,
shift_cart=self.origin_shift)
new_hm_list.append(hm)
self.box_map_ncs_au_half_map_data_list=new_hm_list
def shift_map_back(self,map_data=None,
crystal_symmetry=None,shift_cart=None):
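# (editorial note) Reverses a previously applied origin shift: the Cartesian
# shift is converted back to grid units (reverse=True below) and the map
# accessor is resized so the data overlays the original, unshifted map frame.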
from scitbx.matrix import col
new_origin=self.origin_shift_grid_units(crystal_symmetry=crystal_symmetry,
map_data=map_data,shift_cart=shift_cart,reverse=True)
new_all=list(col(map_data.all())+col(new_origin))
shifted_map_data = map_data.deep_copy()
shifted_map_data.resize(flex.grid(new_origin,new_all))
return shifted_map_data
def origin_shift_grid_units(self,crystal_symmetry=None,map_data=None,
shift_cart=None,reverse=False):
# Get origin shift in grid units from shift_cart
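# (editorial note) For each axis the Cartesian shift s (in A) is converted to
# grid units as round(n_grid * s / cell_edge); the +/-0.5 delta below gives
# round-half-away-from-zero behaviour for both negative and positive shifts.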
from scitbx.matrix import col
cell=crystal_symmetry.unit_cell().parameters()[:3]
origin_shift_grid=[]
for s,c,a in zip(shift_cart,cell,map_data.all()):
if s<0:
delta=-0.5
else:
delta=0.5
origin_shift_grid.append( int(delta+ a*s/c))
if reverse:
return list(-col(origin_shift_grid))
else:
return origin_shift_grid
def is_segmentation_info_object(self):
return True
def set_params(self,params):
self.params=deepcopy(params)
def set_input_seq_info(self,file_name=None,sequence=None,n_residues=None):
self.input_seq_info=seq_info_object(file_name=file_name,
sequence=sequence,
n_residues=n_residues)
def set_input_pdb_info(self,file_name=None,n_residues=None):
self.input_pdb_info=pdb_info_object(file_name=file_name,
n_residues=n_residues)
def set_input_ncs_info(self,file_name=None,number_of_operators=None):
self.input_ncs_info=ncs_info_object(file_name=file_name,
number_of_operators=number_of_operators)
def update_ncs_info(self,number_of_operators=None,is_helical_symmetry=None,
shifted=False):
if shifted:
ncs_info=self.shifted_ncs_info
else:
ncs_info=self.input_ncs_info
assert ncs_info
if number_of_operators is not None:
ncs_info.update_number_of_operators(
number_of_operators=number_of_operators)
if is_helical_symmetry is not None:
ncs_info.update_is_helical_symmetry(
is_helical_symmetry=is_helical_symmetry)
def set_sharpening_info(self,sharpening_info_obj=None):
self.sharpening_info_obj=sharpening_info_obj
def set_input_map_info(self,file_name=None,crystal_symmetry=None,
origin=None,all=None):
self.input_map_info=map_info_object(file_name=file_name,
crystal_symmetry=crystal_symmetry,
origin=origin,
all=all,
is_map=True)
def set_ncs_obj(self,ncs_obj=None):
self.ncs_obj=ncs_obj
def set_origin_shift(self,origin_shift=None):
if not origin_shift: origin_shift=(0,0,0)
self.origin_shift=tuple(origin_shift)
def set_crystal_symmetry(self,crystal_symmetry):
self.crystal_symmetry=deepcopy(crystal_symmetry)
def set_original_crystal_symmetry(self,crystal_symmetry):
self.original_crystal_symmetry=deepcopy(crystal_symmetry)
def set_full_crystal_symmetry(self,crystal_symmetry):
self.full_crystal_symmetry=deepcopy(crystal_symmetry)
def set_full_unit_cell_grid(self,unit_cell_grid):
self.full_unit_cell_grid=deepcopy(unit_cell_grid)
def set_box_map_bounds_first_last(self,box_map_bounds_first,
box_map_bounds_last):
self.box_map_bounds_first=box_map_bounds_first
self.box_map_bounds_last=[]
for l in box_map_bounds_last:
self.box_map_bounds_last.append(l+1) # it is one bigger...
def set_accessor(self,acc):
self.acc=acc
def set_shifted_map_info(self,file_name=None,crystal_symmetry=None,
origin=None,all=None,b_sharpen=None):
self.shifted_map_info=map_info_object(file_name=file_name,
crystal_symmetry=crystal_symmetry,
origin=origin,
all=all,
b_sharpen=b_sharpen,
is_map=True)
def set_shifted_pdb_info(self,file_name=None,n_residues=None):
self.shifted_pdb_info=pdb_info_object(file_name=file_name,
n_residues=n_residues)
def set_shifted_ncs_info(self,file_name=None,number_of_operators=None,
is_helical_symmetry=None):
self.shifted_ncs_info=ncs_info_object(file_name=file_name,
number_of_operators=number_of_operators,
is_helical_symmetry=is_helical_symmetry)
def set_shifted_used_ncs_info(self,file_name=None,number_of_operators=None,
is_helical_symmetry=None):
self.shifted_used_ncs_info=ncs_info_object(file_name=file_name,
number_of_operators=number_of_operators,
is_helical_symmetry=is_helical_symmetry)
def set_solvent_fraction(self,solvent_fraction):
self.solvent_fraction=solvent_fraction
def set_n_residues(self,n_residues): # may not be the same as seq file
self.n_residues=n_residues
def set_output_ncs_au_map_info(self,file_name=None,crystal_symmetry=None,
origin=None,all=None):
self.output_ncs_au_map_info=map_info_object(file_name=file_name,
crystal_symmetry=crystal_symmetry,
origin=origin,
all=all,
is_map=True)
def set_output_ncs_au_mask_info(self,file_name=None,crystal_symmetry=None,
origin=None,all=None):
self.output_ncs_au_mask_info=map_info_object(file_name=file_name,
crystal_symmetry=crystal_symmetry,
origin=origin,
all=all,
is_map=False)
def set_output_ncs_au_pdb_info(self,file_name=None,n_residues=None):
self.output_ncs_au_pdb_info=pdb_info_object(file_name=file_name,
n_residues=n_residues)
def set_output_box_map_info(self,file_name=None,crystal_symmetry=None,
origin=None,all=None):
self.output_box_map_info=map_info_object(file_name=file_name,
crystal_symmetry=crystal_symmetry,
origin=origin,
all=all,
is_map=True)
def set_output_box_mask_info(self,file_name=None,crystal_symmetry=None,
origin=None,all=None):
self.output_box_mask_info=map_info_object(file_name=file_name,
crystal_symmetry=crystal_symmetry,
origin=origin,
all=all,
is_map=False)
def add_output_region_map_info(self,file_name=None,crystal_symmetry=None,
origin=None,all=None,map_id=None):
self.output_region_map_info_list.append(map_info_object(
file_name=file_name,
crystal_symmetry=crystal_symmetry,
origin=origin,
all=all,
id=len(self.output_region_map_info_list)+1,
map_id=map_id,
is_map=True)
)
def add_output_region_pdb_info(self,file_name=None,n_residues=None):
self.output_region_pdb_info_list.append(pdb_info_object(
file_name=file_name,
n_residues=n_residues)
)
def show_summary(self,out=sys.stdout):
print("\n========== Summary of %s: ========\n" %(self.object_type), file=out)
print("Created: %s" %(self.init_asctime), file=out)
print("\nInput files used:\n", file=out)
if self.input_map_info:
self.input_map_info.show_summary(out=out)
if self.input_pdb_info:
self.input_pdb_info.show_summary(out=out)
if self.input_ncs_info:
self.input_ncs_info.show_summary(out=out)
if self.input_seq_info:
self.input_seq_info.show_summary(out=out)
print(file=out)
if self.crystal_symmetry:
print("Working unit cell: %.1f %.1f %.1f %.1f %.1f %.1f " %(
self.crystal_symmetry.unit_cell().parameters()), file=out)
if self.n_residues:
print("Estimated total number of residues: %d" %(self.n_residues), file=out)
if self.solvent_fraction:
print("Estimated solvent fraction: %5.3f" %(self.solvent_fraction), file=out)
if self.origin_shift and self.origin_shift != (0,0,0):
print("\nOrigin offset applied: %.1f %.1f %.1f" %(self.origin_shift), file=out)
else:
print("\nNo origin offset applied", file=out)
if self.shifted_map_info:
print("\nShifted/sharpened map, pdb and ncs files created "+\
"(after origin offset):\n", file=out)
if self.shifted_map_info:
self.shifted_map_info.show_summary(out=out)
if self.shifted_pdb_info:
self.shifted_pdb_info.show_summary(out=out)
if self.shifted_ncs_info:
self.shifted_ncs_info.show_summary(out=out)
if self.output_ncs_au_pdb_info:
print("\nOutput PDB file with dummy atoms representing the NCS AU:", file=out)
self.output_ncs_au_pdb_info.show_summary(out=out)
if self.output_ncs_au_mask_info or self.output_ncs_au_map_info:
print("\nOutput map files showing just the NCS AU (same size", end=' ', file=out)
if self.origin_shift and self.origin_shift != (0,0,0):
print("\nand location as shifted map files:\n", file=out)
else:
print("\nand location as input map:\n", file=out)
if self.output_ncs_au_mask_info:
self.output_ncs_au_mask_info.show_summary(out=out)
if self.output_ncs_au_map_info:
self.output_ncs_au_map_info.show_summary(out=out)
if self.output_box_mask_info or self.output_box_map_info:
print("\nOutput cut-out map files trimmed to contain just "+\
"the \nNCS AU (superimposed on", end=' ', file=out)
if self.origin_shift and self.origin_shift != (0,0,0):
print("shifted map files, note origin offset):\n", file=out)
else:
print("input map, note origin offset):\n", file=out)
if self.output_box_mask_info:
self.output_box_mask_info.show_summary(out=out)
if self.output_box_map_info:
self.output_box_map_info.show_summary(out=out)
if self.output_region_pdb_info_list:
print("\nOutput PDB files representing one region of connected"+\
" density.\nThese are useful for marking where to look in cut-out map"+\
" files.", file=out)
for output_region_pdb_info in self.output_region_pdb_info_list:
output_region_pdb_info.show_summary(out=out)
if self.output_region_map_info_list:
print("\nOutput cut-out map files trimmed to contain just "+\
"one region of \nconnected density (superimposed on", end=' ', file=out)
if self.origin_shift and self.origin_shift != (0,0,0):
print("shifted map files, note origin offset):\n", file=out)
else:
print(" input map, note origin offset):\n", file=out)
for output_region_map_info in self.output_region_map_info_list:
output_region_map_info.show_summary(out=out)
print("\n"+50*"="+"\n", file=out)
class make_ccp4_map: # just a holder so map_to_structure_factors will run
def __init__(self,map=None,unit_cell=None):
self.data=map
self.unit_cell_parameters=unit_cell.parameters()
self.space_group_number=1
self.unit_cell_grid=map.all()
def crystal_symmetry(self):
return crystal.symmetry(self.unit_cell_parameters,
self.space_group_number)
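# --- Illustrative sketch (editorial addition): wrapping raw map data ---
# make_ccp4_map only mimics the attributes that downstream code expects from a
# CCP4 map object; the grid and cell below are arbitrary example values.
def _example_make_ccp4_map_holder():
    from cctbx import uctbx
    map_data = flex.double(flex.grid((10, 10, 10)), 0.0)
    holder = make_ccp4_map(map=map_data,
        unit_cell=uctbx.unit_cell((50, 50, 50, 90, 90, 90)))
    return holder.crystal_symmetry()  # P1 symmetry with the given cell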
class b_vs_region_info:
def __init__(self):
self.b_iso=0.
self.b_vs_region_dict={}
self.sa_sum_v_vs_region_dict={}
self.sa_nn_vs_region_dict={}
self.sa_ratio_b_vs_region_dict={}
class box_sharpening_info:
def __init__(self,tracking_data=None,
crystal_symmetry=None,
solvent_fraction=None,
b_iso=None,
resolution=None,
d_min_ratio=None,
scale_max=None,
lower_bounds=None,
upper_bounds=None,
wrapping=None,
n_real=None,
n_buffer=None,
map_data=None,
smoothing_radius=None,
smoothed_box_mask_data=None,
original_box_map_data=None,
):
from libtbx import adopt_init_args
adopt_init_args(self, locals())
del self.tracking_data # do not save it
if tracking_data:
self.crystal_symmetry=tracking_data.crystal_symmetry
self.solvent_fraction=tracking_data.solvent_fraction
self.wrapping=tracking_data.params.crystal_info.use_sg_symmetry
def get_gaussian_weighting(self,out=sys.stdout):
# return a gaussian function centered on center of the map, fall-off
# based on smoothing_radius
# Calculate weight map, max near location of centers_ncs_cart
# U=rmsd**2
# (b_eff=8*3.14159**2*U)
# rmsd is at least distance between centers, not too much bigger than
# unit cell size, typically 10-20 A,
print("\nFall-off of local weight is 1/%6.1f A\n" %(
self.smoothing_radius), file=out)
u=self.smoothing_radius**2
from cctbx import xray
xrs,scatterers=set_up_xrs(crystal_symmetry=self.crystal_symmetry)
unit_cell=self.crystal_symmetry.unit_cell()
for xyz_fract in [(0.5,0.5,0.5,)]:
scatterers.append( xray.scatterer(scattering_type="H", label="H",
site=xyz_fract, u=u, occupancy=1.0))
xrs = xray.structure(xrs, scatterers=scatterers)
f_array,phases=get_f_phases_from_map(map_data=self.map_data,
crystal_symmetry=self.crystal_symmetry,
d_min=self.resolution,
scale_max=self.scale_max,
d_min_ratio=self.d_min_ratio,
get_remove_aniso_object=False,# don't need it
out=out)
weight_f_array=f_array.structure_factors_from_scatterers(
algorithm = 'direct',
xray_structure = xrs).f_calc()
weight_map=get_map_from_map_coeffs(map_coeffs=weight_f_array,
crystal_symmetry=self.crystal_symmetry,n_real=self.map_data.all())
min_value=weight_map.as_1d().min_max_mean().min
weight_map=weight_map-min_value # all positive or zero
max_value=weight_map.as_1d().min_max_mean().max
weight_map=weight_map/max(1.e-10,max_value) # normalize; max=1 now
min_value=1.e-10 # just a small value for all distances far from center
s = (weight_map <min_value ) # make extra sure every point is above this
weight_map=weight_map.set_selected(s,min_value)
return weight_map
def remove_buffer_from_bounds(self,minimum=1):
# back off by n_buffer in each direction, leave at
# least minimum grid on either side of center
adjusted_lower_bounds,adjusted_upper_bounds=[],[]
delta_lower_bounds,delta_upper_bounds=[],[]
for lb,ub in zip(self.lower_bounds,self.upper_bounds):
sum=lb+ub
if sum >=0:
mid=(1+sum)//2
else:
mid=(-1+sum)//2
alb=min(mid-minimum,lb+self.n_buffer)
aub=max(mid+minimum,ub-self.n_buffer)
adjusted_lower_bounds.append(alb)
adjusted_upper_bounds.append(aub)
delta_lower_bounds.append(alb-lb)
delta_upper_bounds.append(aub-ub)
return adjusted_lower_bounds,adjusted_upper_bounds,\
delta_lower_bounds,delta_upper_bounds
def merge_into_overall_map(self,overall_map=None):
# Smoothly fill out edges of the small map with overall_map
assert self.smoothed_box_mask_data is not None
assert self.original_box_map_data is not None
self.map_data= (self.map_data * self.smoothed_box_mask_data) + \
(self.original_box_map_data * (1-self.smoothed_box_mask_data))
def remove_buffer(self,out=sys.stdout):
# remove the buffer from this box
new_lower_bounds,new_upper_bounds,delta_lower,delta_upper=\
self.remove_buffer_from_bounds()
cut_out_lower_bounds=[]
cut_out_upper_bounds=[]
for o,a,dlb,dub in zip(self.map_data.origin(),self.map_data.all(),
delta_lower,delta_upper):
cut_out_lower_bounds.append(o+dlb)
cut_out_upper_bounds.append(a+dub-1)
self.map_data,self.crystal_symmetry,\
self.smoothed_box_mask_data,self.original_box_map_data=cut_out_map(
map_data=self.map_data,
crystal_symmetry=self.crystal_symmetry,
soft_mask=False,
resolution=self.resolution,
shift_origin=True,
min_point=cut_out_lower_bounds,
max_point=cut_out_upper_bounds,out=out)
self.lower_bounds=new_lower_bounds
self.upper_bounds=new_upper_bounds
class sharpening_info:
def __init__(self,
tracking_data=None,
crystal_symmetry=None,
is_crystal=None,
sharpening_method=None,
solvent_fraction=None,
n_residues=None,
ncs_copies=None,
ncs_file=None,
seq_file=None,
sequence=None,
n_real=None,
region_weight=None,
n_bins=None,
eps=None,
d_min=None,
d_min_ratio=None,
scale_max=None,
input_d_cut=None,
b_blur_hires=None,
rmsd=None,
rmsd_resolution_factor=None,
k_sol=None,
b_sol=None,
fraction_complete=None,
wrapping=None,
sharpening_target=None,
residual_target=None,
fraction_occupied=None,
nproc=None,
multiprocessing=None,
queue_run_command=None,
resolution=None, # changed from d_cut
resolution_dependent_b=None, # linear sharpening
normalize_amplitudes_in_resdep=None, # linear sharpening
b_sharpen=None,
b_iso=None, # expected B_iso after applying b_sharpen
k_sharpen=None,
optimize_b_blur_hires=None,
iterate=None,
optimize_d_cut=None,
kurtosis=None,
adjusted_sa=None,
sa_ratio=None,
normalized_regions=None,
score=None,
input_weight_map_pickle_file=None,
output_weight_map_pickle_file=None,
read_sharpened_maps=None,
write_sharpened_maps=None,
select_sharpened_map=None,
output_directory=None,
smoothing_radius=None,
local_sharpening=None,
local_aniso_in_local_sharpening=None,
overall_before_local=None,
use_local_aniso=None,
original_aniso_obj=None,
auto_sharpen=None,
box_in_auto_sharpen=None,
density_select_in_auto_sharpen=None,
density_select_threshold_in_auto_sharpen=None,
use_weak_density=None,
discard_if_worse=None,
max_box_fraction=None,
cc_cut=None,
max_cc_for_rescale=None,
scale_using_last=None,
density_select_max_box_fraction=None,
mask_atoms=None,
mask_atoms_atom_radius=None,
value_outside_atoms=None,
soft_mask=None,
allow_box_if_b_iso_set=None,
search_b_min=None,
search_b_max=None,
search_b_n=None,
adjust_region_weight=None,
region_weight_method=None,
region_weight_factor=None,
region_weight_buffer=None,
region_weight_default=None,
target_b_iso_ratio=None,
signal_min=None,
target_b_iso_model_scale=None,
box_sharpening_info_obj=None,
chain_type=None,
target_scale_factors=None,
remove_aniso=None,
d_min_list=None,
verbose=None,
resolve_size=None,
pdb_inp=None, # XXX probably do not need this
local_solvent_fraction=None,
wang_radius=None,
buffer_radius=None,
pseudo_likelihood=None,
preliminary_sharpening_done=False,
):
from libtbx import adopt_init_args
adopt_init_args(self, locals())
del self.tracking_data # don't need it as part of the object
del self.box_sharpening_info_obj# don't need it as part of the object
del self.pdb_inp # don't need it as part of the object
if tracking_data: # use tracking data information
self.update_with_tracking_data(tracking_data=tracking_data)
if box_sharpening_info_obj: # update information
self.update_with_box_sharpening_info(
box_sharpening_info_obj=box_sharpening_info_obj)
if self.resolution_dependent_b is None:
self.resolution_dependent_b=[0,0,0]
if self.target_scale_factors and \
self.sharpening_method!='model_sharpening' \
and self.sharpening_method!='half_map_sharpening':
assert self.sharpening_method is None # XXX may want to print out error
self.sharpening_method='model_sharpening'
if self.sharpening_method=='b_iso' and self.k_sharpen is not None:
self.k_sharpen=None
if pdb_inp:
self.sharpening_method='model_sharpening'
self.box_in_auto_sharpen=True
self.density_select_in_auto_sharpen=False
self.sharpening_target='model'
def get_d_cut(self):
if self.input_d_cut is not None:
return self.input_d_cut
else:
return self.resolution
def get_target_b_iso(self):
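# Target B-iso scales with the square of the resolution:
#   target_b_iso = target_b_iso_ratio * resolution**2
# e.g. target_b_iso_ratio=10 at 3 A resolution gives 10*3**2 = 90 A**2.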
if self.target_b_iso_ratio is None:
return None
if self.resolution is None:
return None
return self.target_b_iso_ratio*self.resolution**2
def set_resolution_dependent_b(self,
resolution_dependent_b=None,
sharpening_method='resolution_dependent'):
if resolution_dependent_b:
self.resolution_dependent_b=resolution_dependent_b
if sharpening_method:
self.sharpening_method=sharpening_method
def sharpening_is_defined(self):
if self.sharpening_method is None:
return False
if self.target_scale_factors:
return True
if self.sharpening_method=='target_b_iso_to_d_cut':
return True
if self.b_iso is not None or \
self.b_sharpen is not None or \
(self.resolution_dependent_b is not None and
self.resolution_dependent_b!=[0,0,0]):
return True
return False
def update_with_box_sharpening_info(self,box_sharpening_info_obj=None):
if not box_sharpening_info_obj:
return self
self.crystal_symmetry=box_sharpening_info_obj.crystal_symmetry
self.solvent_fraction=box_sharpening_info_obj.solvent_fraction
self.wrapping=box_sharpening_info_obj.wrapping
self.n_real=box_sharpening_info_obj.n_real
return self
def update_with_tracking_data(self,tracking_data=None):
self.update_with_params(params=tracking_data.params,
crystal_symmetry=tracking_data.crystal_symmetry,
solvent_fraction=tracking_data.solvent_fraction,
n_residues=tracking_data.n_residues,
ncs_copies=tracking_data.input_ncs_info.number_of_operators)
return self
def update_with_params(self,params=None,
crystal_symmetry=None,
is_crystal=None,
solvent_fraction=None,
auto_sharpen=None,
sharpening_method=None,
pdb_inp=None,
half_map_data_list=None,
n_residues=None,ncs_copies=None):
self.crystal_symmetry=crystal_symmetry
self.is_crystal=is_crystal
self.solvent_fraction=solvent_fraction
self.auto_sharpen=auto_sharpen
self.n_residues=n_residues
self.ncs_copies=ncs_copies
self.seq_file=params.input_files.seq_file
self.chain_type=params.crystal_info.chain_type
self.verbose=params.control.verbose
self.resolve_size=params.control.resolve_size
self.multiprocessing=params.control.multiprocessing
self.nproc=params.control.nproc
self.queue_run_command=params.control.queue_run_command
self.wrapping=params.crystal_info.use_sg_symmetry
self.fraction_occupied=params.map_modification.fraction_occupied
self.sa_percent=params.map_modification.sa_percent
self.region_weight=params.map_modification.region_weight
self.max_regions_to_test=params.map_modification.max_regions_to_test
self.regions_to_keep=params.map_modification.regions_to_keep
self.d_min_ratio=params.map_modification.d_min_ratio
self.scale_max=params.map_modification.scale_max
self.input_d_cut=params.map_modification.input_d_cut
self.b_blur_hires=params.map_modification.b_blur_hires
self.rmsd=params.map_modification.rmsd
self.rmsd_resolution_factor=params.map_modification.rmsd_resolution_factor
self.k_sol=params.map_modification.k_sol
self.b_sol=params.map_modification.b_sol
self.fraction_complete=params.map_modification.fraction_complete
self.resolution=params.crystal_info.resolution # changed from d_cut
# NOTE:
# resolution=X-ray resolution or nominal resolution of cryoEM map
# high-res cutoff of reflections is d_min*d_min_ratio
self.buffer_radius=params.crystal_info.buffer_radius
self.wang_radius=params.crystal_info.wang_radius
self.pseudo_likelihood=params.crystal_info.pseudo_likelihood
self.max_box_fraction=params.map_modification.max_box_fraction
self.cc_cut=params.map_modification.cc_cut
self.max_cc_for_rescale=params.map_modification.max_cc_for_rescale
self.scale_using_last=params.map_modification.scale_using_last
self.density_select_max_box_fraction=params.map_modification.density_select_max_box_fraction
self.mask_atoms=params.map_modification.mask_atoms
self.mask_atoms_atom_radius=params.map_modification.mask_atoms_atom_radius
self.value_outside_atoms=params.map_modification.value_outside_atoms
self.soft_mask=params.map_modification.soft_mask
self.allow_box_if_b_iso_set=params.map_modification.allow_box_if_b_iso_set
self.k_sharpen=params.map_modification.k_sharpen
self.optimize_b_blur_hires=params.map_modification.optimize_b_blur_hires
self.iterate=params.map_modification.iterate
self.optimize_d_cut=params.map_modification.optimize_d_cut
self.sharpening_target=params.map_modification.sharpening_target
self.residual_target=params.map_modification.residual_target
self.eps=params.map_modification.eps
self.n_bins=params.map_modification.n_bins
self.input_weight_map_pickle_file=params.input_files.input_weight_map_pickle_file
self.output_weight_map_pickle_file=params.output_files.output_weight_map_pickle_file
self.read_sharpened_maps=params.map_modification.read_sharpened_maps
self.write_sharpened_maps=params.map_modification.write_sharpened_maps
self.select_sharpened_map=params.map_modification.select_sharpened_map
self.output_directory=params.output_files.output_directory
self.smoothing_radius=params.map_modification.smoothing_radius
self.local_sharpening=params.map_modification.local_sharpening
self.local_aniso_in_local_sharpening=\
params.map_modification.local_aniso_in_local_sharpening
self.overall_before_local=\
params.map_modification.overall_before_local
self.box_in_auto_sharpen=params.map_modification.box_in_auto_sharpen
self.density_select_in_auto_sharpen=params.map_modification.density_select_in_auto_sharpen
self.density_select_threshold_in_auto_sharpen=params.map_modification.density_select_threshold_in_auto_sharpen
self.use_weak_density=params.map_modification.use_weak_density
self.discard_if_worse=params.map_modification.discard_if_worse
self.box_center=params.map_modification.box_center
self.box_size=params.map_modification.box_size
self.target_n_overlap=params.map_modification.target_n_overlap
self.restrict_map_size=params.map_modification.restrict_map_size
self.remove_aniso=params.map_modification.remove_aniso
self.min_ratio_of_ncs_copy_to_first=\
params.segmentation.min_ratio_of_ncs_copy_to_first
self.max_ratio_to_target=params.segmentation.max_ratio_to_target
self.min_ratio_to_target=params.segmentation.min_ratio_to_target
self.residues_per_region=params.segmentation.residues_per_region
self.mask_padding_fraction=\
params.segmentation.mask_padding_fraction
self.fraction_of_max_mask_threshold=\
params.segmentation.fraction_of_max_mask_threshold
self.cell_cutoff_for_solvent_from_mask=\
params.segmentation.cell_cutoff_for_solvent_from_mask
self.starting_density_threshold=\
params.segmentation.starting_density_threshold
self.density_threshold=params.segmentation.density_threshold
self.min_ratio=params.segmentation.min_ratio
self.min_volume=params.segmentation.min_volume
self.search_b_min=params.map_modification.search_b_min
self.search_b_max=params.map_modification.search_b_max
self.search_b_n=params.map_modification.search_b_n
self.adjust_region_weight=params.map_modification.adjust_region_weight
self.region_weight_method=params.map_modification.region_weight_method
self.region_weight_factor=params.map_modification.region_weight_factor
self.region_weight_buffer=params.map_modification.region_weight_buffer
self.region_weight_default=params.map_modification.region_weight_default
self.target_b_iso_ratio=params.map_modification.target_b_iso_ratio
self.signal_min=params.map_modification.signal_min
self.target_b_iso_model_scale=params.map_modification.target_b_iso_model_scale
if sharpening_method is not None:
self.sharpening_method=sharpening_method
if not self.sharpening_method and \
len(params.map_modification.auto_sharpen_methods)==1:
self.sharpening_method=params.map_modification.auto_sharpen_methods[0]
if half_map_data_list or self.sharpening_method=='half_map_sharpening':
self.sharpening_method='half_map_sharpening'
self.sharpening_target='half_map'
elif pdb_inp or self.sharpening_method=='model_sharpening':
self.sharpening_method='model_sharpening'
self.box_in_auto_sharpen=True
self.density_select_in_auto_sharpen=False
self.sharpening_target='model'
elif params.map_modification.b_iso is not None or \
params.map_modification.b_sharpen is not None:
if self.sharpening_method is None:
raise Sorry("b_iso is not set")
# if sharpening values are specified, set them
if params.map_modification.b_iso is not None:
self.b_iso=params.map_modification.b_iso # but we need b_sharpen
elif params.map_modification.b_sharpen is not None:
self.b_sharpen=params.map_modification.b_sharpen
elif (params.map_modification.resolution_dependent_b is not None
and params.map_modification.resolution_dependent_b!=[0,0,0]):
self.sharpening_method='resolution_dependent'
self.resolution_dependent_b=\
params.map_modification.resolution_dependent_b
if self.sharpening_method=='b_iso' and self.k_sharpen is not None:
self.k_sharpen=None
return self
def show_summary(self,verbose=False,out=sys.stdout):
method_summary_dict={
'b_iso':"Overall b_iso sharpening",
'b_iso_to_d_cut':"b_iso sharpening to high_resolution cutoff",
'resolution_dependent':"Resolution-dependent sharpening",
'model_sharpening':"Model sharpening",
'half_map_sharpening':"Half-map sharpening",
'no_sharpening':"No sharpening",
None:"No sharpening",
}
target_summary_dict={
'adjusted_sa':"Adjusted surface area",
'kurtosis':"Map kurtosis",
'model':"Map-model CC",
}
print("\nSummary of sharpening:\n", file=out)
print("Sharpening method used: %s\n" %(
method_summary_dict.get(self.sharpening_method)), file=out)
if self.sharpening_method=="b_iso":
if self.b_sharpen is not None:
print("Overall b_sharpen applied: %7.2f A**2" %(
self.b_sharpen), file=out)
if self.b_iso is not None:
print("Final b_iso obtained: %7.2f A**2" %(self.b_iso), file=out)
elif self.sharpening_method=="b_iso_to_d_cut":
if self.b_sharpen is not None:
print("Overall b_sharpen applied: %7.2f A**2" %(
self.b_sharpen), file=out)
if self.b_iso is not None:
print("Final b_iso obtained: %7.2f A**2" %(self.b_iso), file=out)
if self.input_d_cut:
print("High-resolution cutoff: %7.2f A" %(self.input_d_cut), file=out)
else:
print("High-resolution cutoff: %7.2f A" %(self.resolution), file=out)
elif self.sharpening_method=="resolution_dependent":
print("Resolution-dependent b values (%7.2f,%7.2f,%7.2f)\n" %(
tuple(self.resolution_dependent_b)), file=out)
print("Effective b_iso vs resolution obtained:", file=out)
from cctbx.maptbx.refine_sharpening import get_effective_b_values
d_min_values,b_values=get_effective_b_values(
d_min_ratio=self.d_min_ratio,
resolution_dependent_b=self.resolution_dependent_b,
resolution=self.resolution)
print(" Resolution Effective B-iso", file=out)
print(" (A) (A**2)", file=out)
for dd,b in zip(d_min_values,b_values):
print(" %7.1f %7.2f " %(
dd,b), file=out)
elif self.sharpening_method=="model_sharpening":
print("Resolution-dependent model sharpening", file=out)
if self.d_min_list and self.target_scale_factors:
print("Scale vs resolution:", file=out)
for d_min,sc in zip(
self.d_min_list,
self.target_scale_factors):
print("Dmin: %7.2f Scale: %9.6f" %(d_min,sc), file=out)
elif self.sharpening_method=="half_map_sharpening":
print("Resolution-dependent half-map sharpening", file=out)
if self.d_min_list and self.target_scale_factors:
print("Scale vs resolution:", file=out)
for d_min,sc in zip(
self.d_min_list,
self.target_scale_factors):
print("Dmin: %7.2f Scale: %9.6f" %(d_min,sc), file=out)
if self.sharpening_method in ["b_iso_to_d_cut"] and \
self.k_sharpen and self.resolution:
print("Transition from sharpening"+\
" to not sharpening (k_sharpen):%7.2f " %(self.k_sharpen), file=out)
print("\nSharpening target used: %s" %(
target_summary_dict.get(self.sharpening_target)), file=out)
if self.adjusted_sa is not None:
print("Final adjusted map surface area: %7.2f" %(self.adjusted_sa), file=out)
if self.kurtosis is not None:
print("Final map kurtosis: %7.2f" %(self.kurtosis), file=out)
print(file=out)
if verbose:
for x in dir(self):
if x.startswith("__"): continue
if type(getattr(self,x)) in [type('a'),type(1),type(1.),type([]),
type((1,2,))]:
print("%s : %s" %(x,getattr(self,x)), file=out)
def get_effective_b_iso(self,map_data=None,out=sys.stdout):
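# Convert the map to map coefficients and return the isotropic B estimated
# by the aniso-removal object (map_coeffs_ra.b_iso) from effective_b_iso().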
map_coeffs_ra,map_coeffs,f_array,phases=effective_b_iso(
map_data=map_data,
resolution=self.resolution,
d_min_ratio=self.d_min_ratio,
scale_max=self.scale_max,
crystal_symmetry=self.crystal_symmetry,
out=out)
return map_coeffs_ra.b_iso
def sharpen_and_score_map(self,map_data=None,set_b_iso=False,out=sys.stdout):
if self.n_real is None: # need to get it
self.n_real=map_data.all()
map_and_b=sharpen_map_with_si(
sharpening_info_obj=self,
map_data=map_data,
resolution=self.resolution,out=out)
self.map_data=map_and_b.map_data
if set_b_iso:
self.b_iso=map_and_b.final_b_iso
score_map(map_data=self.map_data,
sharpening_info_obj=self,
out=null_out())
return self
def show_score(self,out=sys.stdout):
print("Adjusted surface area: %7.3f Kurtosis: %7.3f Score: %7.3f\n" %(
self.adjusted_sa,self.kurtosis,self.score), file=out)
def is_target_b_iso_to_d_cut(self):
if self.sharpening_method=='target_b_iso_to_d_cut':
return True
else:
return False
def is_b_iso_sharpening(self):
if self.is_resolution_dependent_sharpening():
return False
if self.is_model_sharpening():
return False
if self.is_half_map_sharpening():
return False
return True
def is_resolution_dependent_sharpening(self):
if self.sharpening_method=='resolution_dependent':
return True
else:
return False
def is_model_sharpening(self):
if self.sharpening_method=='model_sharpening':
return True
else:
return False
def is_half_map_sharpening(self):
if self.sharpening_method=='half_map_sharpening':
return True
else:
return False
def as_map_coeffs(self,out=sys.stdout):
map_data=getattr(self,'map_data',None)
if map_data:
map_coeffs,dummy=get_f_phases_from_map(map_data=self.map_data,
crystal_symmetry=self.crystal_symmetry,
d_min=self.resolution,
d_min_ratio=self.d_min_ratio,
scale_max=self.scale_max,
return_as_map_coeffs=True,
out=out)
return map_coeffs
else:
return None
def as_map_data(self):
return getattr(self,'map_data',None)
class ncs_group_object:
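# Bookkeeping object for segmentation: the NCS object and operators used,
# region bookkeeping (selected regions, NCS-related regions, centroids,
# bad regions) and the map files written. Convertible to an info_object
# via as_info_object().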
def __init__(self,
ncs_obj=None,
ncs_ops_used=None,
ncs_group_list=None,
edited_mask=None,
crystal_symmetry=None,
max_cell_dim=None,
origin_shift=None,
edited_volume_list=None,
region_range_dict=None,
selected_regions=None,
ncs_related_regions=None,
self_and_ncs_related_regions=None,
equiv_dict=None,
map_files_written=None,
bad_region_list=None,
region_centroid_dict=None,
region_scattered_points_dict=None,
shared_group_dict=None,
co=None,
min_b=None,
max_b=None,
original_id_from_id=None,
remainder_id_dict=None, # dict relating regions in a remainder object to
# those in the original map
):
if not selected_regions: selected_regions=[]
if not ncs_related_regions: ncs_related_regions=[]
if not self_and_ncs_related_regions: self_and_ncs_related_regions=[]
if not map_files_written: map_files_written=[]
from libtbx import adopt_init_args
adopt_init_args(self, locals())
if self.crystal_symmetry and not self.max_cell_dim:
self.max_cell_dim=0.
for x in self.crystal_symmetry.unit_cell().parameters()[:3]:
self.max_cell_dim=max(self.max_cell_dim,x)
def as_info_object(self):
return info_object(
ncs_obj=self.ncs_obj,
max_b=self.max_b,
min_b=self.min_b,
ncs_group_list=self.ncs_group_list,
origin_shift=self.origin_shift,
edited_volume_list=self.edited_volume_list,
region_range_dict=self.region_range_dict,
selected_regions=self.selected_regions,
ncs_related_regions=self.ncs_related_regions,
self_and_ncs_related_regions=self.self_and_ncs_related_regions,
bad_region_list=self.bad_region_list,
region_centroid_dict=self.region_centroid_dict,
original_id_from_id=self.original_id_from_id,
map_files_written=self.map_files_written,
)
def set_ncs_ops_used(self,ncs_ops_used):
self.ncs_ops_used=deepcopy(ncs_ops_used)
def set_selected_regions(self,selected_regions):
self.selected_regions=deepcopy(selected_regions)
def set_ncs_related_regions(self,ncs_related_regions):
self.ncs_related_regions=deepcopy(ncs_related_regions)
def set_self_and_ncs_related_regions(self,self_and_ncs_related_regions):
self.self_and_ncs_related_regions=deepcopy(self_and_ncs_related_regions)
def set_map_files_written(self,map_files_written):
self.map_files_written=deepcopy(map_files_written)
def scale_map(map,scale_rms=1.0,out=sys.stdout):
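# Rescale the map so its sample standard deviation equals scale_rms
# (default 1.0); e.g. a map with SD 0.25 is multiplied by 4. If the SD is
# essentially zero (all values equal) the map is returned unchanged.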
sd=map.as_double().as_1d().sample_standard_deviation()
if (sd > 1.e-10):
scale=scale_rms/sd
if 0: print("Scaling map by %7.3f to set SD=1" %(scale), file=out)
map=map*scale
else:
print("Cannot scale map...all zeros", file=out)
return map
def scale_map_coeffs(map_coeffs,scale_max=None,out=sys.stdout):
f_array,phases=map_coeffs_as_fp_phi(map_coeffs)
max_value=f_array.data().min_max_mean().max
if scale_max:
scale=scale_max/max(1.e-10,max_value)
else:
scale=1.0
if 0:
print("Scaling map_coeffs by %9.3f to yield maximum of %7.0f" %(
scale,scale_max), file=out)
return f_array.array(data=f_array.data()*scale
).phase_transfer(phase_source=phases, deg=True)
def get_map_object(file_name=None,must_allow_sharpening=None,
get_map_labels=None,out=sys.stdout):
# read a ccp4 map file and return sg,cell and map objects 2012-01-16
if not os.path.isfile(file_name):
raise Sorry("The map file %s is missing..." %(file_name))
map_labels=None
if file_name.endswith(".xplor"):
import iotbx.xplor.map
m = iotbx.xplor.map.reader(file_name=file_name)
m.unit_cell_grid=m.data.all() # just so we have something
m.space_group_number=0 # so we have something
else:
from iotbx import mrcfile
m = mrcfile.map_reader(file_name=file_name)
print("MIN MAX MEAN RMS of map: %7.2f %7.2f %7.2f %7.2f " %(
m.header_min, m.header_max, m.header_mean, m.header_rms), file=out)
print("grid: ",m.unit_cell_grid, file=out)
print("cell: %8.3f %8.3f %8.3f %8.3f %8.3f %8.3f " %tuple(
m.unit_cell_parameters), file=out)
print("SG: ",m.space_group_number, file=out)
if must_allow_sharpening and m.cannot_be_sharpened():
raise Sorry("Input map is already modified and should not be sharpened")
if get_map_labels:
map_labels=m.labels
print("ORIGIN: ",m.data.origin(), file=out)
print("EXTENT: ",m.data.all(), file=out)
print("IS PADDED: ",m.data.is_padded(), file=out)
map_data=m.data
acc=map_data.accessor()
shift_needed = not \
(map_data.focus_size_1d() > 0 and map_data.nd() == 3 and
map_data.is_0_based())
if(shift_needed):
map_data = map_data.shift_origin()
origin_shift=(
m.data.origin()[0]/m.data.all()[0],
m.data.origin()[1]/m.data.all()[1],
m.data.origin()[2]/m.data.all()[2])
origin_frac=origin_shift # NOTE: fraction of NEW cell
else:
origin_frac=(0.,0.,0.)
# determine if we need to trim off the outer part of the map duplicating inner
offsets=[]
need_offset=False
for g,e in zip(m.unit_cell_grid,map_data.all() ):
offset=e-g
offsets.append(offset)
if offsets == [1,1,1]:
if origin_frac!=(0.,0.,0.): # this was a shifted map...we can't do this
raise Sorry("Sorry if a CCP4 map has an origin other than (0,0,0) "+
"the extent \nof the map must be the same as the grid or 1 "+
"\ngreater for "+
"segment_and_split_map routines."+
"The file %s has a grid of %s and extent of %s" %(
file_name,str(m.unit_cell_grid),str(map_data.all())))
map=map_data[:-1,:-1,:-1]
acc=map.accessor()
else:
map=map_data
# now get space group and cell
from cctbx import crystal
from cctbx import sgtbx
from cctbx import uctbx
if m.space_group_number==0:
n=1 # fix mrc formatting
else:
n=m.space_group_number
if hasattr(m,'unit_cell_parameters'):
space_group_info=sgtbx.space_group_info(number=n)
unit_cell=uctbx.unit_cell(m.unit_cell_parameters)
original_unit_cell_grid=m.unit_cell_grid
original_crystal_symmetry=crystal.symmetry(
unit_cell=unit_cell,space_group_info=space_group_info)
if original_crystal_symmetry and map.all()==m.unit_cell_grid:
crystal_symmetry=original_crystal_symmetry
print("\nUnit cell crystal symmetry used: ", file=out)
else:
crystal_symmetry=m.crystal_symmetry()
print("\nBox crystal symmetry used: ", file=out)
crystal_symmetry.show_summary(f=out)
space_group=crystal_symmetry.space_group()
unit_cell=crystal_symmetry.unit_cell()
else:
space_group=None
unit_cell=None
crystal_symmetry=None
original_crystal_symmetry,original_unit_cell_grid=None,None
map=scale_map(map,out=out)
if get_map_labels:
return map,space_group,unit_cell,crystal_symmetry,origin_frac,acc,\
original_crystal_symmetry,original_unit_cell_grid,map_labels
else:
return map,space_group,unit_cell,crystal_symmetry,origin_frac,acc,\
original_crystal_symmetry,original_unit_cell_grid
def write_ccp4_map(crystal_symmetry, file_name, map_data,
output_unit_cell_grid=None, labels=None):
if output_unit_cell_grid is None:
output_unit_cell_grid=map_data.all()
if labels is None:
labels=flex.std_string([""])
iotbx.mrcfile.write_ccp4_map(
file_name=file_name,
unit_cell=crystal_symmetry.unit_cell(),
space_group=crystal_symmetry.space_group(),
unit_cell_grid = output_unit_cell_grid,
map_data=map_data.as_double(),
labels=labels)
def set_up_xrs(crystal_symmetry=None): # dummy xrs to write out atoms
lines=["ATOM 92 SG CYS A 10 8.470 28.863 18.423 1.00 22.05 S"] # just a random line to set up x-ray structure
from cctbx.array_family import flex
from cctbx import xray
pdb_inp=iotbx.pdb.input(source_info="",lines=lines)
xrs = pdb_inp.xray_structure_simple(crystal_symmetry=crystal_symmetry)
scatterers = flex.xray_scatterer()
return xrs,scatterers
def write_atoms(tracking_data=None,sites=None,file_name=None,
crystal_symmetry=None,
atom_name=None,resname=None,atom_type=None,occ=None,
out=sys.stdout):
if crystal_symmetry is None:
crystal_symmetry=tracking_data.crystal_symmetry
xrs,scatterers=set_up_xrs(crystal_symmetry=crystal_symmetry)
from cctbx import xray
unit_cell=crystal_symmetry.unit_cell()
for xyz_cart in sites:
scatterers.append( xray.scatterer(scattering_type="O",
label="O",
site=unit_cell.fractionalize(xyz_cart), u=0.38, occupancy=1.0))
text=write_xrs(xrs=xrs,scatterers=scatterers,file_name=file_name,out=out)
if atom_name and resname and atom_type:
text=text.replace("O O "," %2s %3s A" %(atom_name,resname) )
text=text.replace(" O"," %1s" %(atom_type))
if occ:
text=text.replace(" 1.00 "," %.2f " %(occ))
return text
def write_xrs(xrs=None,scatterers=None,file_name="atoms.pdb",out=sys.stdout):
from cctbx import xray
xrs = xray.structure(xrs, scatterers=scatterers)
text=xrs.as_pdb_file()
if file_name:
f=open(file_name,'w')
print(text, file=f)
f.close()
print("Atoms written to %s" %file_name, file=out)
return text
def get_b_iso(miller_array,d_min=None,return_aniso_scale_and_b=False,
d_max=100000.):
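# Estimate an isotropic B for the data: run ML anisotropic scaling
# (absolute_scaling.ml_aniso_absolute_scaling) on the resolution-filtered
# array and return the mean of the three diagonal b_cart terms
# (optionally together with the aniso_scale_and_b object itself).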
if d_min:
res_cut_array=miller_array.resolution_filter(d_max=d_max,
d_min=d_min)
else:
res_cut_array=miller_array
from mmtbx.scaling import absolute_scaling
try:
aniso_scale_and_b=absolute_scaling.ml_aniso_absolute_scaling(
miller_array=res_cut_array, n_residues=200, n_bases=0, ignore_errors=True)
b_cart=aniso_scale_and_b.b_cart
except Exception as e:
b_cart=[0,0,0]
aniso_scale_and_b=None
b_aniso_mean=0.
if b_cart:
for k in [0,1,2]:
b_aniso_mean+=b_cart[k]
if return_aniso_scale_and_b:
return b_aniso_mean/3.0,aniso_scale_and_b
else: # usual
return b_aniso_mean/3.0
def map_coeffs_as_fp_phi(map_coeffs):
amplitudes=map_coeffs.amplitudes()
amplitudes.set_observation_type_xray_amplitude()
assert amplitudes.is_real_array()
phases=map_coeffs.phases(deg=True)
return amplitudes,phases
def map_coeffs_to_fp(map_coeffs):
amplitudes=map_coeffs.amplitudes()
amplitudes.set_observation_type_xray_amplitude()
assert amplitudes.is_real_array()
return amplitudes
def get_f_phases_from_model(f_array=None,pdb_inp=None,overall_b=None,
k_sol=None, b_sol=None, out=sys.stdout):
xray_structure=pdb_inp.construct_hierarchy().extract_xray_structure(
crystal_symmetry=f_array.crystal_symmetry())
print("Getting map coeffs from model with %s atoms.." %(
xray_structure.sites_frac().size()), file=out)
if overall_b is not None:
print("Setting overall b_iso to %7.1f for model " %(
overall_b), file=out)
xray_structure.set_b_iso(value=overall_b)
model_f_array=f_array.structure_factors_from_scatterers(
xray_structure = xray_structure).f_calc()
return model_f_array
def get_f_phases_from_map(map_data=None,crystal_symmetry=None,d_min=None,
d_max=100000.,
d_min_ratio=None,return_as_map_coeffs=False,remove_aniso=None,
get_remove_aniso_object=True,
scale_max=None,
origin_frac=None,
out=sys.stdout):
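# Convert a map to structure factors with mmtbx map_to_structure_factors.
# The effective high-resolution cutoff is d_min*d_min_ratio when both are
# given. Optionally shift the origin, rescale amplitudes to scale_max and
# remove anisotropy (analyze_aniso). Returns (map_coeffs, remove-aniso
# object) or (amplitudes, phases) depending on return_as_map_coeffs.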
if d_min is not None:
d_min_use=d_min
if d_min_ratio is not None:
d_min_use=d_min*d_min_ratio
else:
d_min_use=None
from mmtbx.command_line.map_to_structure_factors import run as map_to_sf
if crystal_symmetry.space_group().type().number() in [0,1]:
args=['d_min=None','box=True','keep_origin=False',
'scale_max=%s' %scale_max]
else: # cannot use box for other space groups
args=['d_min=%s'%(d_min_use),'box=False','keep_origin=False',
'scale_max=%s' %scale_max]
map_coeffs=map_to_sf(args=args,
space_group_number=crystal_symmetry.space_group().type().number(),
ccp4_map=make_ccp4_map(map_data,crystal_symmetry.unit_cell()),
return_as_miller_arrays=True,nohl=True,out=null_out())
if d_min_use:
map_coeffs=map_coeffs.resolution_filter(d_min=d_min_use,d_max=d_max)
if origin_frac and tuple(origin_frac) != (0.,0.,0.): # shift origin
map_coeffs=map_coeffs.translational_shift(origin_frac,deg=False)
map_coeffs=scale_map_coeffs(map_coeffs,scale_max=scale_max,out=out)
if remove_aniso:
print("\nRemoving aniso in data before analysis\n", file=out)
get_remove_aniso_object=True
from cctbx.maptbx.refine_sharpening import analyze_aniso
map_coeffs,map_coeffs_ra=analyze_aniso(
remove_aniso=remove_aniso,
get_remove_aniso_object=get_remove_aniso_object,
map_coeffs=map_coeffs,resolution=d_min,out=out)
if return_as_map_coeffs:
return map_coeffs,map_coeffs_ra
else:
return map_coeffs_as_fp_phi(map_coeffs)
def apply_sharpening(map_coeffs=None,
sharpening_info_obj=None,
n_real=None,b_sharpen=None,crystal_symmetry=None,
target_scale_factors=None,
f_array=None,phases=None,d_min=None,k_sharpen=None,
b_blur_hires=None,
out=sys.stdout):
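# Roughly three cases, taken from the sharpening_info object or the
# explicit arguments:
# (1) target_scale_factors set: apply resolution-bin scale factors
#     (apply_target_scale_factors).
# (2) no b_sharpen/k_sharpen requested: convert the coefficients back to a
#     map unchanged.
# (3) b_sharpen set: apply an overall isotropic correction (also used for
#     blurring, b_sharpen<0), or, if k_sharpen and d_min are set, a smooth
#     transition around d_min (see below).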
if map_coeffs and f_array is None and phases is None:
f_array,phases=map_coeffs_as_fp_phi(map_coeffs)
if sharpening_info_obj is not None:
b_sharpen=sharpening_info_obj.b_sharpen
b_blur_hires=sharpening_info_obj.b_blur_hires
k_sharpen=sharpening_info_obj.k_sharpen
if sharpening_info_obj.input_d_cut:
d_min=sharpening_info_obj.input_d_cut
else:
d_min=sharpening_info_obj.resolution# changed from d_cut
n_real=sharpening_info_obj.n_real
target_scale_factors=sharpening_info_obj.target_scale_factors
n_bins=sharpening_info_obj.n_bins
remove_aniso=sharpening_info_obj.remove_aniso
resolution=sharpening_info_obj.resolution
if target_scale_factors:
assert sharpening_info_obj is not None
print("\nApplying target scale factors vs resolution", file=out)
if not map_coeffs:
map_coeffs=f_array.phase_transfer(phase_source=phases,deg=True)
f_array,phases=map_coeffs_as_fp_phi(map_coeffs)
f_array_b_iso=get_b_iso(f_array,d_min=d_min)
if not f_array.binner():
(local_d_max,local_d_min)=f_array.d_max_min()
f_array.setup_binner(n_bins=n_bins,d_max=local_d_max,d_min=local_d_min)
from cctbx.maptbx.refine_sharpening import apply_target_scale_factors
map_and_b=apply_target_scale_factors(f_array=f_array,phases=phases,
resolution=d_min,
target_scale_factors=target_scale_factors,
n_real=n_real,
out=out)
return map_and_b
elif b_sharpen is None or (
b_sharpen in [0,None] and k_sharpen in [0,None]):
if not map_coeffs:
map_coeffs=f_array.phase_transfer(phase_source=phases,deg=True)
map_data=get_map_from_map_coeffs(map_coeffs=map_coeffs,
crystal_symmetry=crystal_symmetry,n_real=n_real)
return map_and_b_object(map_data=map_data)
elif k_sharpen is None or d_min is None or k_sharpen<=0 or \
( b_blur_hires is None and b_sharpen < 0):
# 2016-08-10 original method: apply b_sharpen to all data
# Use this if blurring (b_sharpen<0) or if k_sharpen is not set
from cctbx import adptbx # next lines from xtriage (basic_analysis.py)
b_cart_aniso_removed=[ b_sharpen, b_sharpen, b_sharpen, 0, 0, 0]
from mmtbx.scaling import absolute_scaling
u_star_aniso_removed=adptbx.u_cart_as_u_star(
f_array.unit_cell(), adptbx.b_as_u( b_cart_aniso_removed ) )
f_array_sharpened=absolute_scaling.anisotropic_correction(
f_array,0.0,u_star_aniso_removed,must_be_greater_than=-0.0001)
else:
# Apply sharpening only to data from infinity to d_min, with transition
# steepness of k_sharpen.
# 2017-08-21 if b_blur_hires is set, sharpen with
# b_sharpen-b_blur_hires data beyond d_min (with same
# transition, so transition goes from b_sharpen TO b_sharpen-b_blur_hires
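# For example, with k_sharpen=10 and d_min=3 A:
#   at d=6 A   value=-30 (clamped to -20), lowres_weight~1.0  -> b_sharpen
#   at d=3 A   value=0,  lowres_weight=0.5 -> average of the two B values
#   at d=2.5 A value=5,  lowres_weight~0.007 -> ~b_sharpen-b_blur_hires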
data_array=f_array.data()
sthol_array=f_array.sin_theta_over_lambda_sq()
d_spacings=f_array.d_spacings()
scale_array=flex.double()
import math
if b_blur_hires is not None:
b_sharpen_hires_use=b_sharpen-b_blur_hires
else:
b_sharpen_hires_use=0.
for x,(ind,sthol),(ind1,d) in zip(data_array,sthol_array,d_spacings):
# for small value (d > d_min, low resolution) b=b_sharpen
# for large value (d < d_min, high resolution) b=b_sharpen_hires_use
# (=b_sharpen-b_blur_hires); transition steepness is set by k_sharpen
value=min(20.,max(-20.,k_sharpen*(d_min-d)))
lowres_weight=1./(1.+math.exp(value))
hires_weight=max(0.,1-lowres_weight)
b_sharpen_use=b_sharpen*lowres_weight+b_sharpen_hires_use*hires_weight
log_scale=sthol*b_sharpen_use
scale_array.append(math.exp(log_scale))
data_array=data_array*scale_array
f_array_sharpened=f_array.customized_copy(data=data_array)
actual_b_iso=get_b_iso(f_array_sharpened,d_min=d_min)
print("B-iso after sharpening by b_sharpen=%6.1f is %7.2f\n" %(
b_sharpen,actual_b_iso), file=out)
sharpened_map_coeffs=f_array_sharpened.phase_transfer(
phase_source=phases,deg=True)
# And get new map
map_data=get_map_from_map_coeffs(map_coeffs=sharpened_map_coeffs,
crystal_symmetry=crystal_symmetry,
n_real=n_real)
mb=map_and_b_object(map_data=map_data,final_b_iso=actual_b_iso)
return mb
def get_map_from_map_coeffs(map_coeffs=None,crystal_symmetry=None,
n_real=None,apply_sigma_scaling=True):
from cctbx import maptbx
from cctbx.maptbx import crystal_gridding
if map_coeffs.crystal_symmetry().space_group_info()!= \
crystal_symmetry.space_group_info():
assert str(map_coeffs.crystal_symmetry().space_group_info()
).replace(" ","").lower()=='p1'
# use map_coeffs.crystal_symmetry
crystal_symmetry=map_coeffs.crystal_symmetry()
if n_real:
cg=crystal_gridding(
unit_cell=crystal_symmetry.unit_cell(),
space_group_info=crystal_symmetry.space_group_info(),
pre_determined_n_real=n_real)
else:
cg=None
fft_map = map_coeffs.fft_map( resolution_factor = 0.25,
crystal_gridding=cg,
symmetry_flags=maptbx.use_space_group_symmetry)
if apply_sigma_scaling:
fft_map.apply_sigma_scaling()
else:
fft_map.apply_volume_scaling()
map_data=fft_map.real_map_unpadded()
return map_data
def find_symmetry_center(map_data,crystal_symmetry=None,out=sys.stdout):
# find center if necessary:
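# The center along each axis is the density-weighted centroid of the mean
# value of each slice perpendicular to that axis (negative slice means are
# truncated to zero), converted from grid units to Cartesian A at the end.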
origin=list(map_data.origin())
all=list(map_data.all())
centroid_wx={}
centroid_w={}
from cctbx import maptbx
for ai in [0,1,2]:
centroid_wx[ai]=0.
centroid_w[ai]=0.
for i in range(0,all[ai]):
if ai==0:
start_tuple=tuple((i,0,0))
end_tuple=tuple((i,all[1],all[2]))
elif ai==1:
start_tuple=tuple((0,i,0))
end_tuple=tuple((all[0],i,all[2]))
elif ai==2:
start_tuple=tuple((0,0,i))
end_tuple=tuple((all[0],all[1],i))
new_map_data = maptbx.copy(map_data,
start_tuple,end_tuple)
mean_value=max(0.,new_map_data.as_1d().as_double().min_max_mean().mean)
centroid_wx[ai]+=mean_value*(i-origin[ai])
centroid_w[ai]+=mean_value
if centroid_w[ai]>0:
centroid_wx[ai]=centroid_wx[ai]/centroid_w[ai]
print("CENTROID OF DENSITY: (%7.2f, %7.2f, %7.2f) (grid units) " %(
tuple((centroid_wx[0],centroid_wx[1],centroid_wx[2],))), file=out)
xyz_fract=matrix.col((centroid_wx[0]/all[0],centroid_wx[1]/all[1],centroid_wx[2]/all[2],))
xyz_cart=crystal_symmetry.unit_cell().orthogonalize(xyz_fract)
print("CENTROID (A): (%7.3f, %7.3f, %7.3f) " %(
tuple(xyz_cart)), file=out)
return xyz_cart
def get_center_of_map(map_data,crystal_symmetry):
all=list(map_data.all())
origin=list(map_data.origin())
sx,sy,sz=[all[0]/2+origin[0],all[1]/2+origin[1],all[2]/2+origin[2]]
site_fract=matrix.col((sx/all[0],sy/all[1],sz/all[2],))
return crystal_symmetry.unit_cell().orthogonalize(site_fract)
def select_remaining_ncs_ops( map_data=None,
crystal_symmetry=None,
random_points=None,
closest_sites=None,
ncs_object=None,
out=sys.stdout):
# identify which NCS ops still apply. Choose the ones that maximize
# scoring with score_ncs_in_map
if ncs_object.max_operators()<1:
return ncs_object
used_ncs_id_list=[ncs_object.ncs_groups()[0].identity_op_id()]
ncs_copies=ncs_object.max_operators()
# find ncs_id that maximizes score (if any)
improving=True
from copy import deepcopy
best_ops_to_keep=deepcopy(used_ncs_id_list)
working_best_ops_to_keep=None
best_score=None
while improving:
improving=False
working_best_ops_to_keep=deepcopy(best_ops_to_keep)
working_score=None
for ncs_id in range(ncs_copies):
if ncs_id in best_ops_to_keep:continue
ops_to_keep=deepcopy(best_ops_to_keep)
ops_to_keep.append(ncs_id)
ncs_used_obj=ncs_object.deep_copy(ops_to_keep=ops_to_keep)
score,ncs_cc=score_ncs_in_map(map_data=map_data,ncs_object=ncs_used_obj,
ncs_in_cell_only=True,
allow_score_with_pg=False,
sites_orth=closest_sites,
crystal_symmetry=crystal_symmetry,out=null_out())
if score is None: continue
if working_score is None or score >working_score:
working_score=score
working_best_ops_to_keep=deepcopy(ops_to_keep)
if working_score is not None and (
best_score is None or working_score>best_score):
improving=True
best_score=working_score
best_ops_to_keep=deepcopy(working_best_ops_to_keep)
ncs_used_obj=ncs_object.deep_copy(ops_to_keep=best_ops_to_keep)
return ncs_used_obj
def run_get_ncs_from_map(params=None,
map_data=None,
crystal_symmetry=None,
map_symmetry_center=None,
ncs_obj=None,
out=sys.stdout,
):
# Get or check NCS operators. Try various possibilities for center of NCS
ncs_obj_to_check=None
if params.reconstruction_symmetry.symmetry and (
not ncs_obj or ncs_obj.max_operators()<2):
if params.reconstruction_symmetry.optimize_center:
center_try_list=[True,False]
else:
center_try_list=[True]
elif ncs_obj:
center_try_list=[True]
ncs_obj_to_check=ncs_obj
elif params.reconstruction_symmetry.optimize_center:
center_try_list=[None]
else:
return None,None,None # did not even try
# check separately for helical symmetry
if params.reconstruction_symmetry.symmetry.lower()=='helical':
helical_list=[True]
elif params.reconstruction_symmetry.symmetry.lower() in ['all','any'] and\
params.reconstruction_symmetry.include_helical_symmetry:
helical_list=[False,True]
else:
helical_list=[False]
new_ncs_obj,ncs_cc,ncs_score=None,None,None
for use_center_of_map in center_try_list:
for include_helical in helical_list:
local_params=deepcopy(params)
local_params.reconstruction_symmetry.include_helical_symmetry=\
include_helical
new_ncs_obj,ncs_cc,ncs_score=get_ncs_from_map(params=local_params,
map_data=map_data,
map_symmetry_center=map_symmetry_center,
use_center_of_map_as_center=use_center_of_map,
crystal_symmetry=crystal_symmetry,
ncs_obj_to_check=ncs_obj_to_check,
out=out
)
if new_ncs_obj:
return new_ncs_obj,ncs_cc,ncs_score
return new_ncs_obj,ncs_cc,ncs_score
def get_ncs_from_map(params=None,
map_data=None,
map_symmetry_center=None,
symmetry=None,
symmetry_center=None,
helical_rot_deg=None,
helical_trans_z_angstrom=None,
two_fold_along_x=None,
op_max=None,
crystal_symmetry=None,
optimize_center=None,
sites_orth=None,
random_points=None,
n_rescore=None,
use_center_of_map_as_center=None,
min_ncs_cc=None,
identify_ncs_id=None,
ncs_obj_to_check=None,
ncs_in_cell_only=False,
out=sys.stdout):
# Purpose: check through standard point groups and helical symmetry to see
# if map has symmetry. If symmetry==ANY then take highest symmetry that fits
# Otherwise limit to the one specified with symmetry.
# Use a library of symmetry matrices. For helical symmetry generate it
# along the z axis.
# Center of symmetry is as supplied, or center of map or center of density
# If center is not supplied and use_center_of_map_as_center, try that
# and return None if it fails to achieve a map cc of min_ncs_cc
if ncs_in_cell_only is None:
ncs_in_cell_only=(not params.crystal_info.use_sg_symmetry)
if symmetry is None:
symmetry=params.reconstruction_symmetry.symmetry
if symmetry_center is None:
symmetry_center=params.reconstruction_symmetry.symmetry_center
if optimize_center is None:
optimize_center=params.reconstruction_symmetry.optimize_center
if helical_rot_deg is None:
helical_rot_deg=params.reconstruction_symmetry.helical_rot_deg
if helical_trans_z_angstrom is None:
helical_trans_z_angstrom=\
params.reconstruction_symmetry.helical_trans_z_angstrom
if n_rescore is None:
n_rescore=params.reconstruction_symmetry.n_rescore
if random_points is None:
random_points=params.reconstruction_symmetry.random_points
if op_max is None:
op_max=params.reconstruction_symmetry.op_max
if two_fold_along_x is None:
two_fold_along_x=params.reconstruction_symmetry.two_fold_along_x
if identify_ncs_id is None:
identify_ncs_id=params.reconstruction_symmetry.identify_ncs_id
if min_ncs_cc is None:
min_ncs_cc=params.reconstruction_symmetry.min_ncs_cc
# if ncs_obj_to_check is supplied...just use that ncs
if ncs_obj_to_check and ncs_obj_to_check.max_operators()>1:
symmetry="SUPPLIED NCS"
if map_symmetry_center is None:
map_symmetry_center=get_center_of_map(map_data,crystal_symmetry)
if optimize_center is None:
if symmetry_center is None and (not use_center_of_map_as_center):
optimize_center=True
print("Setting optimize_center=True as no symmetry_center is supplied", file=out)
else:
optimize_center=False
if symmetry_center is not None:
symmetry_center=matrix.col(symmetry_center)
elif use_center_of_map_as_center:
print("Using center of map as NCS center", file=out)
symmetry_center=map_symmetry_center
else: # Find it
if not ncs_obj_to_check:
print("Finding NCS center as it is not supplied", file=out)
symmetry_center=find_symmetry_center(
map_data,crystal_symmetry=crystal_symmetry,
out=out)
print("Center of NCS (A): (%7.3f, %7.3f, %7.3f) " %(
tuple(symmetry_center)), file=out)
ncs_list=get_ncs_list(params=params,
symmetry=symmetry,
symmetry_center=symmetry_center,
helical_rot_deg=helical_rot_deg,
two_fold_along_x=two_fold_along_x,
op_max=op_max,
helical_trans_z_angstrom=helical_trans_z_angstrom,
ncs_obj_to_check=ncs_obj_to_check,
map_data=map_data,
crystal_symmetry=crystal_symmetry,
out=out,
)
print("Total of %d NCS types to examine..." %(len(ncs_list)), file=out)
if not sites_orth:
sites_orth=get_points_in_map(
map_data,n=random_points,crystal_symmetry=crystal_symmetry)
# some random points in the map
# Now make sure symmetry applied to points in points_list gives similar values
results_list=[]
for ncs_obj in ncs_list:
symmetry=ncs_obj.get_ncs_name()
score,cc_avg=score_ncs_in_map(map_data=map_data,ncs_object=ncs_obj,
identify_ncs_id=identify_ncs_id,
ncs_in_cell_only=ncs_in_cell_only,
sites_orth=sites_orth,crystal_symmetry=crystal_symmetry,out=out)
if cc_avg < min_ncs_cc:
score=0. # Do not allow low CC values to be used
if score is None:
print("symmetry:",symmetry," no score",ncs_obj.max_operators(), file=out)
else:
results_list.append([score,cc_avg,ncs_obj,symmetry])
if not results_list:
return None,None,None
results_list.sort()
results_list.reverse()
# Rescore top n_rescore
if n_rescore and not ncs_obj_to_check:
print("Rescoring top %d results" %(min(n_rescore,len(results_list))), file=out)
rescore_list=results_list[n_rescore:]
new_sites_orth=get_points_in_map(
map_data,n=10*random_points,crystal_symmetry=crystal_symmetry)
new_sites_orth.extend(sites_orth)
for orig_score,orig_cc_avg,ncs_obj,symmetry in results_list[:n_rescore]:
score,cc_avg=score_ncs_in_map(map_data=map_data,ncs_object=ncs_obj,
identify_ncs_id=identify_ncs_id,
ncs_in_cell_only=ncs_in_cell_only,
sites_orth=new_sites_orth,crystal_symmetry=crystal_symmetry,out=out)
if cc_avg < min_ncs_cc:
score=0. # Do not allow low CC values to be used
if score is None:
print("symmetry:",symmetry," no score",ncs_obj.max_operators(), file=out)
else:
rescore_list.append([score,cc_avg,ncs_obj,symmetry])
rescore_list.sort()
rescore_list.reverse()
results_list=rescore_list
if len(results_list)==1:
# check for C1
score,cc_avg,ncs_obj,symmetry=results_list[0]
if symmetry and symmetry.strip()=='C1':
score=1.
cc_avg=1.
results_list=[[score,cc_avg,ncs_obj,symmetry],]
print("Ranking of NCS types:", file=out)
if min_ncs_cc is not None:
print("NOTE: any NCS type with CC < %.2f (min_ncs_cc) is unscored " %(
min_ncs_cc), file=out)
print("\n SCORE CC OPERATORS SYMMETRY", file=out)
for score,cc_avg,ncs_obj,symmetry in results_list:
if not symmetry: symmetry=""
if not cc_avg: cc_avg=0.0
print(" %6.2f %5.2f %2d %s" %(
score,cc_avg,ncs_obj.max_operators(), symmetry.strip(),), file=out)
score,cc_avg,ncs_obj,ncs_info=results_list[0]
# Optimize center if necessary
if optimize_center:
symmetry_center,cc_avg,score,ncs_obj=optimize_center_position(
map_data,sites_orth,
crystal_symmetry,
ncs_info,symmetry_center,ncs_obj,score,cc_avg,
params=params,
helical_rot_deg=helical_rot_deg,
two_fold_along_x=two_fold_along_x,
op_max=op_max,
min_ncs_cc=min_ncs_cc,
identify_ncs_id=identify_ncs_id,
ncs_obj_to_check=ncs_obj_to_check,
ncs_in_cell_only=ncs_in_cell_only,
helical_trans_z_angstrom=helical_trans_z_angstrom,out=out)
print("New center: (%7.3f, %7.3f, %7.3f)" %(tuple(symmetry_center)), file=out)
if cc_avg < min_ncs_cc:
print("No suitable symmetry found", file=out)
return None,None,None
print("\nBest NCS type is: ", end=' ', file=out)
print("\n SCORE CC OPERATORS SYMMETRY", file=out)
if not ncs_info: ncs_info=""
print(" %6.2f %5.2f %2d %s Best NCS type" %(
score,cc_avg,ncs_obj.max_operators(), ncs_info.strip(),), file=out)
return ncs_obj,cc_avg,score
def optimize_center_position(map_data,sites_orth,crystal_symmetry,
ncs_info,symmetry_center,ncs_obj,score,cc_avg,
params=None,
helical_rot_deg=None,
two_fold_along_x=None,
op_max=None,
identify_ncs_id=None,
ncs_obj_to_check=None,
ncs_in_cell_only=None,
min_ncs_cc=None,
helical_trans_z_angstrom=None,out=sys.stdout):
if ncs_info is None:
ncs_info="None"
symmetry=ncs_info.split()[0]
print("Optimizing center position...type is %s" %(ncs_info), file=out)
if len(ncs_info.split())>1 and ncs_info.split()[1]=='(a)':
two_fold_along_x=True
elif len(ncs_info.split())>1 and ncs_info.split()[1]=='(b)':
two_fold_along_x=False
else:
two_fold_along_x=None
best_center=matrix.col(symmetry_center)
best_ncs_obj=ncs_obj
best_score=score
best_cc_avg=cc_avg
print("Starting center: (%7.3f, %7.3f, %7.3f)" %(tuple(best_center)), file=out)
from libtbx.utils import null_out
scale=5.
for itry in range(6):
scale=scale/5.
for i in range(-4,5):
for j in range(-4,5):
local_center=matrix.col(symmetry_center)+matrix.col((scale*i,scale*j,0.,))
ncs_list=get_ncs_list(params=params,symmetry=symmetry,
symmetry_center=local_center,
helical_rot_deg=helical_rot_deg,
two_fold_along_x=two_fold_along_x,
op_max=op_max,
helical_trans_z_angstrom=helical_trans_z_angstrom,
ncs_obj_to_check=ncs_obj_to_check,
map_data=map_data,
crystal_symmetry=crystal_symmetry,
out=null_out(),
)
if ncs_list:
ncs_obj=ncs_list[0]
score,cc_avg=score_ncs_in_map(map_data=map_data,ncs_object=ncs_obj,
identify_ncs_id=identify_ncs_id,
ncs_in_cell_only=ncs_in_cell_only,
sites_orth=sites_orth,crystal_symmetry=crystal_symmetry,out=out)
if cc_avg < min_ncs_cc:
score=0. # Do not allow low CC values to be used
else:
ncs_obj=None
score,cc_avg=None,None
if best_score is None or score>best_score:
best_cc_avg=cc_avg
best_score=score
best_center=local_center
best_ncs_obj=ncs_obj
symmetry_center=best_center
cc_avg=best_cc_avg
score=best_score
ncs_obj=best_ncs_obj
return best_center,best_cc_avg,best_score,best_ncs_obj
def score_ncs_in_map_point_group_symmetry(
map_data=None,ncs_object=None,sites_orth=None,
crystal_symmetry=None,out=sys.stdout):
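# For each NCS operator, map the sample sites back with the inverse rotation
# and translation, read the map value at the closest grid point for every
# site, and correlate the resulting value lists (get_cc_among_value_lists).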
ncs_group=ncs_object.ncs_groups()[0]
all_value_lists=[]
for c,t,r in zip(ncs_group.centers(),
ncs_group.translations_orth(),
ncs_group.rota_matrices()):
new_sites_cart=flex.vec3_double()
r_inv=r.inverse()
for site in sites_orth:
new_sites_cart.append(r_inv * (matrix.col(site) - t))
# get value at new_sites cart and make sure they are all the same...
new_sites_fract=crystal_symmetry.unit_cell().fractionalize(new_sites_cart)
values=flex.double()
for site_fract in new_sites_fract:
values.append(map_data.value_at_closest_grid_point(site_fract))
all_value_lists.append(values)
return get_cc_among_value_lists(all_value_lists)
def get_cc_among_value_lists(all_value_lists):
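# Correlate each list of map values with the first one. Returns
# (cc_low*sqrt(n_lists), cc_avg), where cc_low is the lowest CC against the
# first list; e.g. 4 lists with a lowest CC of 0.5 score 0.5*sqrt(4) = 1.0.
# A single list returns (None, None).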
a=all_value_lists[0]
cc_avg=0.
cc_low=None
cc_n=0.
for j in range(1,len(all_value_lists)):
b=all_value_lists[j]
cc=flex.linear_correlation(a,b).coefficient()
cc_avg+=cc
cc_n+=1.
if cc_low is None or cc<cc_low:
cc_low=cc
cc_avg=cc_avg/max(1.,cc_n)
if cc_n>0:
import math
return cc_low*math.sqrt(len(all_value_lists)),cc_avg
else:
return None,None
def score_ncs_in_map(map_data=None,ncs_object=None,sites_orth=None,
identify_ncs_id=None,
ncs_in_cell_only=None,
allow_score_with_pg=True,
crystal_symmetry=None,out=sys.stdout):
if not ncs_object or ncs_object.max_operators()<2:
return None,None
ncs_group=ncs_object.ncs_groups()[0]
# don't use point-group symmetry if we have only some of the ops
if allow_score_with_pg and (
(not identify_ncs_id) or ncs_group.is_point_group_symmetry()):
return score_ncs_in_map_point_group_symmetry(
map_data=map_data,ncs_object=ncs_object,
sites_orth=sites_orth,crystal_symmetry=crystal_symmetry,out=out)
# This version does not assume point-group symmetry: find the NCS
# operator that maps each point on to all others the best, then save
# that list of values
identify_ncs_id_list=list(range(ncs_group.n_ncs_oper()))+[None]
all_value_lists=[]
if not sites_orth:
sites_orth=get_points_in_map(map_data,n=100,
minimum_fraction_of_max=0.05,
crystal_symmetry=crystal_symmetry)
for site in sites_orth:
best_id=0
best_score=None
best_values=None
for site_ncs_id in identify_ncs_id_list: #last is real one
if site_ncs_id is None:
site_ncs_id=best_id
real_thing=True
else:
real_thing=False
if identify_ncs_id and site_ncs_id:
local_site=ncs_group.rota_matrices()[site_ncs_id] * matrix.col(site) + \
ncs_group.translations_orth()[site_ncs_id]
else:
local_site=site
new_sites_cart=flex.vec3_double()
for c,t,r in zip(ncs_group.centers(),
ncs_group.translations_orth(),
ncs_group.rota_matrices()):
r_inv=r.inverse()
new_sites_cart.append(r_inv * (matrix.col(local_site) - t))
new_sites_fract=crystal_symmetry.unit_cell().fractionalize(
new_sites_cart)
values=flex.double()
for site_frac in new_sites_fract:
if (not ncs_in_cell_only) or (
site_frac[0]>=0 and site_frac[0]<=1 and \
site_frac[1]>=0 and site_frac[1]<=1 and \
site_frac[2]>=0 and site_frac[2]<=1):
values.append(map_data.value_at_closest_grid_point(site_frac))
else:
values.append(0.)
score=values.standard_deviation_of_the_sample()
if real_thing or (best_score is None or score < best_score):
best_score=score
best_id=site_ncs_id
best_values=values
all_value_lists.append(best_values)
values_by_site_dict={} # all_value_lists[j][i] -> values_by_site_dict[i][j]
# there are sites_orth.size() values of j
# there are len(ncs_group_centers)==len(all_value_lists[0]) values of i
for i in range(len(all_value_lists[0])):
values_by_site_dict[i]=flex.double() # value_list[0][1]
for j in range(sites_orth.size()):
values_by_site_dict[i].append(all_value_lists[j][i])
new_all_values_lists=[]
for i in range(len(all_value_lists[0])):
new_all_values_lists.append(values_by_site_dict[i])
score,cc=get_cc_among_value_lists(new_all_values_lists)
return score,cc
def get_points_in_map(map_data,n=None,
minimum_fraction_of_max=0.,
random_xyz=None,
max_tries_ratio=100,crystal_symmetry=None):
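# Pick up to n random grid points whose map value lies above
# map_mean + minimum_fraction_of_max*(map_max-map_mean) (and below map_max),
# trying at most max_tries_ratio*n random grid points. If random_xyz is set,
# each point is jittered by up to +/- random_xyz A along each axis.
# Returns the selected points as orthogonal (Cartesian) coordinates.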
map_1d=map_data.as_1d()
map_mean=map_1d.min_max_mean().mean
map_max=map_1d.min_max_mean().max
minimum_value=map_mean+minimum_fraction_of_max*(map_max-map_mean)
points_list=flex.vec3_double()
import random
random.seed(1)
nu,nv,nw=map_data.all()
xyz_fract=crystal_symmetry.unit_cell().fractionalize(
tuple((17.4,27.40128571,27.32985714,)))
for i in range(int(max_tries_ratio*n)): # max tries
ix=random.randint(0,nu-1)
iy=random.randint(0,nv-1)
iz=random.randint(0,nw-1)
xyz_fract=matrix.col((ix/nu,iy/nv,iz/nw,))
value=map_data.value_at_closest_grid_point(xyz_fract)
if value > minimum_value and value <map_max:
if random_xyz:
offset=[]
for i in range(3):
offset.append((random.random()-0.5)*2.*random_xyz)
offset=crystal_symmetry.unit_cell().fractionalize(matrix.col(offset))
new_xyz_fract=[]
for x,o in zip(xyz_fract,offset):
new_xyz_fract.append(max(0,min(1,x+o)))
xyz_fract=matrix.col(new_xyz_fract)
points_list.append(xyz_fract)
if points_list.size()>=n: break
sites_orth=crystal_symmetry.unit_cell().orthogonalize(points_list)
return sites_orth
def get_ncs_list(params=None,symmetry=None,
symmetry_center=None,
helical_rot_deg=None,
helical_trans_z_angstrom=None,
op_max=None,
two_fold_along_x=None,
ncs_obj_to_check=None,
crystal_symmetry=None,
map_data=None,
include_helical_symmetry=None,
max_helical_ops_to_check=None,
require_helical_or_point_group_symmetry=None,
out=sys.stdout):
# params.reconstruction_symmetry.require_helical_or_point_group_symmetry
# params.reconstruction_symmetry.include_helical_symmetry
# params.reconstruction_symmetry.max_helical_ops_to_check
if ncs_obj_to_check and ncs_obj_to_check.max_operators()>1:
return [ncs_obj_to_check] # ,["SUPPLIED NCS"]
from mmtbx.ncs.ncs import generate_ncs_ops
ncs_list=generate_ncs_ops(
symmetry=symmetry,
helical_rot_deg=helical_rot_deg,
helical_trans_z_angstrom=helical_trans_z_angstrom,
op_max=op_max,
two_fold_along_x=two_fold_along_x,
include_helical_symmetry=\
params.reconstruction_symmetry.include_helical_symmetry,
max_helical_ops_to_check=\
params.reconstruction_symmetry.max_helical_ops_to_check,
require_helical_or_point_group_symmetry=\
params.reconstruction_symmetry.require_helical_or_point_group_symmetry,
out=out)
# Generate helical symmetry from map if necessary
if symmetry.lower()=='helical' or (
symmetry.lower() in ['all','any'] and
params.reconstruction_symmetry.include_helical_symmetry):
if helical_rot_deg is None or helical_trans_z_angstrom is None:
# returns ncs for symmetry_center at symmetry_center
ncs_object,helical_rot_deg,helical_trans_z_angstrom=\
find_helical_symmetry(params=params,
symmetry_center=symmetry_center,
map_data=map_data,
crystal_symmetry=crystal_symmetry,out=out)
if ncs_object:
ncs_name="Type: Helical %5.2f deg %6.2f Z-trans " %(
helical_rot_deg,helical_trans_z_angstrom)
ncs_object.set_ncs_name(ncs_name)
ncs_list.append(ncs_object)
if symmetry_center and tuple(symmetry_center) != (0,0,0,):
print("Offsetting NCS center by (%.2f, %.2f, %.2f) A " %(tuple(symmetry_center)), file=out)
new_list=[]
for ncs_obj in ncs_list:
new_list.append(ncs_obj.coordinate_offset(coordinate_offset=symmetry_center))
ncs_list=new_list
if (require_helical_or_point_group_symmetry):
for ncs_obj in ncs_list:
assert ncs_obj.is_helical_along_z() or ncs_obj.is_point_group_symmetry()
return ncs_list
def find_helical_symmetry(params=None,
symmetry_center=None,
map_data=None,
crystal_symmetry=None,
max_z_to_test=2, max_peaks_to_score=5,out=sys.stdout):
params=deepcopy(params) # so changing them does not go back
if str(params.reconstruction_symmetry.score_basis)=='None':
params.reconstruction_symmetry.score_basis='cc'
if params.reconstruction_symmetry.smallest_object is None:
params.reconstruction_symmetry.smallest_object=\
5*params.crystal_info.resolution
print("\nLooking for helical symmetry with center at (%.2f, %.2f, %.2f) A\n" %(
tuple(symmetry_center)), file=out)
print("\nFinding likely translations along z...", file=out)
map_coeffs,dummy=get_f_phases_from_map(map_data=map_data,
crystal_symmetry=crystal_symmetry,
d_min=params.crystal_info.resolution,
return_as_map_coeffs=True,
out=out)
f_array,phases=map_coeffs_as_fp_phi(map_coeffs)
from cctbx.maptbx.refine_sharpening import quasi_normalize_structure_factors
(d_max,d_min)=f_array.d_max_min()
f_array.setup_binner(d_max=d_max,d_min=d_min,n_bins=20)
normalized=quasi_normalize_structure_factors(
f_array,set_to_minimum=0.01)
# Now look along c* and get peaks
zero_index_a=0 # look along c*
zero_index_b=1
c_star_list=get_c_star_list(f_array=normalized,
zero_index_a=zero_index_a,
zero_index_b=zero_index_b)
likely_z_translations=get_helical_trans_z_angstrom(params=params,
c_star_list=c_star_list,
crystal_symmetry=crystal_symmetry,
out=out)
# Now for each z...get the rotation. Try +/- z and try rotations
abc=crystal_symmetry.unit_cell().parameters()[:3]
max_z=max(abc[0],abc[1])
if params.reconstruction_symmetry.max_helical_rotations_to_check:
min_delta_rot=360/max(1,
params.reconstruction_symmetry.max_helical_rotations_to_check)
else:
min_delta_rot=0.01
delta_rot=max(min_delta_rot,
(180./3.14159)*params.crystal_info.resolution/max_z)
delta_z=params.crystal_info.resolution/4.
print("\nOptimizing helical paramers:", file=out)
print("\n Rot Trans Score CC", file=out)
n_rotations=int(0.5+360/delta_rot)
rotations=[]
for k in range(1,n_rotations):
helical_rot_deg=k*delta_rot
if helical_rot_deg > 180: helical_rot_deg=helical_rot_deg-360
rotations.append(helical_rot_deg)
done=False
n_try=0
score_list=[]
quick=params.control.quick
best_ncs_cc=None
best_ncs_obj=None
best_score=None
best_helical_trans_z_angstrom=None
best_helical_rot_deg=None
import math
for helical_trans_z_angstrom in likely_z_translations[:max_z_to_test]:
n_try+=1
if done: break
for helical_rot_deg in rotations:
if done: break
if abs(helical_trans_z_angstrom)+abs(math.sin(
helical_rot_deg*(3.14159/180.))*max_z) < \
params.reconstruction_symmetry.smallest_object:
continue # this is identity
new_ncs_obj,ncs_cc,ncs_score,\
new_helical_trans_z_angstrom,new_helical_rot_deg=try_helical_params(
optimize=0,
helical_rot_deg=helical_rot_deg,
helical_trans_z_angstrom=helical_trans_z_angstrom,
params=params,
map_data=map_data,
map_symmetry_center=symmetry_center, # should not be needed XXX
symmetry_center=symmetry_center,
crystal_symmetry=crystal_symmetry,
out=null_out())
if not ncs_score: continue
if ncs_cc> 0.95 or \
ncs_cc > 2*params.reconstruction_symmetry.min_ncs_cc or \
(quick and (ncs_cc> 0.90 or
ncs_cc > 1.5*params.reconstruction_symmetry.min_ncs_cc)):
print(" %.2f %.2f %.2f %.2f (ok to go on)" %(
new_helical_rot_deg,new_helical_trans_z_angstrom,
ncs_score,ncs_cc), file=out)
done=True
if params.control.verbose:
print(" %.2f %.2f %.2f %.2f" %(
new_helical_rot_deg,new_helical_trans_z_angstrom,ncs_score,ncs_cc), file=out)
score_list.append(
[ncs_score,ncs_cc,new_helical_rot_deg,new_helical_trans_z_angstrom])
score_list.sort()
score_list.reverse()
done=False
for ncs_score,ncs_cc,helical_rot_deg,helical_trans_z_angstrom in \
score_list[:max_peaks_to_score]:
if done: continue
# rescore and optimize:
new_ncs_obj,ncs_cc,ncs_score,\
new_helical_trans_z_angstrom,new_helical_rot_deg=try_helical_params(
params=params,
best_score=best_score,
helical_rot_deg=helical_rot_deg,
helical_trans_z_angstrom=helical_trans_z_angstrom,
delta_z=delta_z/2.,
delta_rot=delta_rot/2.,
map_data=map_data,
map_symmetry_center=symmetry_center,
symmetry_center=symmetry_center,
crystal_symmetry=crystal_symmetry,
out=null_out())
if not ncs_score or ncs_score <0:
continue
if best_score is None or ncs_score>best_score:
best_ncs_cc=ncs_cc
best_ncs_obj=new_ncs_obj
best_score=ncs_score
best_helical_trans_z_angstrom=new_helical_trans_z_angstrom
best_helical_rot_deg=new_helical_rot_deg
# after trying out a range quit if good enough
if best_ncs_cc> 0.90 or \
best_ncs_cc > 1.5*params.reconstruction_symmetry.min_ncs_cc or \
( (quick or n_try>1) and \
ncs_cc>=params.reconstruction_symmetry.min_ncs_cc):
print(" %.2f %.2f %.2f %.2f (high enough to go on)" %(
best_helical_rot_deg,best_helical_trans_z_angstrom,
best_score,best_ncs_cc), file=out)
done=True
# Optimize one more time trying fractional values, but only if
# that makes the delta less than the resolution
print("\nTrying fraction of rot/trans", file=out)
for iter in [0,1]:
if not best_helical_rot_deg: continue
for ifract in range(2,11):
if iter==0: # try fractional
test_helical_rot_deg=best_helical_rot_deg/ifract
test_helical_trans_z_angstrom=best_helical_trans_z_angstrom/ifract
if test_helical_trans_z_angstrom > params.crystal_info.resolution*1.1:
continue # skip it...would have been a peak if ok
else: # iter >0
if ifract > 0:
continue # skip these
else: # optimize current best
test_helical_rot_deg=best_helical_rot_deg
test_helical_trans_z_angstrom=best_helical_trans_z_angstrom
new_ncs_obj,new_ncs_cc,new_ncs_score,\
new_helical_trans_z_angstrom,new_helical_rot_deg=\
try_helical_params(
params=params,
helical_rot_deg=test_helical_rot_deg,
helical_trans_z_angstrom=test_helical_trans_z_angstrom,
delta_z=delta_z/2.,
delta_rot=delta_rot/2.,
map_data=map_data,
map_symmetry_center=symmetry_center,
symmetry_center=symmetry_center,
crystal_symmetry=crystal_symmetry,
out=out)
if new_ncs_score is not None:
new_ncs_score=new_ncs_score*\
params.reconstruction_symmetry.scale_weight_fractional_translation
# give slight weight to smaller
if best_score is None or new_ncs_score > best_score:
best_ncs_cc=new_ncs_cc
best_ncs_obj=new_ncs_obj
best_score=new_ncs_score
best_helical_trans_z_angstrom=new_helical_trans_z_angstrom
best_helical_rot_deg=new_helical_rot_deg
print(" %.2f %.2f %.2f %.2f (improved fractions)" %(
best_helical_rot_deg,best_helical_trans_z_angstrom,
best_score,best_ncs_cc), file=out)
else:
print(" %.2f %.2f %.2f %.2f (worse with fractions)" %(
new_helical_rot_deg,new_helical_trans_z_angstrom,
new_ncs_score,new_ncs_cc), file=out)
# Optimize one more time trying multiples of values to get better param
imult=int(0.5+
0.33*max_z/params.reconstruction_symmetry.max_helical_ops_to_check)
working_ncs_cc=best_ncs_cc
working_ncs_obj=best_ncs_obj
working_score=None
if best_helical_rot_deg:
working_helical_rot_deg=best_helical_rot_deg*imult
working_helical_trans_z_angstrom=best_helical_trans_z_angstrom*imult
else:
working_helical_rot_deg=None
working_helical_trans_z_angstrom=None
imult=0
if imult > 1:
print("\nTrying %sx multiples of rot/trans" %(imult), file=out)
improved=False
for iter in [1,2,3]:
if iter > 1 and not improved: break
improved=False
new_ncs_obj,new_ncs_cc,new_ncs_score,\
new_helical_trans_z_angstrom,new_helical_rot_deg=\
try_helical_params(
params=params,
helical_rot_deg=working_helical_rot_deg,
helical_trans_z_angstrom=working_helical_trans_z_angstrom,
delta_z=delta_z/(2.*iter),
delta_rot=delta_rot/(2.*iter),
map_data=map_data,
map_symmetry_center=symmetry_center,
symmetry_center=symmetry_center,
crystal_symmetry=crystal_symmetry,
out=out)
if new_ncs_score is not None:
if working_score is None or new_ncs_score > working_score:
working_ncs_cc=new_ncs_cc
working_ncs_obj=new_ncs_obj
working_score=new_ncs_score
working_helical_trans_z_angstrom=new_helical_trans_z_angstrom
working_helical_rot_deg=new_helical_rot_deg
print(" %.2f %.2f %.2f %.2f (Scoring for multiple)" %(
working_helical_rot_deg,working_helical_trans_z_angstrom,
working_score,working_ncs_cc), file=out)
# and rescore with this:
working_helical_rot_deg=working_helical_rot_deg/imult
working_helical_trans_z_angstrom=working_helical_trans_z_angstrom/imult
for iter in [1,2,3]:
new_ncs_obj,new_ncs_cc,new_ncs_score,\
new_helical_trans_z_angstrom,new_helical_rot_deg=\
try_helical_params(
params=params,
helical_rot_deg=working_helical_rot_deg,
helical_trans_z_angstrom=working_helical_trans_z_angstrom,
delta_z=delta_z/(2.*iter),
delta_rot=delta_rot/(2.*iter),
map_data=map_data,
map_symmetry_center=symmetry_center,
symmetry_center=symmetry_center,
crystal_symmetry=crystal_symmetry,
out=null_out())
if new_ncs_score is not None:
working_ncs_obj,working_ncs_cc,working_ncs_score,\
working_helical_trans_z_angstrom,working_helical_rot_deg=\
new_ncs_obj,new_ncs_cc,new_ncs_score,\
new_helical_trans_z_angstrom,new_helical_rot_deg
if best_score is None or working_ncs_score > best_score:
if params.control.verbose:
print("\nTaking parameters from multiples", file=out)
best_ncs_cc=working_ncs_cc
best_ncs_obj=working_ncs_obj
best_score=working_ncs_score
best_helical_trans_z_angstrom=working_helical_trans_z_angstrom
best_helical_rot_deg=working_helical_rot_deg
print(" %.2f %.2f %.2f %.2f (improved by multiples)" %(
best_helical_rot_deg,best_helical_trans_z_angstrom,
best_score,best_ncs_cc), file=out)
improved=True
working_ncs_cc=best_ncs_cc
working_ncs_obj=best_ncs_obj
working_score=None
working_helical_trans_z_angstrom=best_helical_trans_z_angstrom*imult
working_helical_rot_deg=best_helical_rot_deg*imult
if best_helical_rot_deg and best_helical_trans_z_angstrom and best_score and best_ncs_cc:
# Check to make sure there is no overlap
print(" %.2f %.2f %.2f %.2f (Final)" %(
best_helical_rot_deg,best_helical_trans_z_angstrom,\
best_score,best_ncs_cc), file=out)
from mmtbx.ncs.ncs import get_helical_symmetry
ncs_object=get_helical_symmetry(
helical_rot_deg=best_helical_rot_deg,
helical_trans_z_angstrom=best_helical_trans_z_angstrom,
max_ops=params.reconstruction_symmetry.max_helical_ops_to_check)
else:
ncs_object=None
return ncs_object,best_helical_rot_deg,best_helical_trans_z_angstrom
def try_helical_params(
optimize=None,
best_score=None,
delta_z=None,
delta_rot=None,
helical_rot_deg=None,
helical_trans_z_angstrom=None,
params=None,
map_data=None,
map_symmetry_center=None,
symmetry_center=None,
crystal_symmetry=None,
out=sys.stdout):
if delta_z is None or delta_rot is None:
assert not optimize
local_params=deepcopy(params)
local_params.reconstruction_symmetry.min_ncs_cc=-100
local_params.reconstruction_symmetry.n_rescore=0
local_params.reconstruction_symmetry.symmetry='helical'
local_params.reconstruction_symmetry.helical_rot_deg=helical_rot_deg
local_params.reconstruction_symmetry.helical_trans_z_angstrom=\
helical_trans_z_angstrom
abc=crystal_symmetry.unit_cell().parameters()[:3]
max_z=max(abc[0],abc[1])
import math
if abs(helical_trans_z_angstrom)+abs(math.sin(
helical_rot_deg*(3.14159/180.))*max_z) < \
params.reconstruction_symmetry.smallest_object:
return None,None,None,\
None,None
best_helical_trans_z_angstrom,best_helical_rot_deg=\
helical_trans_z_angstrom,helical_rot_deg
best_ncs_score=best_score
best_ncs_obj=None
best_ncs_cc=None
test_ncs_obj,test_ncs_cc,test_ncs_score=get_ncs_from_map(params=local_params,
map_data=map_data,
map_symmetry_center=symmetry_center,
symmetry_center=symmetry_center,
crystal_symmetry=crystal_symmetry,
out=null_out())
if params.reconstruction_symmetry.score_basis=='cc':
test_ncs_score=test_ncs_cc
if best_ncs_score is None or test_ncs_score>best_ncs_score:
best_ncs_score=test_ncs_score
best_ncs_cc=test_ncs_cc
best_ncs_obj=test_ncs_obj
if optimize is None:
optimize=params.reconstruction_symmetry.max_helical_optimizations
# save in case we need to go back
working_helical_trans_z_angstrom,working_helical_rot_deg=\
helical_trans_z_angstrom,helical_rot_deg
working_ncs_score=best_ncs_score
working_ncs_cc=best_ncs_cc
working_ncs_obj=best_ncs_obj
if optimize and (best_score is None or best_ncs_score > best_score):
# try with few to many operators..
if params.control.verbose:
print("\nOptimizing by varying number of operators", file=out)
print("Starting score: %.2f" %(working_ncs_score), file=out)
for k in range(optimize):
local_params.reconstruction_symmetry.max_helical_ops_to_check=min(k+1,
params.reconstruction_symmetry.max_helical_ops_to_check)
best_score=None # start over for each number of operators
for i in [0,-1,1]:
for j in [0,-1,1]:
new_ncs_obj,new_ncs_cc,new_ncs_score,\
new_helical_trans_z_angstrom,new_helical_rot_deg=try_helical_params(
optimize=False,
helical_rot_deg=max(0.1,best_helical_rot_deg+i*delta_rot),
helical_trans_z_angstrom=max(0.01,best_helical_trans_z_angstrom+j*delta_z),
delta_z=delta_z,
delta_rot=delta_rot,
params=params,
map_data=map_data,
map_symmetry_center=symmetry_center,
symmetry_center=symmetry_center,
crystal_symmetry=crystal_symmetry,
out=out)
if new_ncs_score and new_ncs_score>0 and (
best_score is None or new_ncs_score>best_score):
if params.control.verbose:
print("Working score for %s ops: %.2f" %(
local_params.reconstruction_symmetry.max_helical_ops_to_check,
new_ncs_score), file=out)
best_score=new_ncs_score
best_ncs_score=new_ncs_score
best_helical_trans_z_angstrom=new_helical_trans_z_angstrom
best_helical_rot_deg=new_helical_rot_deg
best_ncs_obj=new_ncs_obj
best_ncs_cc=new_ncs_cc
delta_rot=delta_rot/2
delta_z=delta_z/2
#rescore with what we now have (best values) and compare with working
local_params.reconstruction_symmetry.max_helical_ops_to_check=\
params.reconstruction_symmetry.max_helical_ops_to_check
if params.control.verbose:
print("Rescoring with original number of operators (%s)" %(
local_params.reconstruction_symmetry.max_helical_ops_to_check), file=out)
local_params.reconstruction_symmetry.helical_rot_deg=best_helical_rot_deg
local_params.reconstruction_symmetry.helical_trans_z_angstrom=\
best_helical_trans_z_angstrom
best_ncs_obj,best_ncs_cc,best_ncs_score=get_ncs_from_map(
params=local_params,
map_data=map_data,
map_symmetry_center=symmetry_center,
symmetry_center=symmetry_center,
crystal_symmetry=crystal_symmetry,
out=null_out())
if params.reconstruction_symmetry.score_basis=='cc':
best_ncs_score=best_ncs_cc
# now take it if better
if best_ncs_cc and best_ncs_cc>working_ncs_cc:
if params.control.verbose:
print("Using optimized values (%.2f > %.2f)" %(
best_ncs_cc,working_ncs_cc), file=out)
# keep these (best)
else:
if params.control.verbose:
print("Rejecting optimized values (%.2f <= %.2f)" %(
best_ncs_cc,working_ncs_cc), file=out)
      # restore working values
best_helical_trans_z_angstrom,best_helical_rot_deg=\
working_helical_trans_z_angstrom,working_helical_rot_deg
best_ncs_score=working_ncs_score
best_ncs_obj=working_ncs_obj
best_ncs_cc=working_ncs_cc
if params.reconstruction_symmetry.score_basis=='cc':
best_ncs_score=best_ncs_cc
return best_ncs_obj,best_ncs_cc,best_ncs_score,\
best_helical_trans_z_angstrom,best_helical_rot_deg
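# Illustrative sketch (hypothetical, standalone; not called by the pipeline):
# the same 3x3 neighbourhood refinement used in try_helical_params, applied to
# a generic higher-is-better score(rot, trans) callable. Each cycle tries
# offsets of 0, -delta and +delta in both parameters, keeps any improvement,
# then halves the step sizes.
def _example_local_grid_search(score, rot, trans, delta_rot, delta_z, n_cycles=5):
  best = score(rot, trans)
  for cycle in range(n_cycles):
    for i in [0, -1, 1]:
      for j in [0, -1, 1]:
        trial_rot = rot + i * delta_rot
        trial_trans = trans + j * delta_z
        trial = score(trial_rot, trial_trans)
        if trial > best: # keep the improvement immediately, as above
          best = trial
          rot, trans = trial_rot, trial_trans
    delta_rot, delta_z = delta_rot / 2., delta_z / 2.
  return rot, trans, best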
def get_d_and_value_list(c_star_list):
d_list=[]
from scitbx.array_family import flex
value_list=flex.double()
for hkl,value,d in c_star_list:
d_list.append(d)
value_list.append(value)
max_value=value_list.min_max_mean().max
if value_list.size()>3:
max_value=value_list[2]
new_d_list=[]
new_value_list=[]
for d,value in zip(d_list,value_list):
if value < max_value/1000: # reject those that are really zero
continue
new_d_list.append(d)
new_value_list.append(value)
return new_d_list,new_value_list
def get_helical_trans_z_angstrom(params=None,
c_star_list=None,crystal_symmetry=None,
minimum_ratio=2.,
max_first_peak=1,out=sys.stdout):
# Find highest-resolution peak along c*...guess it is n*z_translation
# where n is small
max_z=flex.double(crystal_symmetry.unit_cell().parameters()[:3]).min_max_mean().max
if params.control.verbose:
print("Values along c*: ", file=out)
d_list,value_list=get_d_and_value_list(c_star_list)
for d,value in zip(d_list,value_list):
if params.control.verbose:
print(" %.2f A : %.2f " %(d,value), file=out)
d_list,value_list=get_max_min_list(
d_list=d_list,value_list=value_list,minimum_ratio=1.0)
d_list,value_list=get_max_min_list(
d_list=d_list,value_list=value_list,minimum_ratio=2.0,
maximum_only=True)
sort_list=[]
for d,value in zip(d_list,value_list):
sort_list.append([value,d])
sort_list.sort()
sort_list.reverse()
likely_z_translations=[]
dis_min=params.crystal_info.resolution/4
for value,d in sort_list:
likely_z_translations_local=[]
for i in range(1,max_first_peak+1):
z=d/i
if z > max_z: continue
if z < dis_min: continue # no way
if is_close_to_any(z=z,others=likely_z_translations,
dis_min=dis_min): continue
likely_z_translations_local.append(z)
likely_z_translations.append(z)
print("\nMaximal values along c* and likely z-translations: ", file=out)
for z in likely_z_translations:
print(" %.2f A " %(z), end=' ', file=out)
print(file=out)
return likely_z_translations
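# Small worked example (hypothetical numbers; not called by the pipeline):
# a layer line at d = 4.8 A along c* with max_first_peak = 2 suggests candidate
# rises of 4.8 A (n=1) and 2.4 A (n=2); the same d/n arithmetic is used in the
# loop above, which additionally rejects duplicates with is_close_to_any.
def _example_candidate_rises(d_peak=4.8, resolution=3.0, max_z=100., max_first_peak=2):
  dis_min = resolution / 4.
  candidates = []
  for i in range(1, max_first_peak + 1):
    z = d_peak / i
    if dis_min <= z <= max_z:
      candidates.append(z)
  return candidates # [4.8, 2.4] for the defaults above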
def small_deltas(z_translations,dis_min=None):
delta_list=[]
for z,z1 in zip(z_translations,z_translations[1:]):
delta=abs(z-z1)
if not is_close_to_any(z=delta,others=z_translations+delta_list,
dis_min=dis_min):
delta_list.append(abs(delta))
return delta_list
def is_close_to_any(z=None,others=None,
dis_min=None):
for x in others:
if abs(x-z)<dis_min:
return True
return False
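# Quick worked example for the two helpers above (hypothetical values; not
# called by the pipeline): with candidate translations [3.0, 7.0, 8.0] and
# dis_min=2.0, the first consecutive delta (4.0) is discarded because it lies
# within 2.0 of the existing value 3.0, while the second delta (1.0) is kept.
def _example_small_deltas():
  return small_deltas([3.0, 7.0, 8.0], dis_min=2.0) # returns [1.0]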
def get_max_min_list(d_list=None,value_list=None,
minimum_ratio=None,maximum_only=False):
max_min_list=[]
max_min_d_list=[]
for value_prev,d_prev, \
value,d, \
value_next,d_next in zip(
value_list+[0,0],
d_list+[0,0],
[0]+value_list+[0],
[0]+d_list+[0],
[0,0]+value_list,
[0,0]+d_list):
if d and ( value >= value_prev *minimum_ratio) and (
value >= value_next*minimum_ratio): # local max
max_min_list.append(value)
max_min_d_list.append(d)
if (not maximum_only) and d and ( value <= value_prev ) and (
value <= value_next): # local min
max_min_list.append(value)
max_min_d_list.append(d)
return max_min_d_list,max_min_list
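# Worked example (hypothetical values; not called by the pipeline): the
# zero-padded zip above compares every value with both neighbours in one pass.
# With values [1,5,1,3,1] at d-spacings [10,8,6,4,2], minimum_ratio=2.0 and
# maximum_only=True, only 5 (at d=8) and 3 (at d=4) are at least twice both
# neighbours, so ([8.0, 4.0], [5.0, 3.0]) is returned.
def _example_local_maxima():
  return get_max_min_list(
    d_list=[10., 8., 6., 4., 2.], value_list=[1., 5., 1., 3., 1.],
    minimum_ratio=2.0, maximum_only=True)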
def get_c_star_list(f_array=None,
zero_index_a=0,zero_index_b=1,zero_index_c=2):
c_star_list=[]
for value,(indices,d) in zip(f_array.data(),
f_array.d_spacings()):
if indices[zero_index_a]==0 and indices[zero_index_b]==0 and \
indices[zero_index_c] >=4:
c_star_list.append([tuple(indices),value,d])
c_star_list.sort()
return c_star_list
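# Plain-python sketch of the same filter (hypothetical reflection data; not
# called by the pipeline): keep only (0, 0, l) reflections with l >= 4, i.e.
# points on the c* axis, since those carry the layer-line information used to
# estimate the helical rise.
def _example_c_star_selection():
  data = [((0, 0, 5), 2.0, 20.0), ((1, 0, 5), 9.0, 18.0), ((0, 0, 12), 4.0, 8.3)]
  return sorted([(hkl, v, d) for (hkl, v, d) in data
     if hkl[0] == 0 and hkl[1] == 0 and hkl[2] >= 4])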
def get_params_from_args(args):
command_line = iotbx.phil.process_command_line_with_files(
map_file_def="input_files.map_file",
seq_file_def="input_files.seq_file",
pdb_file_def="input_files.pdb_in",
ncs_file_def="input_files.ncs_file",
args=args,
master_phil=master_phil)
return command_line.work.extract()
def get_mask_around_molecule(map_data=None,
wang_radius=None,
buffer_radius=None,
return_masked_fraction=False,
minimum_fraction_of_max=0.01,
solvent_content=None,
solvent_content_iterations=None,
crystal_symmetry=None, out=sys.stdout):
# use iterated solvent fraction tool to identify mask around molecule
try:
from phenix.autosol.map_to_model import iterated_solvent_fraction
solvent_fraction,mask=iterated_solvent_fraction(
crystal_symmetry=crystal_symmetry,
wang_radius=wang_radius,
solvent_content=solvent_content,
solvent_content_iterations=solvent_content_iterations,
map_as_double=map_data,
out=out)
except Exception as e:
print("No mask obtained...", file=out)
return None,None
if not mask:
print("No mask obtained...", file=out)
return None,None
# Now expand the mask to increase molecular region
expand_size=estimate_expand_size(
crystal_symmetry=crystal_symmetry,
map_data=map_data,
expand_target=buffer_radius,
out=out)
print("Target mask expand size is %d based on buffer_radius of %7.1f A" %(
expand_size,buffer_radius), file=out)
co,sorted_by_volume,min_b,max_b=get_co(map_data=mask,
threshold=0.5,wrapping=False)
masked_fraction=sorted_by_volume[1][0]/mask.size()
bool_region_mask = co.expand_mask(id_to_expand=sorted_by_volume[1][1],
expand_size=expand_size)
s=(bool_region_mask==True)
expanded_fraction=s.count(True)/s.size()
print("\nLargest masked region before buffering: %7.2f" %(masked_fraction),
file=out)
print("\nLargest masked region after buffering: %7.2f" %(expanded_fraction),
file=out)
if solvent_content:
delta_as_is=abs(solvent_content- (1-masked_fraction))
delta_expanded=abs(solvent_content- (1-expanded_fraction))
if delta_expanded > delta_as_is:
# already there
expand_size=0
print ("Setting expand size to zero as masked fraction already ",
"close to solvent_content",file=out)
s=None
minimum_size=sorted_by_volume[1][0] * minimum_fraction_of_max
if expand_size==0:
result=co.result()
else:
result=None
for v1,i1 in sorted_by_volume[1:]:
if v1 < minimum_size: break
if expand_size > 0:
bool_region_mask = co.expand_mask(
id_to_expand=i1, expand_size=expand_size)
else:
bool_region_mask=(result==i1)
if s is None:
s = (bool_region_mask==True)
else:
s |= (bool_region_mask==True)
mask.set_selected(s,1)
mask.set_selected(~s,0)
masked_fraction=mask.count(1)/mask.size()
print("Masked fraction after buffering: %7.2f" %(masked_fraction), file=out)
if return_masked_fraction:
return mask.as_double(),1-masked_fraction
else: # usual return solvent fraction estimate
return mask.as_double(),solvent_fraction # This is solvent fraction est.
def get_mean_in_and_out(sel=None,
map_data=None,
verbose=False,
out=sys.stdout):
mean_value_in,fraction_in=get_mean_in_or_out(sel=sel,
map_data=map_data,
out=out)
mean_value_out,fraction_out=get_mean_in_or_out(sel= ~sel,
map_data=map_data,
out=out)
if mean_value_out is None:
mean_value_out=mean_value_in
if mean_value_in is None:
mean_value_in=mean_value_out
if verbose:
print("\nMean inside mask: %7.2f Outside mask: %7.2f Fraction in: %7.2f" %(
mean_value_in,mean_value_out,fraction_in), file=out)
return mean_value_in,mean_value_out,fraction_in
def get_mean_in_or_out(sel=None,
map_data=None,
out=sys.stdout):
masked_map=map_data.deep_copy()
masked_map.set_selected(~sel,0)
mean_after_zeroing_in_or_out=masked_map.as_1d().min_max_mean().mean
masked_map.set_selected(sel,1)
fraction_in_or_out=masked_map.as_1d().min_max_mean().mean
if fraction_in_or_out >1.e-10:
mean_value=mean_after_zeroing_in_or_out/fraction_in_or_out
else:
mean_value=None
return mean_value,fraction_in_or_out
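# Worked example of the same arithmetic on a toy 1-D "map" (hypothetical
# values; not called by the pipeline): zeroing outside the selection and taking
# the overall mean gives sum(inside)/N; dividing by the fraction of points
# inside recovers the mean over just the selected points.
def _example_mean_inside_selection():
  values = [2.0, 4.0, 0.0, 0.0] # map values
  inside = [True, True, False, False] # selection
  n = len(values)
  mean_after_zeroing = sum(v for v, s in zip(values, inside) if s) / n # 1.5
  fraction_in = sum(inside) / float(n) # 0.5
  return mean_after_zeroing / fraction_in # 3.0, the mean of [2.0, 4.0]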
def apply_soft_mask(map_data=None,
mask_data=None,
rad_smooth=None,
crystal_symmetry=None,
set_outside_to_mean_inside=False,
set_mean_to_zero=False,
threshold=0.5,
verbose=False,
out=sys.stdout):
# apply a soft mask based on mask_data to map_data.
  # set the value outside the mask to the mean value inside (or outside) the mask
s = mask_data > threshold # s marks inside mask
# get mean inside or outside mask
if verbose:
print("\nStarting map values inside and outside mask:", file=out)
mean_value_in,mean_value_out,fraction_in=get_mean_in_and_out(sel=s,
verbose=verbose,map_data=map_data, out=out)
if verbose:
print("\nMask inside and outside values", file=out)
mask_mean_value_in,mask_mean_value_out,mask_fraction_in=get_mean_in_and_out(
sel=s,map_data=mask_data, verbose=verbose,out=out)
# Smooth the mask in place. First make it a binary mask
mask_data = mask_data.set_selected(~s, 0) # outside mask==0
mask_data = mask_data.set_selected( s, 1)
if mask_data.count(1) and mask_data.count(0): # something to do
if verbose:
print("Smoothing mask...", file=out)
maptbx.unpad_in_place(map=mask_data)
mask_data = maptbx.smooth_map(
map = mask_data,
crystal_symmetry = crystal_symmetry,
rad_smooth = rad_smooth)
# Make sure that mask_data max value is now 1, scale if not
max_mask_data_value=mask_data.as_1d().min_max_mean().max
if max_mask_data_value > 1.e-30 and max_mask_data_value!=1.0:
mask_data=mask_data*(1./max_mask_data_value)
if verbose:
print("Scaling mask by %.2f to yield maximum of 1.0 " %(
1./max_mask_data_value), file=out)
else:
if verbose:
print("Not smoothing mask that is a constant...", file=out)
if verbose:
print("\nSmoothed mask inside and outside values", file=out)
smoothed_mean_value_in,smoothed_mean_value_out,smoothed_fraction_in=\
get_mean_in_and_out(sel=s,map_data=mask_data, verbose=verbose,out=out)
# Now replace value outside mask with mean_value, value inside with current,
# smoothly going from one to the other based on mask_data
# set_to_mean will be a constant map with value equal to inside or outside
if set_outside_to_mean_inside or mean_value_out is None:
target_value_for_outside=mean_value_in
if verbose:
print("Setting value outside mask to mean inside (%.2f)" %(
target_value_for_outside), file=out)
else:
target_value_for_outside=mean_value_out
if verbose:
print("Setting value outside mask to mean outside (%.2f)" %(
target_value_for_outside), file=out)
set_to_mean=mask_data.deep_copy()
ss = set_to_mean > -1.e+30 # select everything
set_to_mean.set_selected(ss, target_value_for_outside)
masked_map= (map_data * mask_data ) + (set_to_mean * (1-mask_data))
if set_mean_to_zero: # remove average
masked_map=masked_map - masked_map.as_1d().min_max_mean().mean
if verbose:
print("\nFinal mean value inside and outside mask:", file=out)
mean_value_in,mean_value_out,fraction_in=get_mean_in_and_out(sel=s,
map_data=masked_map, verbose=verbose,out=out)
return masked_map,mask_data
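# One-grid-point worked example of the blend used above (hypothetical values;
# not called by the pipeline): with a smoothed mask value of 0.25 the result is
# 25% original density plus 75% of the flat outside level.
def _example_soft_mask_blend(map_value=1.2, mask_value=0.25, outside_level=0.1):
  return map_value * mask_value + outside_level * (1.0 - mask_value) # 0.375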
def estimate_expand_size(
crystal_symmetry=None,
map_data=None,
expand_target=None,
out=sys.stdout):
abc = crystal_symmetry.unit_cell().parameters()[:3]
N_ = map_data.all()
nn=0.
for i in range(3):
delta=abc[i]/N_[i]
nn+=expand_target/delta
nn=max(1,int(0.5+nn/3.))
print("Expand size (grid units): %d (about %4.1f A) " %(
nn,nn*abc[0]/N_[0]), file=out)
return max(1,nn)
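# Worked example of the same arithmetic (hypothetical cell and grid; not called
# by the pipeline): a 100 x 100 x 80 A cell sampled on 200 x 200 x 160 points
# has a 0.5 A grid spacing on every axis, so a 6 A buffer needs about 12 grid
# points.
def _example_expand_size(cell=(100., 100., 80.), grid=(200, 200, 160), target=6.0):
  nn = 0.
  for edge, n in zip(cell, grid):
    nn += target / (edge / n)
  return max(1, int(0.5 + nn / 3.)) # 12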
def get_max_z_range_for_helical_symmetry(params,out=sys.stdout):
if not params.input_files.ncs_file: return
ncs_obj,dummy_tracking_data=get_ncs(params=params,out=out)
if not ncs_obj.is_helical_along_z(): return
if params.map_modification.restrict_z_distance_for_helical_symmetry: #take it
return params.map_modification.restrict_z_distance_for_helical_symmetry
if not params.map_modification.restrict_z_turns_for_helical_symmetry: return
print("Identifying maximum z-range for helical symmetry", file=out)
print("Maximum of %7.1f turns up and down in Z allowed..." %(
params.map_modification.restrict_z_turns_for_helical_symmetry), file=out)
r,t=ncs_obj.ncs_groups()[0].helix_rt_forwards()
cost=r[0]
sint=r[1]
import math
theta=abs(180.*math.atan2(sint,cost)/3.14159)
trans=abs(t)
pitch=trans*360./max(0.1,theta)
max_z=params.map_modification.restrict_z_turns_for_helical_symmetry*pitch
print("Z-values restricted to +/- %7.1f A" %(max_z), file=out)
print("\nRunning map-box once to get position of molecule, again to"+\
" apply\n Z restriction\n", file=out)
return max_z
def dist(x,y):
dd=0.
for a,b in zip(x,y):
dd+=(a-b)**2
return dd**0.5
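# Sanity-check example (not called by the pipeline): dist() is a plain
# Euclidean distance, so the classic 3-4-5 triangle gives 5.0.
def _example_dist():
  return dist((0., 0., 0.), (3., 4., 0.)) # 5.0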
def get_ncs_closest_sites(
closest_sites=None,
sites_cart=None,
used_ncs_id_list=None,
box_ncs_object=None,
box_crystal_symmetry=None,
out=sys.stdout):
# try to find NCS ops mapping sites_cart close to closest_sites
  best_ncs_id=None
best_rms=None
best_sites=closest_sites.deep_copy()
for ncs_id in range(box_ncs_object.max_operators()):
if ncs_id in used_ncs_id_list: continue
test_sites=closest_sites.deep_copy()
ncs_sites_cart=get_ncs_sites_cart(sites_cart=sites_cart,
ncs_obj=box_ncs_object,unit_cell=box_crystal_symmetry.unit_cell(),
ncs_id=ncs_id,
ncs_in_cell_only=False)
test_sites.extend(ncs_sites_cart)
rms=radius_of_gyration_of_vector(test_sites)
if best_rms is None or rms < best_rms:
best_rms=rms
best_ncs_id=ncs_id
best_sites=test_sites.deep_copy()
used_ncs_id_list.append(best_ncs_id)
return best_sites,used_ncs_id_list
def get_closest_sites(
high_points=None,
sites_cart=None,
box_ncs_object=None,
box_crystal_symmetry=None,
out=sys.stdout):
if not box_ncs_object.is_point_group_symmetry() and not \
box_ncs_object.is_helical_along_z():
# extract point_group symmetry if present and box_ncs_object doesn't have it
print("Trying to extract point-group symmetry from box_ncs_object "+\
"with %d ops" %( box_ncs_object.max_operators()), file=out)
ncs_object=box_ncs_object.deep_copy(extract_point_group_symmetry=True)
if ncs_object:
print("New number of operators satisfying point-group symmetry: %d" %(
ncs_object.max_operators()), file=out)
box_ncs_object=ncs_object
else:
print("No point-group symmetry found", file=out)
ncs_copies=box_ncs_object.max_operators()
closest_sites=high_points
from scitbx.matrix import col
for id in range(sites_cart.size()):
local_sites_cart=sites_cart[id:id+1]
local_sites_cart.extend(get_ncs_sites_cart(sites_cart=local_sites_cart,
ncs_obj=box_ncs_object,unit_cell=box_crystal_symmetry.unit_cell(),
ncs_in_cell_only=True))
if local_sites_cart.size() <ncs_copies: continue # some were out of range
xx=col((0.,0.,0.,))
for site in closest_sites:
xx+=col(site)
xx=xx/max(1,closest_sites.size())
target=flex.vec3_double()
target.append(xx)
dd,id1,id2=target.min_distance_between_any_pair_with_id(
local_sites_cart)
best_points=local_sites_cart[id2:id2+1]
closest_sites.extend(best_points)
return closest_sites[1:]
def get_range(sites=None,unit_cell=None,map_data=None,
boundary_tolerance=None,out=sys.stdout):
x_values=flex.double()
y_values=flex.double()
z_values=flex.double()
for site_cart in sites:
(x,y,z)=tuple(site_cart)
x_values.append(x)
y_values.append(y)
z_values.append(z)
x_min_max_mean=x_values.min_max_mean()
x_min=x_min_max_mean.min
x_max=x_min_max_mean.max
y_min_max_mean=y_values.min_max_mean()
y_min=y_min_max_mean.min
y_max=y_min_max_mean.max
z_min_max_mean=z_values.min_max_mean()
z_min=z_min_max_mean.min
z_max=z_min_max_mean.max
print("\nRange for box:", file=out)
print(" X Y Z", file=out)
print(" LOW: %7.1f %7.1f %7.1f " %(tuple([x_min,y_min,z_min])), file=out)
print(" HIGH: %7.1f %7.1f %7.1f \n" %(tuple([x_max,y_max,z_max])), file=out)
  # snap bounds to the cell edges (fractional 0 or 1) if within boundary_tolerance
  if x_min<=boundary_tolerance: x_min=0.
  if y_min<=boundary_tolerance: y_min=0.
  if z_min<=boundary_tolerance: z_min=0.
  a,b,c,al,bet,gam=unit_cell.parameters()
  if x_max>=a-boundary_tolerance: x_max=a
  if y_max>=b-boundary_tolerance: y_max=b
  if z_max>=c-boundary_tolerance: z_max=c
print("\nAdjusted range for box:", file=out)
print(" X Y Z", file=out)
print(" LOW: %7.1f %7.1f %7.1f " %(tuple([x_min,y_min,z_min])), file=out)
print(" HIGH: %7.1f %7.1f %7.1f \n" %(tuple([x_max,y_max,z_max])), file=out)
nx,ny,nz=map_data.all()
# convert to grid units
i_min=max(0,min(nx,int(0.5+nx*x_min/a)))
j_min=max(0,min(ny,int(0.5+ny*y_min/b)))
k_min=max(0,min(nz,int(0.5+nz*z_min/c)))
i_max=max(0,min(nx,int(0.5+nx*x_max/a)))
j_max=max(0,min(ny,int(0.5+ny*y_max/b)))
k_max=max(0,min(nz,int(0.5+nz*z_max/c)))
lower_bounds=[i_min,j_min,k_min]
upper_bounds=[i_max,j_max,k_max]
print("\nGrid bounds for box:", file=out)
print(" X Y Z", file=out)
print(" LOW: %7d %7d %7d " %(tuple([i_min,j_min,k_min])), file=out)
print(" HIGH: %7d %7d %7d \n" %(tuple([i_max,j_max,k_max])), file=out)
return lower_bounds,upper_bounds
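# Worked example of the Cartesian-to-grid conversion above (hypothetical
# values; not called by the pipeline): a coordinate of 25 A along a 100 A cell
# edge sampled on 200 grid points maps to grid index int(0.5 + 200*25/100) = 50,
# clamped to [0, 200].
def _example_cart_to_grid(x=25.0, a=100.0, nx=200):
  return max(0, min(nx, int(0.5 + nx * x / a))) # 50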
def get_bounds_for_au_box(params,
box=None,out=sys.stdout):
  # Try to get bounds for a box that includes one au
if not box.ncs_object or box.ncs_object.max_operators()<2:
return None,None,None
box_ncs_object=box.ncs_object
box_map_data=box.map_box
box_crystal_symmetry=box.box_crystal_symmetry
random_points=10*params.reconstruction_symmetry.random_points
sites_cart=get_points_in_map(box_map_data,n=random_points,
minimum_fraction_of_max=params.segmentation.density_select_threshold,
random_xyz=params.crystal_info.resolution*2.,
crystal_symmetry=box_crystal_symmetry)
assert sites_cart.size() >0
  # apply symmetry to sites_cart and see where the molecule is
ncs_sites_cart=get_ncs_sites_cart(sites_cart=sites_cart,
ncs_obj=box_ncs_object,unit_cell=box_crystal_symmetry.unit_cell(),
ncs_in_cell_only=True)
# generate this in a lower-memory way XXX
low_res_map_data=get_low_res_map_data(sites_cart=ncs_sites_cart,
d_min=params.crystal_info.resolution*7.,
crystal_symmetry=box_crystal_symmetry,
out=out)
high_points=get_high_points_from_map( # actually returns just one.
map_data=low_res_map_data,
unit_cell=box_crystal_symmetry.unit_cell(),out=out)
from scitbx.matrix import col
high_points=high_points[0:1]
cutout_center=col(high_points[0])
print("Center of box will be near (%7.1f,%7.1f,%7.1f)" %(
tuple(cutout_center)))
# now figure out box that contains at least one copy of each ncs-related
# point.
# Find closest ncs-related points for each unique random point to this
# center. Starting box is the box that contains all of these.
closest_sites=get_closest_sites(
high_points=high_points,
sites_cart=sites_cart,
box_ncs_object=box_ncs_object,
box_crystal_symmetry=box_crystal_symmetry,
out=out)
if not closest_sites or closest_sites.size()<1:
print("\nNo sites representing au of map found...skipping au box\n", file=out)
return None,None,None
print("\nTotal of %d sites representing 1 au found" %(
closest_sites.size()), file=out)
# write out closest_sites to match original position
coordinate_offset=-1*matrix.col(box.shift_cart)
write_atoms(file_name='one_au.pdb',
crystal_symmetry=box_crystal_symmetry,sites=closest_sites+coordinate_offset)
unique_closest_sites=closest_sites.deep_copy()
# Now if desired, find NCS-related groups of sites
if params.segmentation.n_au_box >1:
print("\nFinding up to %d related au" %(params.segmentation.n_au_box), file=out)
print("Starting RMSD of sites: %7.1f A " %(
radius_of_gyration_of_vector(closest_sites)), file=out)
sites_orig=closest_sites.deep_copy()
used_ncs_id_list=[box_ncs_object.ncs_groups()[0].identity_op_id()]
for i in range(params.segmentation.n_au_box-1):
closest_sites,used_ncs_id_list=get_ncs_closest_sites(
used_ncs_id_list=used_ncs_id_list,
closest_sites=closest_sites,
sites_cart=sites_orig,
box_ncs_object=box_ncs_object,
box_crystal_symmetry=box_crystal_symmetry,
out=out)
print("\nNew total of %d sites representing %d au found" %(
closest_sites.size(),params.segmentation.n_au_box), file=out)
print("New rmsd: %7.1f A " %(
radius_of_gyration_of_vector(closest_sites)), file=out)
lower_bounds,upper_bounds=get_range(
sites=closest_sites,map_data=box_map_data,
boundary_tolerance=params.crystal_info.resolution,
unit_cell=box_crystal_symmetry.unit_cell(),
out=out)
return lower_bounds,upper_bounds,unique_closest_sites+coordinate_offset
def get_low_res_map_data(sites_cart=None,
crystal_symmetry=None,
d_min=None,
out=sys.stdout):
from cctbx import xray
xrs,scatterers=set_up_xrs(crystal_symmetry=crystal_symmetry)
unit_cell=crystal_symmetry.unit_cell()
sites_fract=unit_cell.fractionalize(sites_cart)
for xyz_fract in sites_fract:
scatterers.append( xray.scatterer(scattering_type="H", label="H",
site=xyz_fract, u=0, occupancy=1.0))
xrs = xray.structure(xrs, scatterers=scatterers)
# generate f_array to d_min with xrs
f_array= xrs.structure_factors(d_min=d_min, anomalous_flag=False).f_calc()
weight_f_array=f_array.structure_factors_from_scatterers(
algorithm = 'direct',
xray_structure = xrs).f_calc()
return get_map_from_map_coeffs(map_coeffs=weight_f_array,
crystal_symmetry=crystal_symmetry)
def get_bounds_for_helical_symmetry(params,
box=None,crystal_symmetry=None):
original_cell=box.map_data.all()
new_cell=box.map_box.all()
z_first=box.gridding_first[2]
z_last=box.gridding_last[2]
assert z_last>=z_first
z_middle=(z_first+z_last)//2
  delta_z=crystal_symmetry.unit_cell().parameters()[2]/box.map_data.all()[2]
n_z_max= int(0.5+
params.map_modification.restrict_z_distance_for_helical_symmetry/delta_z)
new_z_first=max(z_first,z_middle-n_z_max)
new_z_last=min(z_last,z_middle+n_z_max)
lower_bounds=deepcopy(box.gridding_first)
upper_bounds=deepcopy(box.gridding_last)
lower_bounds[2]=new_z_first
upper_bounds[2]=new_z_last
return lower_bounds,upper_bounds
def check_memory(map_data,ratio_needed,maximum_fraction_to_use=0.90,
maximum_map_size=1,
out=sys.stdout):
map_size=map_data.size()/(1024*1024*1024)
if maximum_map_size and map_size>maximum_map_size:
raise Sorry("Maximum map size for this tool is %s GB" %(maximum_map_size))
needed_memory=ratio_needed*map_size
from libtbx.utils import guess_total_memory # returns total memory
bytes_total_memory=guess_total_memory()
if bytes_total_memory:
total_memory=bytes_total_memory/(1024*1024*1024)
else:
total_memory=None
print("\nMap size is " +\
"%.2f GB. This will require about %.1f GB of memory" %(
map_size,needed_memory) +"\nfor this stage of analysis\n", file=out)
if total_memory:
print("Total memory on this computer is about %.1f GB." %(
total_memory), file=out)
if (needed_memory>= 0.5* total_memory):
print("\n ** WARNING: It is possible that this computer may not"+\
" have **\n *** sufficient memory to complete this job. ***\n", file=out)
if (needed_memory >= maximum_fraction_to_use*total_memory):
raise Sorry("This computer does not have sufficient "+
"memory (%.0f GB needed) \nto run this job" %(needed_memory))
def get_params(args,map_data=None,crystal_symmetry=None,
half_map_data_list=None,
sharpening_target_pdb_inp=None,
ncs_object=None,
sequence=None,
out=sys.stdout):
params=get_params_from_args(args)
print("\nSegment_and_split_map\n", file=out)
print("Command used: %s\n" %(
" ".join(['segment_and_split_map']+args)), file=out)
master_params.format(python_object=params).show(out=out)
# Set space-group defaults
if params.crystal_info.use_sg_symmetry:
if params.map_modification.restrict_map_size is None:
params.map_modification.restrict_map_size=False
if params.crystal_info.is_crystal is None:
params.crystal_info.is_crystal=True
else:
if params.map_modification.restrict_map_size is None:
params.map_modification.restrict_map_size=True
if params.crystal_info.is_crystal is None:
params.crystal_info.is_crystal=False
# Turn off files if desired
if params.control.write_files is False:
    params.output_files.magnification_map_file = None
params.output_files.magnification_ncs_file = None
params.output_files.shifted_map_file = None
params.output_files.shifted_sharpened_map_file = None
params.output_files.sharpened_map_file = None
params.output_files.shifted_pdb_file = None
params.output_files.shifted_ncs_file = None
params.output_files.shifted_used_ncs_file = None
params.output_files.box_map_file = None
params.output_files.box_mask_file = None
params.output_files.write_output_maps = False
params.output_files.remainder_map_file = None
params.output_files.output_info_file = None
params.output_files.restored_pdb = None
params.output_files.output_weight_map_pickle_file = None
from cctbx.maptbx.auto_sharpen import set_sharpen_params
params=set_sharpen_params(params,out)
if params.input_files.seq_file and not params.crystal_info.sequence and \
not sequence:
if not params.crystal_info.sequence:
if sequence:
params.crystal_info.sequence=sequence
else:
params.crystal_info.sequence=open(params.input_files.seq_file).read()
print("Read sequence from %s" %(params.input_files.seq_file), file=out)
if not params.crystal_info.resolution and (
params.map_modification.b_iso is not None or \
params.map_modification.auto_sharpen
or params.map_modification.resolution_dependent_b or
params.map_modification.b_sharpen):
raise Sorry("Need resolution for segment_and_split_map with sharpening")
if params.map_modification.auto_sharpen and (
params.map_modification.b_iso is not None or
params.map_modification.b_sharpen is not None or
params.map_modification.resolution_dependent_b is not None):
print("Turning off auto_sharpen as it is not compatible with "+\
"b_iso, \nb_sharpen, or resolution_dependent_b", file=out)
params.map_modification.auto_sharpen=False
if params.control.write_files and \
params.output_files.output_directory and \
(not os.path.isdir(params.output_files.output_directory)):
os.mkdir(params.output_files.output_directory)
if not params.output_files.output_directory:
params.output_files.output_directory=""
# Test to see if we can use adjusted_sa as target and use box_map with it
if (params.map_modification.residual_target=='adjusted_sa' or
params.map_modification.sharpening_target=='adjusted_sa') and \
(params.map_modification.box_in_auto_sharpen or
params.map_modification.density_select_in_auto_sharpen):
print("Checking to make sure we can use adjusted_sa as target...", end=' ', file=out)
try:
from phenix.autosol.map_to_model import iterated_solvent_fraction
except Exception as e:
raise Sorry("Please either set box_in_auto_sharpen=False and "+
"\ndensity_select_in_auto_sharpen=False or \n"+\
"set residual_target=kurtosis and sharpening_target=kurtosis")
print("OK", file=out)
if not half_map_data_list: half_map_data_list=[]
if params.input_files.info_file:
map_data=None
pdb_hierarchy=None
from libtbx import easy_pickle
print("Loading tracking data from %s" %(
params.input_files.info_file), file=out)
tracking_data=easy_pickle.load(params.input_files.info_file)
return params,map_data,half_map_data_list,pdb_hierarchy,tracking_data,None
else:
tracking_data=info_object()
tracking_data.set_params(params)
# PDB file
if params.input_files.pdb_file:
print("\nInput PDB file to be used to identify region to work with: %s\n" %(
params.input_files.pdb_file), file=out)
pdb_inp = iotbx.pdb.input(file_name=params.input_files.pdb_file)
pdb_hierarchy = pdb_inp.construct_hierarchy()
pdb_atoms = pdb_hierarchy.atoms()
pdb_atoms.reset_i_seq()
tracking_data.set_input_pdb_info(file_name=params.input_files.pdb_file,
n_residues=pdb_hierarchy.overall_counts().n_residues)
else:
pdb_hierarchy=None
if map_data:
pass # ok
elif params.input_files.map_file:
from iotbx import mrcfile
ccp4_map=iotbx.mrcfile.map_reader(
file_name=params.input_files.map_file)
if not crystal_symmetry:
crystal_symmetry=ccp4_map.crystal_symmetry() # 2018-07-18
tracking_data.set_full_crystal_symmetry(
crystal.symmetry(ccp4_map.unit_cell().parameters(),
ccp4_map.space_group_number))
tracking_data.set_full_unit_cell_grid(ccp4_map.unit_cell_grid)
map_data=ccp4_map.data.as_double()
else:
raise Sorry("Need ccp4 map")
if not crystal_symmetry:
raise Sorry("Need crystal_symmetry")
if params.input_files.half_map_file:
if len(params.input_files.half_map_file) != 2:
raise Sorry("Please supply none or two half_map_file values")
from iotbx import mrcfile
half_map_data_list=[]
half_map_data_list.append(iotbx.mrcfile.map_reader(
file_name=params.input_files.half_map_file[0]).data.as_double())
half_map_data_list.append(iotbx.mrcfile.map_reader(
file_name=params.input_files.half_map_file[1]).data.as_double())
# Get the NCS object
ncs_obj,dummy_tracking_data=get_ncs(params=params,
ncs_object=ncs_object,out=out)
if (not params.map_modification.auto_sharpen or
params.map_modification.b_iso is not None) and (
not params.crystal_info.molecular_mass and
not params.crystal_info.solvent_content and
not params.input_files.seq_file and not params.crystal_info.sequence and
not sequence):
params.crystal_info.solvent_content=get_iterated_solvent_fraction(
crystal_symmetry=crystal_symmetry,
verbose=params.control.verbose,
resolve_size=params.control.resolve_size,
mask_padding_fraction=\
params.segmentation.mask_padding_fraction,
fraction_of_max_mask_threshold=\
params.segmentation.fraction_of_max_mask_threshold,
cell_cutoff_for_solvent_from_mask=\
params.segmentation.cell_cutoff_for_solvent_from_mask,
mask_resolution=params.crystal_info.resolution,
map=map_data,
out=out)
if params.crystal_info.solvent_content:
print("Estimated solvent content: %.2f" %(
params.crystal_info.solvent_content), file=out)
else:
raise Sorry("Unable to estimate solvent content...please supply "+
"solvent_content \nor molecular_mass")
if params.map_modification.auto_sharpen or \
params.map_modification.b_iso is not None or \
params.map_modification.b_sharpen is not None or \
params.map_modification.resolution_dependent_b is not None:
# Sharpen the map
print("Auto-sharpening map before using it", file=out)
local_params=deepcopy(params)
if tracking_data.solvent_fraction: # XXX was previously always done but may not have been set
local_params.crystal_info.solvent_content=tracking_data.solvent_fraction
from cctbx.maptbx.auto_sharpen import run as auto_sharpen
acc=map_data.accessor()
map_data,new_map_coeffs,new_crystal_symmetry,new_si=auto_sharpen(
args=[],params=local_params,
map_data=map_data,
crystal_symmetry=crystal_symmetry,
write_output_files=False,
pdb_inp=sharpening_target_pdb_inp,
ncs_obj=ncs_obj,
return_map_data_only=False,
return_unshifted_map=True,
half_map_data_list=half_map_data_list,
n_residues=tracking_data.n_residues,
ncs_copies=ncs_obj.max_operators(),
out=out)
tracking_data.b_sharpen=new_si.b_sharpen
if not tracking_data.solvent_fraction:
tracking_data.solvent_fraction=new_si.solvent_fraction
if tracking_data.params.output_files.sharpened_map_file:
sharpened_map_file=os.path.join(
tracking_data.params.output_files.output_directory,
tracking_data.params.output_files.sharpened_map_file)
sharpened_map_data=map_data.deep_copy()
if acc is not None: # offset the map to match original if possible
sharpened_map_data.reshape(acc)
print("Gridding of sharpened map:", file=out)
print("Origin: ",sharpened_map_data.origin(), file=out)
print("All: ",sharpened_map_data.all(), file=out)
print("\nWrote sharpened map in original location with "+\
"origin at %s\nto %s" %(
str(sharpened_map_data.origin()),sharpened_map_file), file=out)
# NOTE: original unit cell and grid
write_ccp4_map(tracking_data.full_crystal_symmetry,
sharpened_map_file,sharpened_map_data,
output_unit_cell_grid=tracking_data.full_unit_cell_grid,)
params.input_files.map_file=sharpened_map_file # overwrite map_file name here
# done with any sharpening
params.map_modification.auto_sharpen=False# so we don't do it again later
params.map_modification.b_iso=None
params.map_modification.b_sharpen=None
params.map_modification.resolution_dependent_b=None
if params.control.sharpen_only:
print("Stopping after sharpening", file=out)
return
# check on size right away
if params.control.memory_check:
# map_box and mask generation use about 50GB of memory for
# map with 1 billion elements
check_memory(map_data=map_data,maximum_map_size=None,
ratio_needed=50,out=out)
if params.map_modification.magnification and \
params.map_modification.magnification!=1.0:
print("\nAdjusting magnification by %7.3f\n" %(
params.map_modification.magnification), file=out)
if ncs_obj:
# Magnify ncs
print("NCS before applying magnification...", file=out)
ncs_obj.format_all_for_group_specification(out=out)
ncs_obj=ncs_obj.adjust_magnification(
magnification=params.map_modification.magnification)
if params.output_files.magnification_ncs_file:
file_name=os.path.join(params.output_files.output_directory,
params.output_files.magnification_ncs_file)
print("Writing NCS after magnification of %7.3f to %s" %(
params.map_modification.magnification,file_name), file=out)
ncs_obj.format_all_for_group_specification(out=out)
ncs_obj.format_all_for_group_specification(file_name=file_name)
params.input_files.ncs_file=file_name
else:
raise Sorry("Need magnification_ncs_file defined if magnification is"+
" applied \nto input NCS file")
# Magnify map
shrunk_uc = []
for i in range(3):
shrunk_uc.append(
crystal_symmetry.unit_cell().parameters()[i] *
params.map_modification.magnification )
uc_params=crystal_symmetry.unit_cell().parameters()
from cctbx import uctbx
new_unit_cell=uctbx.unit_cell(
parameters=(shrunk_uc[0],shrunk_uc[1],shrunk_uc[2],
uc_params[3],uc_params[4],uc_params[5]))
print("Original unit cell: (%7.4f, %7.4f, %7.4f, %7.4f, %7.4f, %7.4f)" %(
crystal_symmetry.unit_cell().parameters()), file=out)
crystal_symmetry=crystal.symmetry(
unit_cell=new_unit_cell,
space_group=crystal_symmetry.space_group())
print("New unit cell: (%7.4f, %7.4f, %7.4f, %7.4f, %7.4f, %7.4f)" %(
crystal_symmetry.unit_cell().parameters()), file=out)
# magnify original unit cell too..
cell=list(tracking_data.full_crystal_symmetry.unit_cell().parameters())
for i in range(3):
cell[i]=cell[i]*params.map_modification.magnification
tracking_data.set_full_crystal_symmetry(
crystal.symmetry(tuple(cell),ccp4_map.space_group_number))
print("New original (full unit cell): "+\
" (%7.4f, %7.4f, %7.4f, %7.4f, %7.4f, %7.4f)" %(
      tracking_data.full_crystal_symmetry.unit_cell().parameters()), file=out)
if params.output_files.magnification_map_file:
file_name=os.path.join(params.output_files.output_directory,
params.output_files.magnification_map_file)
# write out magnified map (our working map) (before shifting it)
print("\nWriting magnification map (input map with "+\
"magnification of %7.3f \n" %(params.map_modification.magnification) +\
"applied) to %s \n" %(file_name), file=out)
#write_ccp4_map(crystal_symmetry,file_name,map_data)
# NOTE: original unit cell and grid
write_ccp4_map(tracking_data.full_crystal_symmetry,
file_name,map_data,
output_unit_cell_grid=tracking_data.original_unit_cell_grid,)
params.input_files.map_file=file_name
else:
raise Sorry("Need a file name to write out magnification_map_file")
params.map_modification.magnification=None # no longer need it.
tracking_data.set_input_map_info(file_name=params.input_files.map_file,
crystal_symmetry=crystal_symmetry,
origin=map_data.origin(),
all=map_data.all())
tracking_data.set_crystal_symmetry(crystal_symmetry=crystal_symmetry)
tracking_data.set_original_crystal_symmetry(crystal_symmetry=crystal_symmetry)
tracking_data.set_accessor(acc=map_data.accessor())
# Save center of map
map_symmetry_center=get_center_of_map(map_data,crystal_symmetry)
# Check for helical ncs...if present we may try to cut map at +/- 1 turn
params.map_modification.restrict_z_distance_for_helical_symmetry=\
get_max_z_range_for_helical_symmetry(params,out=out)
# either use map_box with density_select=True or just shift the map
if params.segmentation.density_select:
print("\nTrimming map to density...", file=out)
args= ["output_format=ccp4"]
if params.segmentation.density_select_threshold is not None:
print("Threshold for density selection will be: %6.2f \n"%(
params.segmentation.density_select_threshold), file=out)
args.append("density_select_threshold=%s" %(
params.segmentation.density_select_threshold))
if params.segmentation.get_half_height_width is not None:
args.append("get_half_height_width=%s" %(
params.segmentation.get_half_height_width))
if params.input_files.ncs_file:
args.append("symmetry_file=%s" %(params.input_files.ncs_file))
if params.input_files.pdb_file:
args.append("pdb_file=%s" %(params.input_files.pdb_file))
args.append("ccp4_map_file=%s" %(params.input_files.map_file))
file_name_prefix=os.path.join(params.output_files.output_directory,
"density_select")
args.append("output_file_name_prefix=%s" %(file_name_prefix))
from mmtbx.command_line.map_box import run as run_map_box
args.append("keep_input_unit_cell_and_grid=False") # for new defaults
if params.segmentation.lower_bounds and params.segmentation.upper_bounds:
bounds_supplied=True
print("\nRunning map_box with supplied bounds", file=out)
box=run_map_box(args,
map_data=map_data,
ncs_object=ncs_obj,
crystal_symmetry=crystal_symmetry,
lower_bounds=params.segmentation.lower_bounds,
upper_bounds=params.segmentation.upper_bounds,
write_output_files=params.output_files.write_output_maps,
log=out)
else:
bounds_supplied=False
box=run_map_box(["density_select=True"]+args,
map_data=map_data,
crystal_symmetry=crystal_symmetry,
ncs_object=ncs_obj,
write_output_files=params.output_files.write_output_maps,
log=out)
# Run again to select au box
shifted_unique_closest_sites=None
selected_au_box=None
if params.segmentation.select_au_box is None and box.ncs_object and \
box.ncs_object.max_operators() >= params.segmentation.n_ops_to_use_au_box:
params.segmentation.select_au_box=True
print("Setting select_au_box to True as there are %d operators" %(
box.ncs_object.max_operators()), file=out)
if params.segmentation.select_au_box and not bounds_supplied:
lower_bounds,upper_bounds,unique_closest_sites=get_bounds_for_au_box(
params, box=box,out=out) #unique_closest_sites relative to original map
if lower_bounds and upper_bounds:
bounds_supplied=True
selected_au_box=True
score,ncs_cc=score_ncs_in_map(map_data=box.map_box,
allow_score_with_pg=False,
sites_orth=unique_closest_sites+box.shift_cart,
ncs_object=box.ncs_object,ncs_in_cell_only=True,
crystal_symmetry=box.box_crystal_symmetry,out=null_out())
print("NCS CC before rerunning box: %7.2f SCORE: %7.1f OPS: %d " %(
ncs_cc,score,box.ncs_object.max_operators()), file=out)
print("\nRunning map-box again with boxed range ...", file=out)
del box
box=run_map_box(args,lower_bounds=lower_bounds,
map_data=map_data,
crystal_symmetry=crystal_symmetry,
ncs_object=ncs_obj,
upper_bounds=upper_bounds, log=out)
box.map_box=box.map_box.as_double() # Do we need double?
shifted_unique_closest_sites=unique_closest_sites+box.shift_cart
# Or run again for helical symmetry
elif params.map_modification.restrict_z_distance_for_helical_symmetry and \
not bounds_supplied:
bounds_supplied=True
lower_bounds,upper_bounds=get_bounds_for_helical_symmetry(params,
crystal_symmetry=crystal_symmetry,box=box)
print("\nRunning map-box again with restricted Z range ...", file=out)
box=run_map_box(args,
map_data=map_data,
crystal_symmetry=crystal_symmetry,
ncs_object=ncs_obj,
lower_bounds=lower_bounds,upper_bounds=upper_bounds,
write_output_files=params.output_files.write_output_maps,
log=out)
#-----------------------------
if bounds_supplied and box.ncs_object:
print("Selecting remaining NCS operators", file=out)
box.ncs_object=select_remaining_ncs_ops(
map_data=box.map_box,
crystal_symmetry=box.box_crystal_symmetry,
closest_sites=shifted_unique_closest_sites,
random_points=params.reconstruction_symmetry.random_points,
ncs_object=box.ncs_object,
out=out)
score,ncs_cc=score_ncs_in_map(map_data=box.map_box,
allow_score_with_pg=False,
ncs_object=box.ncs_object,ncs_in_cell_only=True,
sites_orth=shifted_unique_closest_sites,
crystal_symmetry=box.box_crystal_symmetry,out=null_out())
if score is not None:
print("NCS CC after selections: %7.2f SCORE: %7.1f OPS: %d" %(
ncs_cc,score,box.ncs_object.max_operators()), file=out)
#-----------------------------
origin_shift=box.shift_cart
# Note: moving cell with (0,0,0) in middle to (0,0,0) at corner means
# total_shift_cart and origin_shift both positive
map_data=box.map_box
map_data=scale_map(map_data,out=out)
crystal_symmetry=box.box_crystal_symmetry
print("New unit cell: %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f " %(
crystal_symmetry.unit_cell().parameters()), file=out)
tracking_data.set_crystal_symmetry(crystal_symmetry=crystal_symmetry)
print("Moving origin to (0,0,0)", file=out)
print("Adding (%8.2f,%8.2f,%8.2f) to all coordinates\n"%(
tuple(origin_shift)), file=out)
# NOTE: size and cell params are now different!
tracking_data.set_box_map_bounds_first_last(
box.gridding_first,box.gridding_last)
new_half_map_data_list=[]
ii=0
for hm in half_map_data_list:
ii+=1
hm=hm.shift_origin() # shift if necessary
hm=box.cut_and_copy_map(map_data=hm).as_double()
hm.reshape(flex.grid(hm.all()))
new_half_map_data_list.append(hm)
cutout_half_map_file=os.path.join(params.output_files.output_directory,
"cutout_half_map_%s.ccp4" %(ii))
print("Writing cutout half_map data to %s" %(cutout_half_map_file), file=out)
write_ccp4_map(crystal_symmetry,cutout_half_map_file,new_half_map_data_list[-1])
half_map_data_list=new_half_map_data_list
if params.map_modification.soft_mask:
mask_data,map_data,half_map_data_list,\
soft_mask_solvent_fraction,smoothed_mask_data,\
original_box_map_data=\
get_and_apply_soft_mask_to_maps(
resolution=params.crystal_info.resolution,
wang_radius=params.crystal_info.wang_radius,
buffer_radius=params.crystal_info.buffer_radius,
map_data=map_data,crystal_symmetry=crystal_symmetry,
half_map_data_list=half_map_data_list,
out=out)
print("\nSolvent fraction from soft mask procedure: %7.2f (not used)\n" %(
soft_mask_solvent_fraction), file=out)
shifted_ncs_object=box.ncs_object
if not shifted_ncs_object or shifted_ncs_object.max_operators()<2:
from mmtbx.ncs.ncs import ncs
shifted_ncs_object=ncs()
shifted_ncs_object.set_unit_ncs()
else: # shift if necessary...
shift_needed = not \
(map_data.focus_size_1d() > 0 and map_data.nd() == 3 and
map_data.is_0_based())
a,b,c = crystal_symmetry.unit_cell().parameters()[:3]
N_ = map_data.all()
O_ =map_data.origin()
sx,sy,sz= O_[0]/N_[0], O_[1]/N_[1], O_[2]/N_[2]
# Note: If (0,0,0) is in the middle of the box, origin at sx,sy,sz
# is negative, shift of coordinates will be positive
sx_cart,sy_cart,sz_cart=crystal_symmetry.unit_cell().orthogonalize(
[sx,sy,sz])
print("Origin for input map is at (%8.2f,%8.2f,%8.2f)" % (
sx_cart,sy_cart,sz_cart), file=out)
print("Cell dimensions of this map are: (%8.2f,%8.2f,%8.2f)" % (a,b,c), file=out)
if shift_needed:
if(not crystal_symmetry.space_group().type().number() in [0,1]):
raise RuntimeError("Not implemented")
origin_shift=[-sx_cart,-sy_cart,-sz_cart] # positive if (0,0,0) in middle
print("Adding (%8.2f,%8.2f,%8.2f) to all coordinates"%(
tuple(origin_shift))+" to put origin at (0,0,0)\n", file=out)
map_data=map_data.shift_origin()
new_half_map_data_list=[]
for hm in half_map_data_list:
new_half_map_data_list.append(hm.shift_origin())
half_map_data_list=new_half_map_data_list
else:
origin_shift=(0.,0.,0.)
# Get NCS object if any
if params.input_files.ncs_file and not ncs_obj:
ncs_obj,dummy_obj=get_ncs(file_name=params.input_files.ncs_file)
if ncs_obj:
shifted_ncs_object=ncs_obj.coordinate_offset(
coordinate_offset=matrix.col(origin_shift)) # shift to match shifted map
else:
from mmtbx.ncs.ncs import ncs
shifted_ncs_object=ncs()
shifted_ncs_object.set_unit_ncs()
update_tracking_data_with_sharpening(
map_data=map_data,
tracking_data=tracking_data,out=out)
# Set origin shift now
tracking_data.set_origin_shift(origin_shift)
map_symmetry_center=matrix.col(map_symmetry_center)+matrix.col(origin_shift) # New ctr
if shifted_ncs_object and params.control.check_ncs:
ncs_obj_to_check=shifted_ncs_object
else:
ncs_obj_to_check=None
found_ncs=False
if params.reconstruction_symmetry.symmetry or ncs_obj_to_check or \
params.reconstruction_symmetry.optimize_center:
looking_for_ncs=True
new_ncs_obj,ncs_cc,ncs_score=run_get_ncs_from_map(params=params,
map_data=map_data,
map_symmetry_center=map_symmetry_center,
crystal_symmetry=crystal_symmetry,
ncs_obj=ncs_obj_to_check,
out=out,
)
if new_ncs_obj:
found_ncs=True
shifted_ncs_object=new_ncs_obj.deep_copy()
# offset this back to where it would have been before the origin offset..
new_ncs_obj=new_ncs_obj.coordinate_offset(
coordinate_offset=-1*matrix.col(origin_shift))
# XXX save it in tracking_data
if params.output_files.output_directory:
if not os.path.isdir(params.output_files.output_directory):
os.mkdir(params.output_files.output_directory)
file_name=os.path.join(params.output_files.output_directory,
'ncs_from_map.ncs_spec')
f=open(file_name,'w')
new_ncs_obj.format_all_for_group_specification(out=f)
f.close()
print("Wrote NCS operators (for original map) to %s" %(file_name), file=out)
if not params.control.check_ncs:
params.input_files.ncs_file=file_name # set it
else:
looking_for_ncs=False
if params.control.check_ncs:
print("Done checking NCS", file=out)
return params,map_data,half_map_data_list,pdb_hierarchy,tracking_data,None
if looking_for_ncs and (not found_ncs) and \
params.reconstruction_symmetry.symmetry.upper() not in ['ANY','ALL']:
raise Sorry(
"Unable to identify %s symmetry automatically in this map." %(
params.reconstruction_symmetry.symmetry)+
"\nPlease supply a symmetry file with symmetry matrices.")
if params.segmentation.expand_size is None:
params.segmentation.expand_size=estimate_expand_size(
crystal_symmetry=crystal_symmetry,
map_data=map_data,
expand_target=params.segmentation.expand_target,
out=out)
if params.output_files.output_info_file and params.control.shift_only:
write_info_file(params=params,tracking_data=tracking_data,out=out)
return params,map_data,half_map_data_list,pdb_hierarchy,\
tracking_data,shifted_ncs_object
def write_info_file(params=None,tracking_data=None,out=sys.stdout):
# write out the info file
from libtbx import easy_pickle
tracking_data.show_summary(out=out)
print("\nWriting summary information to: %s" %(
os.path.join(tracking_data.params.output_files.output_directory,params.output_files.output_info_file)), file=out)
print("\nTo restore original position of a PDB file built into these maps, use:", file=out)
print("phenix.segment_and_split_map info_file=%s" %(
os.path.join(tracking_data.params.output_files.output_directory,params.output_files.output_info_file))+" pdb_to_restore=mypdb.pdb\n", file=out)
easy_pickle.dump(os.path.join(tracking_data.params.output_files.output_directory,params.output_files.output_info_file),
tracking_data)
def get_and_apply_soft_mask_to_maps(
resolution=None, #params.crystal_info.resolution
wang_radius=None, #params.crystal_info.wang_radius
buffer_radius=None, #params.crystal_info.buffer_radius
map_data=None,crystal_symmetry=None,
solvent_content=None,
solvent_content_iterations=None,
rad_smooth=None,
half_map_data_list=None,
out=sys.stdout):
smoothed_mask_data=None
if not resolution:
raise Sorry("Need resolution for soft_mask")
if not rad_smooth:
rad_smooth=resolution
print("\nApplying soft mask with smoothing radius of %s\n" %(
rad_smooth), file=out)
if wang_radius:
wang_radius=wang_radius
else:
wang_radius=1.5*resolution
if buffer_radius:
buffer_radius=buffer_radius
else:
buffer_radius=2.*resolution
original_map_data=map_data.deep_copy()
# Check to make sure this is possible
cell_dims=crystal_symmetry.unit_cell().parameters()[:3]
min_cell_dim=min(cell_dims)
if wang_radius > 0.25 * min_cell_dim or buffer_radius > 0.25 * min_cell_dim:
raise Sorry("Cell is too small to get solvent fraction")
mask_data,solvent_fraction=get_mask_around_molecule(map_data=map_data,
crystal_symmetry=crystal_symmetry,
wang_radius=wang_radius,
solvent_content=solvent_content,
solvent_content_iterations=solvent_content_iterations,
buffer_radius=buffer_radius,
return_masked_fraction=True,
out=out)
if mask_data:
map_data,smoothed_mask_data=apply_soft_mask(map_data=map_data,
mask_data=mask_data.as_double(),
rad_smooth=rad_smooth,
crystal_symmetry=crystal_symmetry,
out=out)
new_half_map_data_list=[]
if not half_map_data_list: half_map_data_list=[]
for half_map in half_map_data_list:
half_map,smoothed_mask_data=apply_soft_mask(map_data=half_map,
mask_data=mask_data.as_double(),
rad_smooth=rad_smooth,
crystal_symmetry=crystal_symmetry,
out=out)
new_half_map_data_list.append(half_map)
half_map_data_list=new_half_map_data_list
else:
print("Unable to get mask...skipping", file=out)
return mask_data,map_data,half_map_data_list,\
solvent_fraction,smoothed_mask_data,original_map_data
def get_ncs(params=None,tracking_data=None,file_name=None,
ncs_object=None,out=sys.stdout):
if not file_name:
file_name=params.input_files.ncs_file
  if (not ncs_object or ncs_object.max_operators()<2) and file_name:
    print("Reading ncs from %s" %(file_name), file=out)
is_helical_symmetry=None
if (not ncs_object or ncs_object.max_operators()<2) and not file_name: # No ncs supplied...use just 1 ncs copy..
from mmtbx.ncs.ncs import ncs
ncs_object=ncs()
ncs_object.set_unit_ncs()
#ncs_object.display_all(log=out)
elif (not ncs_object or ncs_object.max_operators()<2) and \
not os.path.isfile(file_name):
raise Sorry("The ncs file %s is missing" %(file_name))
else: # get the ncs
if not ncs_object:
from mmtbx.ncs.ncs import ncs
ncs_object=ncs()
try: # see if we can read biomtr records
pdb_inp=iotbx.pdb.input(file_name=file_name)
ncs_object.ncs_from_pdb_input_BIOMT(pdb_inp=pdb_inp,log=out)
except Exception as e: # try as regular ncs object
ncs_object.read_ncs(file_name=file_name,log=out)
#ncs_object.display_all(log=out)
ncs_object.select_first_ncs_group()
if ncs_object.max_operators()<1:
from mmtbx.ncs.ncs import ncs
ncs_object=ncs()
ncs_object.set_unit_ncs()
print("\nTotal of %d NCS operators read\n" %(
ncs_object.max_operators()), file=out)
if not tracking_data or not params:
return ncs_object,None
if ncs_object.max_operators()<2:
print("No NCS present", file=out)
elif ncs_object.is_helical_along_z(
abs_tol_t=tracking_data.params.reconstruction_symmetry.abs_tol_t,
rel_tol_t=tracking_data.params.reconstruction_symmetry.rel_tol_t,
tol_r=tracking_data.params.reconstruction_symmetry.tol_r):
print("This NCS is helical symmetry", file=out)
is_helical_symmetry=True
elif ncs_object.is_point_group_symmetry(
abs_tol_t=tracking_data.params.reconstruction_symmetry.abs_tol_t,
rel_tol_t=tracking_data.params.reconstruction_symmetry.rel_tol_t,
tol_r=tracking_data.params.reconstruction_symmetry.tol_r):
print("This NCS is point-group symmetry", file=out)
elif params.crystal_info.is_crystal:
print("This NCS is crystal symmetry", file=out)
elif not (
params.reconstruction_symmetry.require_helical_or_point_group_symmetry):
print("WARNING: NCS is not crystal symmetry nor point-group "+\
"symmetry nor helical symmetry", file=out)
else:
raise Sorry("Need point-group or helical symmetry.")
if not ncs_object or ncs_object.max_operators()<1:
raise Sorry("Need ncs information from an ncs_info file")
if tracking_data:
tracking_data.set_input_ncs_info(file_name=file_name, # XXX may be updated ops
number_of_operators=ncs_object.max_operators())
if tracking_data and is_helical_symmetry: # update shifted_ncs_info
if tracking_data.shifted_ncs_info: # XXX may not be needed
shifted=True
else:
shifted=False
print("Updating NCS info (shifted=%s)" %(shifted), file=out)
tracking_data.update_ncs_info(is_helical_symmetry=True,shifted=shifted)
if tracking_data.input_map_info and tracking_data.input_map_info.all:
z_range=tracking_data.crystal_symmetry.unit_cell(). \
parameters()[2]
print("Extending NCS operators to entire cell (z_range=%.1f)" %(
z_range), file=out)
max_operators= \
tracking_data.params.reconstruction_symmetry.max_helical_operators
if max_operators:
print("Maximum new number of NCS operators will be %s" %(
max_operators), file=out)
ncs_object.extend_helix_operators(z_range=z_range,
max_operators=max_operators)
#ncs_object.display_all()
print("New number of NCS operators is: %s " %(
ncs_object.max_operators()), file=out)
tracking_data.update_ncs_info(
number_of_operators=ncs_object.max_operators(),is_helical_symmetry=True,
shifted=shifted)
return ncs_object,tracking_data
def score_threshold(b_vs_region=None,threshold=None,
sorted_by_volume=None,n_residues=None,
ncs_copies=None,
fraction_occupied=None,
solvent_fraction=None,
map_data=None,
residues_per_region=50,
min_volume=None,
min_ratio=None,
max_ratio_to_target=None,
min_ratio_to_target=None,
weight_score_grid_points=1.,
weight_score_ratio=1.0,
weight_near_one=0.1,
min_ratio_of_ncs_copy_to_first=None,
target_in_all_regions=None,
crystal_symmetry=None,
chain_type=None,
out=sys.stdout):
# We want about 1 region per 50-100 residues for the biggest region.
# One possibility is to try to maximize the median size of the N top
# regions, where N=number of expected regions= n_residues/residues_per_region
# Also note we have an idea how big a region should be (how many
# grid points) if we make an assumption about the fractional volume that
# should be inside a region compared to the total volume of protein/nucleic
# acid in the region...this gives us target_in_top_regions points.
# So using this, make the median size as close to target_in_top_regions as
# we can.
# If we have solvent fraction but not ncs_copies or n_residues, guess the
# number of residues and ncs copies from the volume
if ncs_copies is not None and n_residues is not None:
expected_regions=max(ncs_copies,
max(1,int(0.5+n_residues/residues_per_region)))
else:
if chain_type in [None,'None']: chain_type="PROTEIN"
assert crystal_symmetry is not None
assert solvent_fraction is not None
volume_per_residue,nres,chain_type=get_volume_of_seq(
"A",chain_type=chain_type,out=out)
expected_regions=max(1,int(0.5+(1-solvent_fraction)*\
crystal_symmetry.unit_cell().volume()/volume_per_residue ))
# NOTE: This is expected residues. expected_regions should be this
# divided by residues_per_region
expected_regions=max(1,int(0.5+expected_regions/residues_per_region))
ncs_copies=1
target_in_top_regions=target_in_all_regions/expected_regions
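  # Worked example (illustrative numbers only): with n_residues=500,
  # residues_per_region=50 and ncs_copies=4, expected_regions =
  # max(4, int(0.5+500/50)) = 10; if target_in_all_regions were 100000 grid
  # points, target_in_top_regions would be 100000/10 = 10000 points per
  # top region.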
nn=len(sorted_by_volume)-1 # first one is total
ok=True
too_low=None # marker for way too low
too_high=None
if nn < ncs_copies:
ok=False #return # not enough
v1,i1=sorted_by_volume[1]
if v1 < min_volume:
ok=False #return
  if v1 > max_ratio_to_target*target_in_top_regions:
    ok=False #return
    too_low=True  # biggest region is too big...threshold is too low
  if v1 < min_volume or v1 < 0.1*min_ratio_to_target*target_in_top_regions:
    # biggest region is far too small...threshold is way too high
    too_high=True
# there should be about ncs_copies copies of each size region if ncs_copies>1
if ncs_copies>1:
v2,i2=sorted_by_volume[max(1,min(ncs_copies,nn))]
score_ratio=v2/v1 # want it to be about 1
if score_ratio < min_ratio_of_ncs_copy_to_first:
ok=False #return # not allowed
else:
score_ratio=1.0 # for ncs_copies=1
nn2=min(nn,max(1,(expected_regions+1)//2))
median_number,iavg=sorted_by_volume[nn2]
# number in each region should be about target_in_top_regions
if median_number > target_in_top_regions:
score_grid_points=target_in_top_regions/max(1.,median_number)
else:
score_grid_points=median_number/target_in_top_regions
if v1> target_in_top_regions:
score_grid_points_b=target_in_top_regions/max(1.,v1)
else:
score_grid_points_b=v1/target_in_top_regions
score_grid_points=0.5*(score_grid_points+score_grid_points_b)
score_grid_points=score_grid_points**2 # maybe even **3
if threshold>1.:
score_near_one=1./threshold
else:
score_near_one=threshold
# Normalize weight_score_ratio by target_in_top_regions:
sc=min(1.,0.5*median_number/max(1,target_in_top_regions))
overall_score=(
(sc*weight_score_ratio*score_ratio+
weight_score_grid_points*score_grid_points+
weight_near_one*score_near_one
) /
(weight_score_ratio+weight_score_grid_points+weight_near_one))
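  # overall_score is a weighted mean of three terms: score_ratio (similar
  # sizes across NCS copies), score_grid_points (median/biggest regions near
  # target_in_top_regions) and score_near_one (threshold close to 1).  The
  # factor sc down-weights the ratio term when the median region is much
  # smaller than target_in_top_regions.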
half_expected_regions=max(1,(1+expected_regions)//2)
ratio=sorted_by_volume[min(len(sorted_by_volume)-1,half_expected_regions)][0]/v1
if ok and v1 >= target_in_top_regions/2 and \
len(sorted_by_volume)>half_expected_regions:
last_volume=sorted_by_volume[half_expected_regions][0]
if ratio >=min_ratio and \
last_volume>=min_volume:
has_sufficient_regions=True
else:
has_sufficient_regions=False
else:
has_sufficient_regions=False
print("%7.2f %5.2f %5d %4d %5d %5d %6.3f %5s %5.3f %s %s" %(
b_vs_region.b_iso,threshold,target_in_top_regions,expected_regions,
v1,median_number,ratio,has_sufficient_regions,overall_score,ok,nn), file=out)
if not b_vs_region.b_iso in b_vs_region.b_vs_region_dict.keys():
b_vs_region.b_vs_region_dict[b_vs_region.b_iso]={}
b_vs_region.sa_sum_v_vs_region_dict[b_vs_region.b_iso]={}
b_vs_region.sa_nn_vs_region_dict[b_vs_region.b_iso]={}
b_vs_region.sa_ratio_b_vs_region_dict[b_vs_region.b_iso]={}
b_vs_region.b_vs_region_dict[b_vs_region.b_iso][threshold]=nn
b_vs_region.sa_nn_vs_region_dict[b_vs_region.b_iso][threshold]=None
b_vs_region.sa_ratio_b_vs_region_dict[b_vs_region.b_iso][threshold]=None
return overall_score,has_sufficient_regions,\
too_low,too_high,expected_regions,ok
def choose_threshold(b_vs_region=None,map_data=None,
fraction_occupied=None,
solvent_fraction=None,
n_residues=None,
ncs_copies=None,
scale=0.95,
calculate_sa=None, # calculate surface area of top sa_percent of target
sa_percent=None, # calculate surface area of top sa_fraction of target
density_threshold=None,
starting_density_threshold=None,
wrapping=None,
residues_per_region=None,
min_volume=None,
min_ratio=None,
max_ratio_to_target=None,
min_ratio_to_target=None,
min_ratio_of_ncs_copy_to_first=None,
verbose=None,
crystal_symmetry=None,
chain_type=None,
out=sys.stdout):
best_threshold=None
best_threshold_has_sufficient_regions=None
best_score=None
best_ok=None
if not ncs_copies: ncs_copies=1
print("\nChecking possible cutoffs for region identification", file=out)
print("Scale: %7.3f" %(scale), file=out)
used_ranges=[]
  # Keep track of bounds on the useful threshold range: a threshold flagged
  # as too high becomes the new upper bound, and one flagged as too low
  # becomes the new lower bound; thresholds outside these bounds are skipped.
upper_bound=1000
lower_bound=0.0001
best_nn=None
if density_threshold is not None: # use it
print("\nUsing input threshold of %5.2f " %(
density_threshold), file=out)
n_range_low_high_list=[[0,0]] # use as is
else:
n_range_low_high_list=[[-16,4],[-32,16],[-64,80]]
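    # Each nn maps to a trial threshold of starting_density_threshold*scale**nn;
    # for example, scale=0.95 with nn from -16 to 4 scans thresholds from about
    # 2.27x down to about 0.81x the starting value.  The wider ranges extend
    # the scan, skipping nn values already tried and thresholds outside the
    # current lower/upper bounds.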
  if starting_density_threshold is not None:
    print("Starting density threshold is: %7.3f" %(
       starting_density_threshold), file=out)
  else:
    starting_density_threshold=1.0
if verbose:
local_out=out
else:
from libtbx.utils import null_out
local_out=null_out()
target_in_all_regions=map_data.size()*fraction_occupied*(1-solvent_fraction)
print("\nTarget number of points in all regions: %.0f" %(
target_in_all_regions), file=local_out)
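  # For example, a 200x200x200 map with fraction_occupied=0.2 and
  # solvent_fraction=0.7 gives a target of 8.0e6*0.2*0.3 = 480000 points.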
local_threshold=find_threshold_in_map(target_points=int(
target_in_all_regions),map_data=map_data)
print("Cutoff will be threshold of %7.2f marking %7.1f%% of cell" %(
local_threshold,100.*(1.-solvent_fraction)), file=out)
print("B-iso Threshold Target N Biggest Median Ratio Enough Score OK Regions", file=local_out)
unique_expected_regions=None
for n_range_low,n_range_high in n_range_low_high_list:
last_score=None
for nn in range(n_range_low,n_range_high+1):
if nn in used_ranges: continue
used_ranges.append(nn)
if density_threshold is not None:
threshold=density_threshold
else:
threshold=starting_density_threshold*(scale**nn)
if threshold < lower_bound or threshold > upper_bound:
continue
co,sorted_by_volume,min_b,max_b=get_co(
map_data=map_data.deep_copy(),
threshold=threshold,wrapping=wrapping)
if len(sorted_by_volume)<2:
score,has_sufficient_regions,too_low,too_high,expected_regions,ok=\
None,None,None,None,None,None
continue # don't go on
else:
score,has_sufficient_regions,too_low,too_high,expected_regions,ok=\
score_threshold(b_vs_region=b_vs_region,
threshold=threshold,
sorted_by_volume=sorted_by_volume,
fraction_occupied=fraction_occupied,
solvent_fraction=solvent_fraction,
residues_per_region=residues_per_region,
min_volume=min_volume,
min_ratio=min_ratio,
max_ratio_to_target=max_ratio_to_target,
min_ratio_to_target=min_ratio_to_target,
min_ratio_of_ncs_copy_to_first=min_ratio_of_ncs_copy_to_first,
ncs_copies=ncs_copies,
n_residues=n_residues,
map_data=map_data,
target_in_all_regions=target_in_all_regions,
crystal_symmetry=crystal_symmetry,
chain_type=chain_type,
out=local_out)
if expected_regions:
unique_expected_regions=max(1,
(ncs_copies-1+expected_regions)//ncs_copies)
if too_high and threshold<upper_bound:
upper_bound=threshold
if too_low and threshold>lower_bound:
lower_bound=threshold
if score is None:
if best_threshold and best_threshold_has_sufficient_regions:
if threshold >best_threshold: # new upper bound
upper_bound=threshold
elif threshold <best_threshold: # new lower bound
lower_bound=threshold
elif (ok or not best_ok) and \
(best_score is None or score > best_score):
best_threshold=threshold
best_threshold_has_sufficient_regions=has_sufficient_regions
best_score=score
best_ok=ok
if best_threshold is not None:
print("\nBest threshold: %5.2f\n" %(best_threshold), file=out)
return best_threshold,unique_expected_regions,best_score,best_ok
elif density_threshold is not None: # use it anyhow
return density_threshold,unique_expected_regions,None,None
else:
return None,unique_expected_regions,None,None
def get_co(map_data=None,threshold=None,wrapping=None):
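  # Returns the connectivity object plus sorted_by_volume, a list of
  # (grid-point volume, region id) pairs: the entry for region 0 is kept
  # first regardless of size, and the remaining regions are sorted in
  # decreasing order of volume.  min_b/max_b are per-region bounding boxes
  # in grid points (not Angstroms).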
co=maptbx.connectivity(map_data=map_data,threshold=threshold,
wrapping=wrapping)
regions=co.regions()
rr=list(range(0,co.regions().size()))
regions_0=regions[0]
rr_0=rr[0]
regions=regions[1:]
rr=rr[1:]
if rr:
z = zip(regions,rr)
sorted_by_volume = sorted(z, key=lambda x: x[0], reverse=True)
else:
sorted_by_volume = []
sorted_by_volume=[(regions_0,rr_0)]+sorted_by_volume
min_b, max_b = co.get_blobs_boundaries_tuples() # As grid points, not A
return co,sorted_by_volume,min_b,max_b
def get_connectivity(b_vs_region=None,
map_data=None,
solvent_fraction=None,
n_residues=None,
ncs_copies=None,
fraction_occupied=None,
iterate_with_remainder=None,
min_volume=None,
min_ratio=None,
wrapping=None,
residues_per_region=None,
max_ratio_to_target=None,
min_ratio_to_target=None,
min_ratio_of_ncs_copy_to_first=None,
starting_density_threshold=None,
density_threshold=None,
crystal_symmetry=None,
chain_type=None,
verbose=None,
out=sys.stdout):
print("\nGetting connectivity", file=out)
libtbx.call_back(message='segment',data=None)
# Normalize map data now to SD of the part that is not solvent
map_data=renormalize_map_data(
map_data=map_data,solvent_fraction=solvent_fraction)
# Try connectivity at various thresholds
# Choose one that has about the right number of grid points in top regions
scale=0.95
best_threshold=None
best_scale=scale
best_score=None
best_ok=None
best_unique_expected_regions=None
for ii in range(3):
threshold,unique_expected_regions,score,ok=choose_threshold(
density_threshold=density_threshold,
starting_density_threshold=starting_density_threshold,
b_vs_region=b_vs_region,
map_data=map_data,
n_residues=n_residues,
ncs_copies=ncs_copies,
fraction_occupied=fraction_occupied,
solvent_fraction=solvent_fraction,
scale=scale,
wrapping=wrapping,
residues_per_region=residues_per_region,
min_volume=min_volume,
min_ratio=min_ratio,
max_ratio_to_target=max_ratio_to_target,
min_ratio_to_target=min_ratio_to_target,
min_ratio_of_ncs_copy_to_first=min_ratio_of_ncs_copy_to_first,
crystal_symmetry=crystal_symmetry,
chain_type=chain_type,
verbose=verbose,
out=out)
# Take it if it improves (score, ok)
if threshold is not None:
if best_score is None or \
((ok or not best_ok) and (score > best_score)):
best_score=score
best_unique_expected_regions=unique_expected_regions
best_ok=ok
best_threshold=threshold
best_scale=scale
if best_ok or density_threshold is not None:
break
else:
scale=scale**0.333 # keep trying
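      # scale**0.333 moves scale toward 1 (e.g. 0.95 -> ~0.983), i.e. the
      # next attempt uses finer steps between successive trial thresholds.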
if best_threshold is None or (
density_threshold is not None and best_score is None):
if iterate_with_remainder: # on first try failed
raise Sorry("No threshold found...try with density_threshold=xxx")
else: # on iteration...ok
print("Note: No threshold found", file=out)
return None,None,None,None,None,None,None,None
else:
starting_density_threshold=best_threshold
# try it next time
co,sorted_by_volume,min_b,max_b=get_co(
map_data=map_data,threshold=best_threshold,wrapping=wrapping)
return co,sorted_by_volume,min_b,max_b,best_unique_expected_regions,\
best_score,threshold,starting_density_threshold
def get_volume_of_seq(text,chain_type=None,out=sys.stdout):
from iotbx.bioinformatics import chain_type_and_residues
# get chain type and residues (or use given chain type and count residues)
chain_type,n_residues=chain_type_and_residues(text=text,chain_type=chain_type)
if chain_type is None and n_residues is None:
return None,None,None
if chain_type=='PROTEIN':
mw_residue=110.0 # from $CDOC/matthews.doc
density_factor=1.23 # 1.66/DENSITY-OF-PROTEIN=1.66/1.35
else:
mw_residue=330.0 # guess for DNA/RNA
density_factor=1.15 # 1.66/DENSITY-OF-DNA=1.66/1.45
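  # For example, a 100-residue protein sequence gives an estimated volume of
  # 100*1.23*110.0 = 13530 A**3.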
return len(text)*density_factor*mw_residue,len(text),chain_type
def create_rna_dna(cns_dna_rna_residue_names):
dd={}
for key in cns_dna_rna_residue_names.keys():
dd[cns_dna_rna_residue_names[key]]=key
return dd
def get_solvent_content_from_seq_file(params,
sequence=None,
seq_file=None,
ncs_copies=None,
map_volume=None,
out=sys.stdout):
if not sequence and not os.path.isfile(seq_file):
raise Sorry(
"The sequence file '%s' is missing." %(seq_file))
if not sequence:
print("\nReading sequence from %s " %(seq_file), file=out)
    with open(seq_file) as f:
      sequence=f.read()
from iotbx.bioinformatics import get_sequences
sequences=get_sequences(text=sequence)
# get unique part of these sequences
from mmtbx.validation.chain_comparison import \
extract_unique_part_of_sequences as eups
print("Unique part of sequences:", file=out)
copies_in_unique,base_copies,unique_sequence_dict=eups(sequences)
all_unique_sequence=[]
for seq in copies_in_unique.keys():
print("Copies: %s base copies: %s Sequence: %s" %(
copies_in_unique[seq],base_copies,seq))
all_unique_sequence.append(seq)
if base_copies != ncs_copies:
print("NOTE: %s copies of unique portion but ncs_copies=%s" %(
base_copies,ncs_copies), file=out)
if ncs_copies==1:
ncs_copies=base_copies
print("Using ncs_copies=%s instead" %(ncs_copies), file=out)
else:
print("Still using ncs_copies=%s" %(ncs_copies), file=out)
volume_of_chains=0.
n_residues=0
chain_types_considered=[]
for seq in all_unique_sequence:
volume,nres,chain_type=get_volume_of_seq(seq,
chain_type=params.crystal_info.chain_type,out=out)
if volume is None: continue
volume_of_chains+=volume
n_residues+=nres
if not chain_type in chain_types_considered:
chain_types_considered.append(chain_type)
chain_types_considered.sort()
print("\nChain types considered: %s\n" %(
" ".join(chain_types_considered)), file=out)
volume_of_molecules=volume_of_chains*ncs_copies
n_residues_times_ncs=n_residues*ncs_copies
solvent_fraction=1.-(volume_of_molecules/map_volume)
solvent_fraction=max(0.001,min(0.999,solvent_fraction))
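  # For example, chains of 200000 A**3 total volume with ncs_copies=4 in a
  # map of 2.0e6 A**3 give solvent_fraction = 1 - 800000/2.0e6 = 0.6.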
if solvent_fraction==0.001 or solvent_fraction==0.999:
print("NOTE: solvent fraction of %7.2f very unlikely..." %(
solvent_fraction) + "please check ncs_copies and sequence ", file=out)
print("Solvent content from composition: %7.2f" %(solvent_fraction), file=out)
print("Cell volume: %.1f NCS copies: %d Volume of unique chains: %.1f" %(
map_volume,ncs_copies,volume_of_chains), file=out)
print("Total residues: %d Volume of all chains: %.1f Solvent fraction: %.3f "%(
n_residues_times_ncs,volume_of_molecules,solvent_fraction), file=out)
return solvent_fraction,n_residues,n_residues_times_ncs
def get_solvent_fraction(params,
ncs_object=None,ncs_copies=None,
crystal_symmetry=None,tracking_data=None,out=sys.stdout):
if tracking_data and not crystal_symmetry:
#crystal_symmetry=tracking_data.original_crystal_symmetry not used
crystal_symmetry=tracking_data.crystal_symmetry
map_volume=crystal_symmetry.unit_cell().volume()
if tracking_data and not ncs_copies:
#ncs_copies=tracking_data.input_ncs_info.original_number_of_operators
ncs_copies=tracking_data.input_ncs_info.number_of_operators # 2018-01-29 put back
if not ncs_copies: ncs_copies=1
if params.input_files.seq_file or params.crystal_info.sequence:
solvent_content,n_residues,n_residues_times_ncs=\
get_solvent_content_from_seq_file(
params,
sequence=params.crystal_info.sequence,
seq_file=params.input_files.seq_file,
ncs_copies=ncs_copies,
map_volume=map_volume,
out=out)
if not params.crystal_info.solvent_content:
params.crystal_info.solvent_content=solvent_content
print("Solvent fraction from composition: %7.2f "%(
params.crystal_info.solvent_content), file=out)
else:
print("Solvent content from parameters: %7.2f" %(
params.crystal_info.solvent_content), file=out)
else:
if params.crystal_info.solvent_content:
print("Solvent content from parameters: %7.2f" %(
params.crystal_info.solvent_content), file=out)
elif params.crystal_info.molecular_mass:
params.crystal_info.solvent_content=\
get_solvent_fraction_from_molecular_mass(
crystal_symmetry=crystal_symmetry,
molecular_mass=params.crystal_info.molecular_mass,
out=out)
else:
print("Getting solvent content automatically.", file=out)
if tracking_data:
if params.input_files.seq_file or params.crystal_info.sequence:
tracking_data.set_input_seq_info(file_name=params.input_files.seq_file,
sequence=params.crystal_info.sequence,
n_residues=n_residues)
tracking_data.set_n_residues(
n_residues=n_residues_times_ncs)
if params.crystal_info.solvent_content:
tracking_data.set_solvent_fraction(params.crystal_info.solvent_content)
return tracking_data
else:
return params.crystal_info.solvent_content
def top_key(dd):
if not dd:
return None,None
elif len(dd)==1:
return list(dd.items())[0]
else:
best_key=None
best_n=None
for key in dd.keys():
if not best_n or dd[key] > best_n:
best_n=dd[key]
best_key=key
return best_key,best_n
def choose_max_regions_to_consider(params,
sorted_by_volume=None,
ncs_copies=None):
max_per_au=params.segmentation.max_per_au
min_ratio=params.segmentation.min_ratio
min_volume=params.segmentation.min_volume
# sort and eliminate regions with few points and those at end of list
if len(sorted_by_volume)<2:
return 0
max_grid_points=sorted_by_volume[1][0]
cntr=0
for p in sorted_by_volume[1:]:
cntr+=1
if max_per_au and (cntr>max_per_au*ncs_copies):
cntr-=1
break
v,i=p # v=volume in grid points, i=id
if v/max_grid_points<min_ratio or v < min_volume:
cntr-=1
break
return cntr
def get_edited_mask(sorted_by_volume=None,
max_regions_to_consider=None,
co=None,
out=sys.stdout):
conn_obj=co.result()
origin=list(conn_obj.accessor().origin())
all=list(conn_obj.accessor().all())
conn_obj.accessor().show_summary(out)
edited_mask=conn_obj.deep_copy()
first=True
edited_volume_list=[]
original_id_from_id={}
for i in range(1,max_regions_to_consider+1):
v,id=sorted_by_volume[i]
original_id_from_id[i]=id
edited_volume_list.append(v)
s = (conn_obj==id)
if first:
edited_mask=edited_mask.set_selected(~s,0)
first=False
edited_mask=edited_mask.set_selected(s,i) # edited mask has ID of
# regions, labeled in decreasing size from 1 to max_regions_to_consider
return edited_mask,edited_volume_list,original_id_from_id
def choose_subset(a,target_number=1):
new_array=flex.vec3_double()
assert type(new_array)==type(a)
n=a.size()
nskip=max(1,n//target_number)
i=0
for x in a:
if i%nskip==0 or i==n-1:
new_array.append(x)
i+=1
return new_array
def run_get_duplicates_and_ncs(
ncs_obj=None,
min_b=None,
max_b=None,
edited_mask=None,
original_id_from_id=None,
edited_volume_list=None,
max_regions_to_consider=None,
regions_left=None,
tracking_data=None,
out=sys.stdout,
):
duplicate_dict,equiv_dict,equiv_dict_ncs_copy_dict,region_range_dict,\
region_centroid_dict,region_scattered_points_dict=\
get_duplicates_and_ncs(
ncs_obj=ncs_obj,
min_b=min_b,
max_b=max_b,
edited_mask=edited_mask,
edited_volume_list=edited_volume_list,
original_id_from_id=original_id_from_id,
max_regions_to_consider=max_regions_to_consider,
tracking_data=tracking_data,
out=out)
# check that we have region_centroid for all values
complete=True
missing=[]
for i in range(1,max_regions_to_consider+1):
if not i in region_centroid_dict.keys():
if (regions_left is None) or (i in regions_left):
complete=False
missing.append(i)
if complete:
return duplicate_dict,equiv_dict,equiv_dict_ncs_copy_dict,\
region_range_dict,region_centroid_dict,\
region_scattered_points_dict
else:
raise Sorry("Cannot find region-centroid for all regions? Missing: %s" %(
missing))
def copy_dict_info(from_dict,to_dict):
for key in from_dict.keys():
to_dict[key]=from_dict[key]
def get_centroid_from_blobs(min_b=None,max_b=None,
id=None,original_id_from_id=None):
orig_id=original_id_from_id[id]
upper=max_b[orig_id]
lower=min_b[orig_id]
avg=[]
for u,l in zip(upper,lower):
avg.append(0.5*(u+l))
return avg
def get_duplicates_and_ncs(
ncs_obj=None,
min_b=None,
max_b=None,
edited_mask=None,
original_id_from_id=None,
edited_volume_list=None,
max_regions_to_consider=None,
target_points_per_region=30,
minimum_points_per_region=10,
maximum_points_per_region=100,
tracking_data=None,
out=sys.stdout,
):
unit_cell=tracking_data.crystal_symmetry.unit_cell()
region_scattered_points_dict=get_region_scattered_points_dict(
edited_volume_list=edited_volume_list,
edited_mask=edited_mask,
unit_cell=unit_cell,
target_points_per_region=target_points_per_region,
minimum_points_per_region=minimum_points_per_region,
maximum_points_per_region=maximum_points_per_region)
# Now just use the scattered points to get everything else:
region_n_dict={} # count of points used by region (differs from volume due
# to the sampling)
region_range_dict={} # keyed by region in edited_mask; range for x, y, z
  region_centroid_dict={} # keyed by region in edited_mask; centroid x, y, z
for id in region_scattered_points_dict.keys():
sites=region_scattered_points_dict[id]
region_n_dict[id]=sites.size()
if region_n_dict[id]:
region_centroid_dict[id]=list(sites.mean())
else: # No points...use bounds from object
region_centroid_dict[id]=get_centroid_from_blobs(min_b=min_b,
max_b=max_b,
id=id,original_id_from_id=original_id_from_id)
# Now get NCS relationships
ncs_group=ncs_obj.ncs_groups()[0]
duplicate_dict={} # keyed by id, number of duplicates for that region
equiv_dict={} # equiv_dict[id][other_id]=number_of points other_id matches
# id through an ncs relationship
equiv_dict_ncs_copy_dict={}
for id in region_scattered_points_dict.keys():
duplicate_dict[id]=0
equiv_dict[id]={}
equiv_dict_ncs_copy_dict[id]={}
# Figure out which ncs operator is the identity
identity_op=ncs_group.identity_op_id()
print("Identity operator is %s" %(identity_op), file=out)
# 2017-12-16 Score poorly if it involves a cell translation unless it
# is a crystal
if len(ncs_group.translations_orth())>1:
# Skip if no ncs...
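    # For each sampled point of each region, apply every non-identity NCS
    # operator and look up which region the mapped point lands in: landing in
    # the same region counts as a duplicate (the region spans more than one
    # NCS asymmetric unit); landing in another region records an NCS
    # equivalence between the two regions.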
for id in region_scattered_points_dict.keys():
for xyz_cart in region_scattered_points_dict[id]:
n=0
for i0 in range(len(ncs_group.translations_orth())):
if i0==identity_op: continue
r=ncs_group.rota_matrices_inv()[i0] # inverse maps pos 0 on to pos i
t=ncs_group.translations_orth_inv()[i0]
n+=1
new_xyz_cart=r * matrix.col(xyz_cart) + t
new_xyz_frac=unit_cell.fractionalize(new_xyz_cart)
if tracking_data.params.crystal_info.use_sg_symmetry or \
(new_xyz_frac[0]>=0 and new_xyz_frac[0]<=1 and \
new_xyz_frac[1]>=0 and new_xyz_frac[1]<=1 and \
new_xyz_frac[2]>=0 and new_xyz_frac[2]<=1):
value=edited_mask.value_at_closest_grid_point(new_xyz_frac)
else:
value=0 # value for nothing there 2017-12-16
if value==id:
duplicate_dict[id]+=1
break # only count once
elif value>0: # notice which one is matched
if not value in equiv_dict[id]:
equiv_dict[id][value]=0
equiv_dict_ncs_copy_dict[id][value]={}
equiv_dict[id][value]+=1
if not n in equiv_dict_ncs_copy_dict[id][value]:
equiv_dict_ncs_copy_dict[id][value][n]=0
equiv_dict_ncs_copy_dict[id][value][n]+=1 # how many are ncs copy n
return duplicate_dict,equiv_dict,equiv_dict_ncs_copy_dict,\
region_range_dict,region_centroid_dict,region_scattered_points_dict
def get_region_scattered_points_dict(
edited_volume_list=None,
edited_mask=None,
unit_cell=None,
sampling_rate=None,
target_points_per_region=None,
minimum_points_per_region=None,
maximum_points_per_region=None):
# Get sampled points in each region
sample_dict={}
region_scattered_points_dict={} # some points in each region
if not sampling_rate:
sampling_rate=edited_volume_list[0]//target_points_per_region
sampling_rate_set=False
else:
sampling_rate_set=True
volumes=flex.int()
sampling_rates=flex.int()
id_list=[]
# have to set up dummy first set:
volumes.append(0)
sampling_rates.append(0)
id_list.append(0)
for i in range(len(edited_volume_list)):
id=i+1
v=edited_volume_list[i]
region_scattered_points_dict[id]=flex.vec3_double()
volumes.append(v)
if sampling_rate_set:
sample_dict[id]=sampling_rate
sampling_rates.append(sampling_rate)
else:
sample_dict[id]=max(1,
max(v//maximum_points_per_region,
min(v//minimum_points_per_region,
sampling_rate) ))
sampling_rates.append(max(1,
max(v//maximum_points_per_region,
min(v//minimum_points_per_region,
sampling_rate) )))
id_list.append(id)
sample_regs_obj = maptbx.sample_all_mask_regions(
mask=edited_mask,
volumes=volumes,
sampling_rates=sampling_rates,
unit_cell=unit_cell)
for id in id_list[1:]: # skip the dummy first set
region_scattered_points_dict[id]=sample_regs_obj.get_array(id)
return region_scattered_points_dict
def remove_bad_regions(params=None,
duplicate_dict=None,
edited_volume_list=None,
out=sys.stdout):
worst_list=[]
  for id in list(duplicate_dict.keys()): # copy keys; entries may be deleted below
fract=duplicate_dict[id]/edited_volume_list[id-1]
if duplicate_dict[id] and fract >=params.segmentation.max_overlap_fraction:
worst_list.append([fract,id])
else:
del duplicate_dict[id]
worst_list.sort()
worst_list.reverse()
bad_region_list=[]
max_number_to_remove=int(0.5+
0.01*params.segmentation.remove_bad_regions_percent*len(edited_volume_list))
if worst_list:
print("\nRegions that span multiple NCS au:", file=out)
for fract,id in worst_list:
print("ID: %d Duplicate points: %d (%.1f %%)" %(
id,duplicate_dict[id],100.*fract), end=' ', file=out)
if len(bad_region_list)<max_number_to_remove:
bad_region_list.append(id)
print(" (removed)", file=out)
else:
print(file=out)
new_sorted_by_volume=[]
region_list=[]
region_volume_dict={}
for i in range(len(edited_volume_list)):
id=i+1
v=edited_volume_list[i]
new_sorted_by_volume.append([v,id])
region_list.append(id)
region_volume_dict[id]=v
if bad_region_list:
print("Bad regions (excluded)",bad_region_list, file=out)
return region_list,region_volume_dict,new_sorted_by_volume,bad_region_list
def sort_by_ncs_overlap(matches,equiv_dict_ncs_copy_dict_id):
sort_list=[]
for id1 in matches:
key,n=top_key(equiv_dict_ncs_copy_dict_id[id1]) # Take top ncs_copy
sort_list.append([n,id1])
sort_list.sort()
sort_list.reverse()
key_list=[]
for n,id1 in sort_list:
key_list.append(id1)
return key_list
def get_ncs_equivalents(
bad_region_list=None,
region_list=None,
region_scattered_points_dict=None,
equiv_dict=None,
ncs_copies=None,
equiv_dict_ncs_copy_dict=None,
min_coverage=.10,
out=sys.stdout):
equiv_dict_ncs_copy={}
for id in region_list:
if id in bad_region_list: continue
match_dict=equiv_dict.get(id,{}) # which are matches
matches=list(match_dict.keys())
if not matches: continue
key_list=sort_by_ncs_overlap(matches,equiv_dict_ncs_copy_dict[id])
n_found=0
for id1 in key_list:
# id matches id1 N=match_dict[id1]
# 2017-12-16 Do not include if there is a cell translation
key,n=top_key(equiv_dict_ncs_copy_dict[id][id1]) # ncs_copy, n-overlap
if n<min_coverage*region_scattered_points_dict[id].size():
break
else:
if not id in equiv_dict_ncs_copy:equiv_dict_ncs_copy[id]={}
equiv_dict_ncs_copy[id][id1]=key
n_found+=1
if n_found>=ncs_copies-1:
break
return equiv_dict_ncs_copy
def get_overlap(l1,l2):
overlap_list=[]
l1a=single_list(l1)
l2a=single_list(l2)
for i in l1a:
if i in l2a and not i in overlap_list: overlap_list.append(i)
return overlap_list
def group_ncs_equivalents(params,
region_list=None,
region_volume_dict=None,
equiv_dict_ncs_copy=None,
tracking_data=None,
split_if_possible=None,
out=sys.stdout):
# equiv_dict_ncs_copy[id][id1]=ncs_copy
# group together all the regions that are related to region 1...etc
# if split_if_possible then skip all groups with multiple entries
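  # For example, equiv_dict_ncs_copy = {3: {7: 1, 12: 2}} (hypothetical values)
  # would mean region 7 is the ncs copy 1 image of region 3 and region 12 is
  # its ncs copy 2 image.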
ncs_equiv_groups_as_list=[]
ncs_equiv_groups_as_dict={}
for id in region_list:
equiv_group={} #equiv_group[ncs_copy]=[id1,id2,id3...]
equiv_group[0]=[id] # always
for id1 in equiv_dict_ncs_copy.get(id,{}).keys():
ncs_copy=equiv_dict_ncs_copy[id][id1]
if not ncs_copy in equiv_group: equiv_group[ncs_copy]=[]
equiv_group[ncs_copy].append(id1) # id1 is ncs_copy of id
all_single=True
equiv_group_as_list=[]
total_grid_points=0
missing_ncs_copies=[]
present_ncs_copies=[]
for ncs_copy in range(tracking_data.input_ncs_info.number_of_operators):
# goes 0 to ncs_copies-1 (including extra ones if present)
local_equiv_group=equiv_group.get(ncs_copy,[])
if local_equiv_group:
equiv_group_as_list.append(local_equiv_group)
present_ncs_copies.append(ncs_copy)
if ncs_copy > 0 and \
len(local_equiv_group)>1 and len(equiv_group.get(0,[]))==1:
all_single=False
for id in equiv_group.get(ncs_copy,[]):
total_grid_points+=region_volume_dict[id]
else:
missing_ncs_copies.append(ncs_copy)
equiv_group_as_list.sort()
if tracking_data.input_ncs_info.is_helical_symmetry:
# complete if we have original_number_of_operators worth
if (not params.segmentation.require_complete) or \
len(present_ncs_copies)>= \
tracking_data.input_ncs_info.original_number_of_operators:
complete=True
else:
complete=False
else:
if len(missing_ncs_copies)==0:
complete=True
else:
complete=False
if complete and \
(not str(equiv_group_as_list) in ncs_equiv_groups_as_dict or
total_grid_points>ncs_equiv_groups_as_dict[str(equiv_group_as_list)]) \
and (all_single or (not split_if_possible)):
ncs_equiv_groups_as_dict[str(equiv_group_as_list)]=total_grid_points
ncs_equiv_groups_as_list.append([total_grid_points,equiv_group_as_list])
ncs_equiv_groups_as_list.sort()
ncs_equiv_groups_as_list.reverse()
# Now remove any group that duplicates a previous group
# 2015-11-07 allow a member to be in multiple groups though (for example
  # one that spans several groups because it contains 2 regions in other ncs
# copies)
# Make sure that if there are duplicates they are all in the leading
# positions of the list (these must be very big ones as they match 2
# regions in other ncs copies)
max_duplicates=tracking_data.input_ncs_info.number_of_operators-1 # not all duplicates
ncs_group_list=[]
used_list=[]
print("All equiv groups:", file=out)
used_regions=[]
for total_grid_points,equiv_group_as_list in ncs_equiv_groups_as_list:
duplicate=False
n_dup=0
for equiv_group in equiv_group_as_list:
for x in equiv_group:
if x in used_list:
n_dup+=1
if n_dup>max_duplicates or n_dup >len(equiv_group_as_list)-1:
duplicate=True
if not duplicate and n_dup>0: # check carefully to make sure that all
# are leading entries
for ncs_group in ncs_group_list:
overlaps=get_overlap(ncs_group,equiv_group_as_list)
if not overlaps: continue
overlaps.sort()
expected_match=single_list(equiv_group_as_list)[:len(overlaps)]
expected_match.sort()
if overlaps!=expected_match: # not leading entries
duplicate=True
break
if not duplicate:
#print >>out,"NCS GROUP:",equiv_group_as_list,":",total_grid_points
ncs_group_list.append(equiv_group_as_list)
for equiv_group in equiv_group_as_list:
for x in equiv_group:
if not x in used_list: used_list.append(x)
print("Total NCS groups: %d" %len(ncs_group_list), file=out)
# Make a dict that lists all ids that are in the same group as region x
shared_group_dict={}
for ncs_group in ncs_group_list:
for group_list in ncs_group:
for id1 in group_list:
if not id1 in shared_group_dict: shared_group_dict[id1]=[]
for other_group_list in ncs_group:
if other_group_list is group_list:continue
for other_id1 in other_group_list:
if not other_id1 in shared_group_dict [id1]:
shared_group_dict[id1].append(other_id1)
return ncs_group_list,shared_group_dict
def identify_ncs_regions(params,
sorted_by_volume=None,
co=None,
min_b=None,
max_b=None,
ncs_obj=None,
tracking_data=None,
out=sys.stdout):
# 1.choose top regions to work with
# 2.remove regions that are in more than one au of the NCS
# 3.identify groups of regions that are related by NCS
# Also note the centers and bounds of each region
# Choose number of top regions to consider
max_regions_to_consider=choose_max_regions_to_consider(params,
sorted_by_volume=sorted_by_volume,
ncs_copies=tracking_data.input_ncs_info.original_number_of_operators)
print("\nIdentifying NCS-related regions.Total regions to consider: %d" %(
max_regions_to_consider), file=out)
if max_regions_to_consider<1:
print("\nUnable to identify any NCS regions", file=out)
return None,tracking_data,None
# Go through all grid points; discard if not in top regions
# Renumber regions in order of decreasing size
load_saved_files=False # set to True to load results from previous run
dump_files=False # set to True to dump results and speed up next run
if not load_saved_files:
edited_mask,edited_volume_list,original_id_from_id=get_edited_mask(
sorted_by_volume=sorted_by_volume,
co=co,
max_regions_to_consider=max_regions_to_consider,out=out)
if dump_files:
from libtbx import easy_pickle
easy_pickle.dump("edited_mask.pkl",
[edited_mask,edited_volume_list,original_id_from_id])
else:
from libtbx import easy_pickle
[edited_mask,edited_volume_list,original_id_from_id
]=easy_pickle.load("edited_mask.pkl")
print("Loading edited_mask.pkl", file=out)
# edited_mask contains re-numbered region id's
# Identify duplicate and ncs relationships between regions
# duplicate_dict[id]= number of duplicates for that region
# equiv_dict[id][other_id]=number_of points other_id matches
# id through an ncs relationship
if not load_saved_files:
duplicate_dict,equiv_dict,equiv_dict_ncs_copy_dict,\
region_range_dict,region_centroid_dict,\
region_scattered_points_dict=\
run_get_duplicates_and_ncs(
ncs_obj=ncs_obj,
min_b=min_b,
max_b=max_b,
edited_mask=edited_mask,
original_id_from_id=original_id_from_id,
edited_volume_list=edited_volume_list,
max_regions_to_consider=max_regions_to_consider,
tracking_data=tracking_data,
out=out)
# Remove any bad regions
region_list,region_volume_dict,new_sorted_by_volume,\
bad_region_list=remove_bad_regions(
params=params,
duplicate_dict=duplicate_dict,
edited_volume_list=edited_volume_list,
out=out)
# Identify groups of regions that are ncs-related
# equiv_dict_ncs_copy[id][id1]=ncs_copy of id that corresponds to id1
equiv_dict_ncs_copy=get_ncs_equivalents(
region_list=region_list,
bad_region_list=bad_region_list,
region_scattered_points_dict=region_scattered_points_dict,
equiv_dict=equiv_dict,
ncs_copies=tracking_data.input_ncs_info.number_of_operators,
equiv_dict_ncs_copy_dict=equiv_dict_ncs_copy_dict,
out=out)
if dump_files:
from libtbx import easy_pickle
easy_pickle.dump("save.pkl",[duplicate_dict,equiv_dict,region_range_dict,region_centroid_dict,region_scattered_points_dict,region_list,region_volume_dict,new_sorted_by_volume,bad_region_list,equiv_dict_ncs_copy,tracking_data])
print("Dumped save.pkl", file=out)
else:
from libtbx import easy_pickle
[duplicate_dict,equiv_dict,region_range_dict,region_centroid_dict,region_scattered_points_dict,region_list,region_volume_dict,new_sorted_by_volume,bad_region_list,equiv_dict_ncs_copy,tracking_data]=easy_pickle.load("save.pkl")
print("Loaded save.pkl", file=out)
# Group together regions that are ncs-related. Also if one ncs
# copy has 2 or more regions linked together, group the other ones.
# each entry in ncs_group_list is a list of regions for each ncs_copy:
# e.g., [[8], [9, 23], [10, 25], [11, 27], [12, 24], [13, 22], [14, 26]]
# May contain elements that are in bad_region_list (to exclude later)
if not load_saved_files:
ncs_group_list,shared_group_dict=group_ncs_equivalents(params,
split_if_possible=params.segmentation.split_if_possible,
tracking_data=tracking_data,
region_volume_dict=region_volume_dict,
region_list=region_list,
equiv_dict_ncs_copy=equiv_dict_ncs_copy,
out=out)
if dump_files:
from libtbx import easy_pickle
easy_pickle.dump("group_list.pkl",[ncs_group_list,shared_group_dict])
print("Dumped to group_list.pkl", file=out)
else:
from libtbx import easy_pickle
[ncs_group_list,shared_group_dict]=easy_pickle.load("group_list.pkl")
print("Loaded group_list.pkl", file=out)
ncs_group_obj=ncs_group_object(
ncs_group_list=ncs_group_list,
shared_group_dict=shared_group_dict,
ncs_obj=ncs_obj,
crystal_symmetry=tracking_data.crystal_symmetry,
edited_mask=edited_mask,
origin_shift=tracking_data.origin_shift,
co=co,
min_b=min_b,
max_b=max_b,
equiv_dict=equiv_dict,
bad_region_list=bad_region_list,
original_id_from_id=original_id_from_id,
edited_volume_list=edited_volume_list,
region_range_dict=region_range_dict,
region_scattered_points_dict=region_scattered_points_dict,
region_centroid_dict=region_centroid_dict)
return ncs_group_obj,tracking_data,equiv_dict_ncs_copy
def get_center_list(regions,
region_centroid_dict=None):
center_list=[]
for region in regions:
center_list.append(region_centroid_dict[region])
return center_list
def get_average_center(regions,
region_centroid_dict=None):
  center_list=get_center_list(regions,region_centroid_dict=region_centroid_dict)
average_center=deepcopy(center_list[0])
if len(center_list)>1:
for r in center_list[1:]:
for i in range(3):
average_center[i]+=r[i]
for i in range(3):
average_center[i]/=len(center_list)
return average_center
def get_dist(r,s):
dd=0.
for i in range(3):
dd+=(r[i]-s[i])**2
return dd**0.5
def has_intersection(set1,set2):
set1a=single_list(set1)
set2a=single_list(set2)
for x in set1a:
if x in set2a:
return True
return False
def get_scattered_points_list(other_regions,
region_scattered_points_dict=None):
scattered_points_list=flex.vec3_double()
for x in other_regions:
scattered_points_list.extend(region_scattered_points_dict[x])
return scattered_points_list
def get_inter_region_dist_dict(ncs_group_obj=None,
selected_regions=None,target_scattered_points=None):
dd={}
for i in range(len(selected_regions)):
id=selected_regions[i]
if not id in dd: dd[id]={}
test_centers=ncs_group_obj.region_scattered_points_dict[id]
for j in range(i+1,len(selected_regions)):
id1=selected_regions[j]
test_centers1=ncs_group_obj.region_scattered_points_dict[id1]
dist=get_closest_dist(test_centers,test_centers1)
dd[id][id1]=dist
if not id1 in dd: dd[id1]={}
dd[id1][id]=dist
return dd
def get_dist_to_first_dict(ncs_group_obj=None,
selected_regions=None,
inter_region_dist_dict=None,
target_scattered_points=None):
  # Get distance to region 0 (or to target_scattered_points if supplied)
dist_to_first_dict={}
if target_scattered_points:
start_region=0
for x in selected_regions:
dist_to_first_dict[x]=get_closest_dist(
ncs_group_obj.region_scattered_points_dict[x],
target_scattered_points)
else:
start_region=1
x0=selected_regions[0]
dist_to_first_dict[x0]=0
for x in selected_regions[1:]:
dist_to_first_dict[x]=inter_region_dist_dict[x0][x]
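  # Relax the distances: if region y is closer to the first region and the
  # x-y gap is also smaller than x's current value, x can be reached via y,
  # so its distance becomes max(dist_to_first[y], dist(x,y)) (a minimax-path
  # style relaxation over the selected regions).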
changing=True
while changing:
changing=False
for x in selected_regions[start_region:]:
for y in selected_regions[start_region:]:
if x==y: continue
if dist_to_first_dict[y]<dist_to_first_dict[x] and \
inter_region_dist_dict[x][y]<dist_to_first_dict[x]:
dist_to_first_dict[x]=max(
dist_to_first_dict[y],inter_region_dist_dict[x][y])
changing=True
return dist_to_first_dict
def radius_of_gyration_of_vector(xyz):
return (xyz-xyz.mean()).rms_length()
def get_radius_of_gyration(ncs_group_obj=None,
selected_regions=None):
# return radius of gyration of points in selected regions
centers=flex.vec3_double()
for s in selected_regions:
centers.append(ncs_group_obj.region_centroid_dict[s])
centers=centers-centers.mean()
return centers.rms_length()
def get_closest_neighbor_rms(ncs_group_obj=None,selected_regions=None,
target_scattered_points=None,verbose=False,out=sys.stdout):
# return rms closest distance of each region center to lowest_numbered region,
# allowing sequential tracking taking max of inter-region distances
# XXX can't we save some of this for next time?
inter_region_dist_dict=get_inter_region_dist_dict(ncs_group_obj=ncs_group_obj,
selected_regions=selected_regions)
if verbose:
print("Inter-region distance dict:", file=out)
keys=list(inter_region_dist_dict.keys())
keys.sort()
for key in keys:
for key2 in inter_region_dist_dict[key].keys():
print("%s %s : %.1f " %(key,key2,inter_region_dist_dict[key][key2]), file=out)
dist_to_first_dict=get_dist_to_first_dict(ncs_group_obj=ncs_group_obj,
selected_regions=selected_regions,
inter_region_dist_dict=inter_region_dist_dict,
target_scattered_points=target_scattered_points)
if verbose:
print("Distance-to-first dict:", file=out)
keys=list(dist_to_first_dict.keys())
keys.sort()
for key in keys: print("\n %s: %.1f " %(key,dist_to_first_dict[key]), file=out)
if target_scattered_points:
start_region=0 # we are getting dist to target_scattered_points
else:
start_region=1 # we are getting dist to region 0
rms=0.
rms_n=0.
for x in selected_regions[start_region:]:
dist=dist_to_first_dict[x]
rms+=dist**2
rms_n+=1.
if rms_n>1:
rms/=rms_n
rms=rms**0.5
return rms
def get_rms(selected_regions=None,
region_centroid_dict=None):
# return rms distance of each region center from average of all others
rms=0.
rms_n=0.
for x in selected_regions:
other_regions=remove_one_item(selected_regions,item_to_remove=x)
current_center=get_average_center(other_regions,
region_centroid_dict=region_centroid_dict)
test_center=region_centroid_dict[x]
dist=get_dist(current_center,test_center)
rms+=dist**2
rms_n+=1.
if rms_n>1:
rms/=rms_n
return rms**0.5
def single_list(list_of_lists):
single=[]
for x in list_of_lists:
if type(x)==type([1,2,3]):
single+=single_list(x)
else:
single.append(x)
return single
def get_closest_dist(test_center,target_centers):
# make sure we have target_centers=vec3_double and not a list,
# and vec3_double or tuple for test_center
if type(test_center)==type([1,2,3]):
test_center=flex.vec3_double(test_center)
if type(target_centers)==type([1,2,3]):
target_centers=flex.vec3_double(target_centers)
if test_center.size()<1 or target_centers.size()<1: return None
closest_dist=test_center.min_distance_between_any_pair(target_centers)
return closest_dist
def region_lists_have_ncs_overlap(set1,set2,ncs_group_obj=None,cutoff=0):
for id1 in set1:
for id2 in set2:
if id2 in ncs_group_obj.shared_group_dict.get(id1,[]):
return True
return False
def get_effective_radius(ncs_group_obj=None,
target_scattered_points=None,
weight_rad_gyr=None,
selected_regions=None):
sr=deepcopy(selected_regions)
sr.sort()
rad_gyr=get_radius_of_gyration(ncs_group_obj=ncs_group_obj,
selected_regions=sr)
rms=get_closest_neighbor_rms(ncs_group_obj=ncs_group_obj,
target_scattered_points=target_scattered_points,
selected_regions=sr)
if ncs_group_obj.max_cell_dim and ncs_group_obj.max_cell_dim > 1.0:
wrg=weight_rad_gyr*(300/ncs_group_obj.max_cell_dim) # have a consistent scale
else:
wrg=weight_rad_gyr
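  # effective_radius blends the closest-neighbor rms distance and the radius
  # of gyration of the selected regions, with weight_rad_gyr rescaled by
  # 300/max_cell_dim so that the balance is comparable across cell sizes.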
effective_radius=(rms+wrg*rad_gyr)/(1.+wrg)
return effective_radius
def add_neighbors(params,
selected_regions=None,
max_length_of_group=None,
target_scattered_points=None,
tracking_data=None,
equiv_dict_ncs_copy=None,
ncs_group_obj=None,out=sys.stdout):
# Add neighboring regions on to selected_regions.
# Same rules as select_from_seed
selected_regions=single_list(deepcopy(selected_regions))
added_regions=[]
start_dist=get_effective_radius(ncs_group_obj=ncs_group_obj,
target_scattered_points=target_scattered_points,
weight_rad_gyr=params.segmentation.weight_rad_gyr,
selected_regions=selected_regions)
delta_dist=params.segmentation.add_neighbors_dist
max_dist=start_dist+delta_dist
starting_selected_regions=deepcopy(selected_regions)
  for x in selected_regions: # for each selected region, consider adding its
    #  NCS-related alternatives one at a time and keep all the ok ones
ncs_groups_to_use=get_ncs_related_regions(
ncs_group_obj=ncs_group_obj,
selected_regions=[x],
include_self=False)
    for x1 in ncs_groups_to_use: # try adding from each group
      if x1 in selected_regions+added_regions:
        continue
      ncs_group=[[x1]]
current_scattered_points_list=get_scattered_points_list(selected_regions,
region_scattered_points_dict=ncs_group_obj.region_scattered_points_dict)
for ncs_set in ncs_group: # pick the best ncs_set from this group
if has_intersection(ncs_group_obj.bad_region_list,ncs_set):
continue
dist=get_effective_radius(ncs_group_obj=ncs_group_obj,
target_scattered_points=target_scattered_points,
weight_rad_gyr=params.segmentation.weight_rad_gyr,
selected_regions=selected_regions+ncs_set)
if dist <= max_dist:
          added_regions.append(x1)
selected_regions=selected_regions+added_regions
dist=get_effective_radius(ncs_group_obj=ncs_group_obj,
target_scattered_points=target_scattered_points,
weight_rad_gyr=params.segmentation.weight_rad_gyr,
selected_regions=selected_regions)
# Identify all the NCS operators required to map final to starting
# equiv_dict_ncs_copy[id][id1]=ncs_copy of id that corresponds to id1
ncs_group=ncs_group_obj.ncs_obj.ncs_groups()[0]
identity_op=ncs_group.identity_op_id()
ncs_ops_used=[identity_op]
for id in selected_regions:
related_regions=get_ncs_related_regions(
ncs_group_obj=ncs_group_obj,
selected_regions=[id],
include_self=False)
for id1 in selected_regions:
if not id1 in related_regions: continue
ncs_copy1=equiv_dict_ncs_copy.get(id,{}).get(id1,None)
ncs_copy2=equiv_dict_ncs_copy.get(id1,{}).get(id,None)
for a in [ncs_copy1,ncs_copy2]:
if a is not None and not a in ncs_ops_used:
ncs_ops_used.append(a)
selected_regions.sort()
ncs_ops_used.sort()
for x in selected_regions:
print("GROUP ",x,":",ncs_group_obj.shared_group_dict.get(x,[]), file=out)
return selected_regions,dist,ncs_ops_used
def select_from_seed(params,
starting_regions,
target_scattered_points=None,
max_length_of_group=None,
ncs_groups_to_use=None,
tracking_data=None,
ncs_group_obj=None):
selected_regions=single_list(deepcopy(starting_regions))
# do not allow any region in ncs_group_obj.bad_region_list
# also do not allow any region that is in an ncs-related group to any region
# already used. Use ncs_group_obj.equiv_dict to identify these.
if not ncs_groups_to_use:
ncs_groups_to_use=ncs_group_obj.ncs_group_list
for ncs_group in ncs_groups_to_use: # try adding from each group
if max_length_of_group is not None and \
len(selected_regions)>=max_length_of_group:
break
best_ncs_set=None
best_dist=None
if has_intersection(ncs_group,selected_regions):
continue
current_scattered_points_list=get_scattered_points_list(selected_regions,
region_scattered_points_dict=ncs_group_obj.region_scattered_points_dict)
if target_scattered_points:
current_scattered_points_list.extend(target_scattered_points)
for ncs_set in ncs_group: # pick the best ncs_set from this group
if has_intersection(ncs_group_obj.bad_region_list,ncs_set): continue
# does any ncs copy of anything in selected_regions actually overlap
# with any member of ncs_set... might be efficient to delete the entire
# ncs_group if any ncs_set overlaps, but could lose some.
if region_lists_have_ncs_overlap(ncs_set,selected_regions,
ncs_group_obj=ncs_group_obj):
continue
dist=get_effective_radius(ncs_group_obj=ncs_group_obj,
target_scattered_points=target_scattered_points,
weight_rad_gyr=params.segmentation.weight_rad_gyr,
selected_regions=selected_regions+ncs_set)
if best_dist is None or dist<best_dist:
best_dist=dist
best_ncs_set=ncs_set
if best_ncs_set is not None:
selected_regions+=best_ncs_set
dist=get_effective_radius(ncs_group_obj=ncs_group_obj,
target_scattered_points=target_scattered_points,
weight_rad_gyr=params.segmentation.weight_rad_gyr,
selected_regions=selected_regions)
return selected_regions,dist
def remove_one_item(input_list,item_to_remove=None):
new_list=[]
for item in input_list:
if item != item_to_remove:
new_list.append(item)
return new_list
def get_ncs_related_regions_specific_list(
ncs_group_obj=None,
target_regions=None,
include_self=False):
all_regions=[]
for target_region in target_regions:
all_regions+=get_ncs_related_regions_specific_target(
ncs_group_obj=ncs_group_obj,
target_region=target_region,
other_regions=remove_one_item(
target_regions,item_to_remove=target_region),
include_self=include_self)
return all_regions
def get_ncs_related_regions_specific_target(
ncs_group_obj=None,
target_region=None,
other_regions=None,
include_self=False):
# similar to get_ncs_related_regions, but find just one ncs group that
# contains x but does not contain any member of other_regions
for ncs_group in ncs_group_obj.ncs_group_list: # might this be the group
ids_in_group=single_list(ncs_group)
if not target_region in ids_in_group: continue # does not contain target
contains_others=False
for other_id in other_regions:
if other_id in ids_in_group:
contains_other=True
break# contains other members
if not contains_others:
# this is the group
if include_self:
return ids_in_group
else:
return remove_one_item(ids_in_group,item_to_remove=target_region)
return []
def get_ncs_related_regions(
ncs_group_obj=None,
selected_regions=None,
include_self=False):
# returns a simple list of region ids
# if include_self then include selected regions and all ncs-related
# otherwise do not include selected regions or anything that might
# overlap with them
ncs_related_regions=[]
if include_self:
for id in selected_regions:
if not id in ncs_related_regions:
ncs_related_regions.append(id)
for ncs_group in ncs_group_obj.ncs_group_list:
ids_in_group=single_list(ncs_group)
if id in ids_in_group: # this group contains this selected id
for i in ids_in_group:
if not i in ncs_related_regions:
ncs_related_regions.append(i)
else:
for id in selected_regions:
found=False
for ncs_group in ncs_group_obj.ncs_group_list:
ids_in_group=single_list(ncs_group)
if id in ids_in_group: # this group contains this selected id
found=True
for i in ids_in_group:
if (not i==id) and (not i in selected_regions) and \
(not i in ncs_related_regions):
ncs_related_regions.append(i)
break # don't look at any more ncs groups
return ncs_related_regions
def all_elements_are_length_one(list_of_elements):
for x in list_of_elements:
if type(x)==type([1,2,3]):
if len(x)!=1: return False
return True
def as_list_of_lists(ll):
new_list=[]
for x in ll:
new_list.append([x])
return new_list
def select_regions_in_au(params,
ncs_group_obj=None,
target_scattered_points=None,
unique_expected_regions=None,
equiv_dict_ncs_copy=None,
tracking_data=None,
out=sys.stdout):
# Choose one region or set of regions from each ncs_group
# up to about unique_expected_regions
# Optimize closeness of centers...
# If target scattered_points is supplied, include them as allowed target
if not ncs_group_obj.ncs_group_list:
return ncs_group_obj,[]
max_length_of_group=max(1,unique_expected_regions*
params.segmentation.max_per_au_ratio)
print("Maximum length of group: %d" %(max_length_of_group), file=out)
if all_elements_are_length_one(ncs_group_obj.ncs_group_list):
# This is where there is no ncs. Basically skipping everything
best_selected_regions=single_list(ncs_group_obj.ncs_group_list)
best_rms=None
else:
#-------------- Find initial set of regions --------------------
# Seed with members of the first NCS group or with the target points
# and find the member of each NCS group that is closest
if target_scattered_points:
starting_regions=[None]
else:
starting_regions=ncs_group_obj.ncs_group_list[0]
best_selected_regions=None
best_rms=None
ok_seeds_examined=0
for starting_region in starting_regions: # NOTE starting_region is a list
if not starting_region and not target_scattered_points:continue
if ok_seeds_examined >= params.segmentation.seeds_to_try:
break # don't bother to keep trying
if starting_region and starting_region in ncs_group_obj.bad_region_list:
continue # do not use
if starting_region: # NOTE: starting_region is a list itself
starting_region_list=[starting_region]
else:
starting_region_list=[]
selected_regions,rms=select_from_seed(params,
starting_region_list,
target_scattered_points=target_scattered_points,
max_length_of_group=max_length_of_group,
tracking_data=tracking_data,
ncs_group_obj=ncs_group_obj)
if not selected_regions:
continue
ok_seeds_examined+=1
if best_rms is None or rms<best_rms:
best_rms=rms
best_selected_regions=selected_regions
print("New best selected: rms: %7.1f: %s " %(
rms,str(selected_regions)), file=out)
if best_rms is not None:
print("Best selected so far: rms: %7.1f: %s " %(
best_rms,str(best_selected_regions)), file=out)
if not best_selected_regions:
print("\nNo NCS regions found ...", file=out)
return ncs_group_obj,[]
# Now we have a first version of best_rms, best_selected_regions
#-------------- END Find initial set of regions --------------------
#-------------- Optimize choice of regions -------------------------
max_tries=10
improving=True
itry=0
while improving and itry<max_tries:
itry+=1
improving=False
previous_selected_regions=deepcopy(best_selected_regions)
previous_selected_regions.sort()
print("\nTry %d for optimizing regions" %(itry), file=out)
# Now see if replacing any regions with alternatives would improve it
for x in previous_selected_regions:
starting_regions=remove_one_item(previous_selected_regions,
item_to_remove=x)
# identify ncs_related regions to x, but not to other members of
# selected_regions
ncs_related_regions=get_ncs_related_regions_specific_list(
ncs_group_obj=ncs_group_obj,
include_self=True,
target_regions=[x])
if not ncs_related_regions: continue
ncs_groups_to_use=[as_list_of_lists(ncs_related_regions)]
new_selected_regions,rms=select_from_seed(params,starting_regions,
target_scattered_points=target_scattered_points,
max_length_of_group=max_length_of_group,
tracking_data=tracking_data,
ncs_groups_to_use=ncs_groups_to_use,
ncs_group_obj=ncs_group_obj)
if not new_selected_regions: continue
if best_rms is None or rms<best_rms:
best_selected_regions=new_selected_regions
best_selected_regions.sort()
best_rms=rms
improving=True
print("Optimized best selected: rms: %7.1f: %s " %(
best_rms,str(best_selected_regions)), file=out)
# Done with this try
selected_regions=best_selected_regions
selected_regions.sort()
if params.map_modification.regions_to_keep:
selected_regions=selected_regions[:params.map_modification.regions_to_keep]
rms=get_closest_neighbor_rms(ncs_group_obj=ncs_group_obj,
selected_regions=selected_regions,verbose=False,out=out)
if params.segmentation.add_neighbors and \
ncs_group_obj.ncs_obj.max_operators()>1:
print("\nAdding neighbor groups...", file=out)
selected_regions,rms,ncs_ops_used=add_neighbors(params,
selected_regions=selected_regions,
max_length_of_group=max_length_of_group,
target_scattered_points=target_scattered_points,
equiv_dict_ncs_copy=equiv_dict_ncs_copy,
tracking_data=tracking_data,
ncs_group_obj=ncs_group_obj,out=out)
else:
ncs_ops_used=None
print("\nFinal selected regions with rms of %6.2f: " %(rms), end=' ', file=out)
for x in selected_regions:
print(x, end=' ', file=out)
if ncs_ops_used:
print("\nNCS operators used: ", end=' ', file=out)
for op in ncs_ops_used: print(op, end=' ', file=out)
print(file=out)
# Save an ncs object containing just the ncs_ops_used
ncs_group_obj.set_ncs_ops_used(ncs_ops_used)
# Identify scattered points for all selected regions:
scattered_points=get_scattered_points_list(selected_regions,
region_scattered_points_dict=ncs_group_obj.region_scattered_points_dict)
# Identify ncs-related regions for all the selected regions
self_and_ncs_related_regions=get_ncs_related_regions(
ncs_group_obj=ncs_group_obj,
selected_regions=selected_regions,
include_self=True)
ncs_related_regions=get_ncs_related_regions(
ncs_group_obj=ncs_group_obj,
selected_regions=selected_regions,
include_self=False)
print("NCS-related regions (not used): %d " %(len(ncs_related_regions)), file=out)
ncs_group_obj.set_selected_regions(selected_regions)
ncs_group_obj.set_self_and_ncs_related_regions(self_and_ncs_related_regions)
ncs_group_obj.set_ncs_related_regions(ncs_related_regions)
return ncs_group_obj,scattered_points
def get_bool_mask_as_int(ncs_group_obj=None,mask_as_int=None,mask_as_bool=None):
if mask_as_int:
mask_as_int=mask_as_int.deep_copy()
else:
mask_as_int=ncs_group_obj.edited_mask.deep_copy()
s = (mask_as_bool==True)
mask_as_int = mask_as_int.set_selected(s,1)
mask_as_int = mask_as_int.set_selected(~s,0)
return mask_as_int
def get_bool_mask_of_regions(ncs_group_obj=None,region_list=None,
expand_size=None):
s = (ncs_group_obj.edited_mask == -1)
if region_list is None: region_list=[]
for id in region_list:
if not expand_size:
s |= (ncs_group_obj.edited_mask==id) # just take this region
else: # expand the size of the regions...use expand_mask which operates
      # on the original id numbers and uses the connectivity object (co)
bool_region_mask = ncs_group_obj.co.expand_mask(
id_to_expand=ncs_group_obj.original_id_from_id[id],
expand_size=expand_size)
s |= (bool_region_mask== True)
bool_mask = ncs_group_obj.co.expand_mask(id_to_expand=1,expand_size=1) # just to get bool mask
bool_mask = bool_mask.set_selected(s,True)
bool_mask = bool_mask.set_selected(~s,False)
return bool_mask
def create_remaining_mask_and_map(params,
ncs_group_obj=None,
map_data=None,
crystal_symmetry=None,
out=sys.stdout):
if not ncs_group_obj.selected_regions:
print("No regions selected", file=out)
return map_data
# create new remaining_map containing everything except the part that
# has been interpreted (and all points in interpreted NCS-related copies)
bool_all_used=get_bool_mask_of_regions(ncs_group_obj=ncs_group_obj,
region_list=ncs_group_obj.selected_regions+
ncs_group_obj.self_and_ncs_related_regions,
expand_size=params.segmentation.expand_size)
map_data_remaining=map_data.deep_copy()
s=(bool_all_used==True)
map_data_remaining=map_data_remaining.set_selected(s,
params.segmentation.value_outside_mask)
return map_data_remaining
def get_lower(lower_bounds,lower):
new_lower=[]
for i in range(3):
if lower_bounds[i] is None:
new_lower.append(lower[i])
elif lower[i] is None:
new_lower.append(lower_bounds[i])
else:
new_lower.append(min(lower_bounds[i],lower[i]))
return new_lower
def get_upper(upper_bounds,upper):
new_upper=[]
for i in range(3):
if upper_bounds[i] is None:
new_upper.append(upper[i])
elif upper[i] is None:
new_upper.append(upper_bounds[i])
else:
new_upper.append(max(upper_bounds[i],upper[i]))
return new_upper
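# get_lower/get_upper combine two sets of grid bounds elementwise, treating
# None as "not yet set". A minimal sketch (hypothetical values, not executed):
#   get_lower([None, 5, 10], [3, 7, None])   # -> [3, 5, 10]
#   get_upper([None, 5, 10], [3, 7, None])   # -> [3, 7, 10]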
def get_bounds(ncs_group_obj=None,id=None):
orig_id=ncs_group_obj.original_id_from_id[id]
lower=ncs_group_obj.min_b[orig_id]
upper=ncs_group_obj.max_b[orig_id]
return lower,upper
def get_selected_and_related_regions(params,
ncs_group_obj=None):
# Identify all points in the targeted regions
bool_selected_regions=get_bool_mask_of_regions(ncs_group_obj=ncs_group_obj,
region_list=ncs_group_obj.selected_regions,
expand_size=params.segmentation.expand_size+\
params.segmentation.mask_additional_expand_size)
# and all points in NCS-related copies (to be excluded)
if params.segmentation.exclude_points_in_ncs_copies and (
not params.segmentation.add_neighbors):
bool_ncs_related_mask=get_bool_mask_of_regions(ncs_group_obj=ncs_group_obj,
region_list=ncs_group_obj.ncs_related_regions)
# NOTE: using ncs_related_regions here NOT self_and_ncs_related_regions
else:
bool_ncs_related_mask=None
lower_bounds=[None,None,None]
upper_bounds=[None,None,None]
if ncs_group_obj.selected_regions:
for id in ncs_group_obj.selected_regions:
lower,upper=get_bounds(
ncs_group_obj=ncs_group_obj,id=id)
lower_bounds=get_lower(lower_bounds,lower)
upper_bounds=get_upper(upper_bounds,upper)
return bool_selected_regions,bool_ncs_related_mask,lower_bounds,upper_bounds
def adjust_bounds(params,
lower_bounds,upper_bounds,map_data=None,out=sys.stdout):
# range is lower_bounds to upper_bounds
lower_bounds=list(lower_bounds)
upper_bounds=list(upper_bounds)
if params is None or params.output_files.box_buffer is None:
box_buffer=0
else:
box_buffer=params.output_files.box_buffer
for i in range(3):
if lower_bounds[i] is None: lower_bounds[i]=0
if upper_bounds[i] is None: upper_bounds[i]=0
lower_bounds[i]-=box_buffer
lower_bounds[i]=max(0,lower_bounds[i])
upper_bounds[i]+=box_buffer
upper_bounds[i]=min(map_data.all()[i]-1,upper_bounds[i])
"""
print >>out,"\nRange: X:(%6d,%6d) Y:(%6d,%6d) Z:(%6d,%6d)" %(
lower_bounds[0],upper_bounds[0],
lower_bounds[1],upper_bounds[1],
lower_bounds[2],upper_bounds[2])
"""
return lower_bounds,upper_bounds
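# adjust_bounds pads the box by output_files.box_buffer grid points on each
# side and clamps the result to the map extent. A minimal sketch (hypothetical
# numbers, not executed): with box_buffer=2 and map_data.all()=(100,100,100),
#   adjust_bounds(params, [10,10,10], [20,20,20], map_data=map_data)
# would return ([8,8,8], [22,22,22]); bounds never go below 0 or above all()-1.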
def write_region_maps(params,
ncs_group_obj=None,
map_data=None,
tracking_data=None,
remainder_ncs_group_obj=None,
regions_to_skip=None,
out=sys.stdout):
remainder_regions_written=[]
map_files_written=[]
if not ncs_group_obj:
return map_files_written,remainder_regions_written
if not ncs_group_obj.selected_regions:
return map_files_written,remainder_regions_written
for id in ncs_group_obj.selected_regions:
if regions_to_skip and id in regions_to_skip:
print("Skipping remainder region %d (already written out)" %(id), file=out)
continue
print("Writing region %d" %(id), end=' ', file=out)
# dummy atoms representing this region
sites=ncs_group_obj.region_scattered_points_dict[id]
bool_region_mask = ncs_group_obj.co.expand_mask(
id_to_expand=ncs_group_obj.original_id_from_id[id],
expand_size=params.segmentation.expand_size)
s = (bool_region_mask==True)
lower_bounds,upper_bounds=get_bounds(ncs_group_obj=ncs_group_obj,id=id)
if remainder_ncs_group_obj:
for remainder_id in remainder_ncs_group_obj.remainder_id_dict.keys():
if remainder_ncs_group_obj.remainder_id_dict[remainder_id]==id:
remainder_regions_written.append(remainder_id)
sites.extend(
remainder_ncs_group_obj.region_scattered_points_dict[remainder_id])
print("(including remainder region %d)" %(remainder_id), end=' ', file=out)
remainder_bool_region_mask = remainder_ncs_group_obj.co.expand_mask(
id_to_expand=remainder_ncs_group_obj.original_id_from_id[remainder_id],
expand_size=params.segmentation.expand_size)
s|= (remainder_bool_region_mask==True)
lower,upper=get_bounds(
ncs_group_obj=remainder_ncs_group_obj,id=remainder_id)
lower_bounds=get_lower(lower_bounds,lower)
upper_bounds=get_upper(upper_bounds,upper)
region_mask = map_data.deep_copy()
region_mask = region_mask.set_selected(s,1)
region_mask = region_mask.set_selected(~s,0)
local_map_data=map_data.deep_copy()
local_map_data=local_map_data * region_mask.as_double()
# Now cut down the map to the size we want
lower_bounds,upper_bounds=adjust_bounds(params,lower_bounds,upper_bounds,
map_data=map_data,out=out)
box_map,box_crystal_symmetry,\
dummy_smoothed_box_mask_data,dummy_original_box_map_data=cut_out_map(
map_data=local_map_data, \
crystal_symmetry=tracking_data.crystal_symmetry,
min_point=lower_bounds, max_point=upper_bounds,out=out)
if remainder_ncs_group_obj:
text=""
else:
text="_r"
base_file='map%s_%d.ccp4' %(text, id)
base_pdb_file='atoms%s_%d.pdb' %(text, id)
if tracking_data.params.output_files.output_directory:
if not os.path.isdir(tracking_data.params.output_files.output_directory):
os.mkdir(tracking_data.params.output_files.output_directory)
file_name=os.path.join(tracking_data.params.output_files.output_directory,base_file)
pdb_file_name=os.path.join(
tracking_data.params.output_files.output_directory,base_pdb_file)
else:
file_name=base_file
pdb_file_name=base_pdb_file
write_ccp4_map(box_crystal_symmetry,file_name, box_map)
print("to %s" %(file_name), file=out)
map_files_written.append(file_name)
tracking_data.add_output_region_map_info(
file_name=file_name,
crystal_symmetry=box_crystal_symmetry,
origin=box_map.origin(),
all=box_map.all(),
map_id=base_file)
print("Atoms representation written to %s" %(pdb_file_name), file=out)
write_atoms(tracking_data=tracking_data,sites=sites,file_name=pdb_file_name,
out=out)
tracking_data.add_output_region_pdb_info(
file_name=pdb_file_name)
return map_files_written,remainder_regions_written
def get_bounds_from_sites(sites_cart=None,map_data=None,
unit_cell=None):
lower_bounds=[None,None,None]
upper_bounds=[None,None,None]
sites_frac=unit_cell.fractionalize(sites_cart)
nx,ny,nz=map_data.all()
for x_frac in sites_frac:
x=[
int(0.5+nx*x_frac[0]),
int(0.5+ny*x_frac[1]),
int(0.5+nz*x_frac[2])]
if lower_bounds[0] is None or x[0]<lower_bounds[0]: lower_bounds[0]=x[0]
if lower_bounds[1] is None or x[1]<lower_bounds[1]: lower_bounds[1]=x[1]
if lower_bounds[2] is None or x[2]<lower_bounds[2]: lower_bounds[2]=x[2]
if upper_bounds[0] is None or x[0]>upper_bounds[0]: upper_bounds[0]=x[0]
if upper_bounds[1] is None or x[1]>upper_bounds[1]: upper_bounds[1]=x[1]
if upper_bounds[2] is None or x[2]>upper_bounds[2]: upper_bounds[2]=x[2]
return lower_bounds,upper_bounds
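# get_bounds_from_sites maps each Cartesian site to the nearest grid point
# (int(0.5 + n*frac) along each axis) and returns the elementwise min/max,
# i.e. the smallest grid box that contains all of the supplied sites.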
def write_output_files(params,
tracking_data=None,
map_data=None,
half_map_data_list=None,
ncs_group_obj=None,
remainder_ncs_group_obj=None,
pdb_hierarchy=None,
removed_ncs=None,
out=sys.stdout):
half_map_data_list_au=[]
if not half_map_data_list: half_map_data_list=[]
if params.output_files.au_output_file_stem:
au_mask_output_file=os.path.join(tracking_data.params.output_files.output_directory,params.output_files.au_output_file_stem+"_mask.ccp4")
au_map_output_file=os.path.join(tracking_data.params.output_files.output_directory,params.output_files.au_output_file_stem+"_map.ccp4")
au_atom_output_file=os.path.join(tracking_data.params.output_files.output_directory,params.output_files.au_output_file_stem+"_atoms.pdb")
else:
au_mask_output_file=None
au_map_output_file=None
au_atom_output_file=None
# Write out pdb file with dummy atoms for the AU to au_atom_output_file
if au_atom_output_file and params.output_files.write_output_maps:
sites=flex.vec3_double()
for id in ncs_group_obj.selected_regions:
sites.extend(ncs_group_obj.region_scattered_points_dict[id])
if remainder_ncs_group_obj:
for id in remainder_ncs_group_obj.selected_regions:
sites.extend(remainder_ncs_group_obj.region_scattered_points_dict[id])
write_atoms(tracking_data=tracking_data,sites=sites,
file_name=au_atom_output_file,out=out)
tracking_data.set_output_ncs_au_pdb_info(file_name=au_atom_output_file)
# Write out mask and map representing one NCS copy and none of
# other NCS copies. Expand the mask to include neighboring points (but
  # not those explicitly in other NCS copies)
if params.map_modification.soft_mask and params.control.save_box_map_ncs_au:
mask_expand_size=estimate_expand_size(
crystal_symmetry=tracking_data.crystal_symmetry,
map_data=map_data,
expand_target=tracking_data.params.segmentation.mask_expand_ratio*\
tracking_data.params.crystal_info.resolution,
out=out)
params.segmentation.mask_additional_expand_size = max(mask_expand_size,
params.segmentation.mask_additional_expand_size,)
bool_selected_regions,bool_ncs_related_mask,lower_bounds,upper_bounds=\
get_selected_and_related_regions(
params,ncs_group_obj=ncs_group_obj)
if bool_ncs_related_mask is not None:
s_ncs_related = (bool_ncs_related_mask==True)
else:
s_ncs_related = None
# Add in remainder regions if present
if remainder_ncs_group_obj:
bool_remainder_selected_regions,bool_remainder_ncs_related_mask,\
remainder_lower_bounds,remainder_upper_bounds=\
get_selected_and_related_regions(
params,ncs_group_obj=remainder_ncs_group_obj)
lower_bounds=get_lower(lower_bounds,remainder_lower_bounds)
upper_bounds=get_upper(upper_bounds,remainder_upper_bounds)
s_remainder_au = (bool_remainder_selected_regions==True)
bool_selected_regions=bool_selected_regions.set_selected(
s_remainder_au,True)
if s_ncs_related is not None and \
bool_remainder_ncs_related_mask is not None:
s_ncs_related |= (bool_remainder_ncs_related_mask==True)
# Now create NCS mask by eliminating all points in target (expanded) in
# NCS-related copies
if s_ncs_related is not None:
bool_selected_regions=bool_selected_regions.set_selected(
s_ncs_related,False)
if tracking_data.params.map_modification.regions_to_keep is None:
# Identify full (possibly expanded) ncs au starting with what we have
au_mask=get_one_au(tracking_data=tracking_data,
starting_mask=bool_selected_regions,
removed_ncs=removed_ncs,
ncs_obj=ncs_group_obj.ncs_obj,map_data=map_data,out=out)
print("\nExpanding NCS AU if necessary...", file=out)
print("Size of AU mask: %s Current size of AU: %s" %(
au_mask.count(True),bool_selected_regions.count(True)), file=out)
bool_selected_regions=(bool_selected_regions | au_mask)
print("New size of AU mask: %s" %(bool_selected_regions.count(True)), file=out)
sites_cart=get_marked_points_cart(mask_data=bool_selected_regions,
unit_cell=ncs_group_obj.crystal_symmetry.unit_cell(),
every_nth_point=tracking_data.params.segmentation.grid_spacing_for_au,
boundary_radius=tracking_data.params.segmentation.radius)
sites_lower_bounds,sites_upper_bounds=get_bounds_from_sites(
unit_cell=ncs_group_obj.crystal_symmetry.unit_cell(),
sites_cart=sites_cart,map_data=map_data)
print("Original bounds: %5s %5s %5s to %5s %5s %5s" %(
tuple(lower_bounds+upper_bounds)), file=out)
lower_bounds=get_lower(lower_bounds,sites_lower_bounds)
upper_bounds=get_upper(upper_bounds,sites_upper_bounds)
print("Updated bounds: %5s %5s %5s to %5s %5s %5s" %(
tuple(lower_bounds+upper_bounds)), file=out)
lower_bounds,upper_bounds=adjust_bounds(params,lower_bounds,upper_bounds,
map_data=map_data,out=out)
box_ncs_au=params.segmentation.box_ncs_au
if (not box_ncs_au):
print("Using entire input map (box_ncs_au=False)", file=out)
lower_bounds=map_data.origin()
upper_bounds=tuple(matrix.col(map_data.all())+
matrix.col(map_data.origin())-matrix.col((1,1,1)))
print("\nMaking two types of maps for AU of NCS mask and map with "+\
"buffer of %d grid units \nin each direction around AU" %(
params.output_files.box_buffer), file=out)
if params.output_files.write_output_maps:
print("Both types of maps have the same origin and overlay on %s" %(
os.path.join(tracking_data.params.output_files.output_directory,
params.output_files.shifted_map_file)), file=out)
print("\nThe standard maps (%s, %s) have the \noriginal cell dimensions." %(
os.path.join(tracking_data.params.output_files.output_directory,au_mask_output_file),
os.path.join(tracking_data.params.output_files.output_directory,au_map_output_file))+\
"\nThese maps show only the unique (NCS AU) part of the map.", file=out)
print("\nThe cut out box_maps (%s, %s) have \nsmaller cell dimensions." %(
os.path.join(tracking_data.params.output_files.output_directory,params.output_files.box_mask_file),
os.path.join(tracking_data.params.output_files.output_directory,params.output_files.box_map_file),) +\
"\nThese maps also show only the unique part of the map and have this"+\
"\nunique part cut out.\n", file=out)
# Write out NCS AU with shifted origin but initial crystal_symmetry
# Mask
mask_data_ncs_au=get_bool_mask_as_int(
ncs_group_obj=ncs_group_obj,mask_as_bool=bool_selected_regions)
if au_mask_output_file and params.output_files.write_output_maps:
# Write out the mask (as int)
write_ccp4_map(tracking_data.crystal_symmetry,
au_mask_output_file,mask_data_ncs_au)
print("Output NCS AU mask: %s" %(au_mask_output_file), file=out)
tracking_data.set_output_ncs_au_mask_info(
file_name=au_mask_output_file,
crystal_symmetry=tracking_data.crystal_symmetry,
origin=mask_data_ncs_au.origin(),
all=mask_data_ncs_au.all())
# Map
map_data_ncs_au=map_data.deep_copy()
s=(bool_selected_regions==True)
mask=map_data.deep_copy()
mask=mask.set_selected(s,1)
mask=mask.set_selected(~s,0)
if params.map_modification.soft_mask:
# buffer and smooth the mask
print("Smoothing mask")
map_data_ncs_au,smoothed_mask_data=apply_soft_mask(map_data=map_data_ncs_au,
mask_data=mask.as_double(),
rad_smooth=tracking_data.params.crystal_info.resolution,
crystal_symmetry=tracking_data.crystal_symmetry,
out=out)
half_map_data_list_au=[]
for hm in half_map_data_list: # apply mask to half maps
hm_data_ncs_au,hm_smoothed_mask_data=apply_soft_mask(
map_data=hm.deep_copy().as_double(),
mask_data=mask.as_double(),
rad_smooth=tracking_data.params.crystal_info.resolution,
crystal_symmetry=tracking_data.crystal_symmetry,
out=out)
half_map_data_list_au.append(hm_data_ncs_au)
elif (box_ncs_au): # usual. If box_ncs_au is False, do not mask
map_data_ncs_au=map_data_ncs_au*mask
one_d=map_data_ncs_au.as_1d()
n_zero=mask.count(0)
n_tot=mask.size()
mean_in_box=one_d.min_max_mean().mean*n_tot/(n_tot-n_zero)
map_data_ncs_au=map_data_ncs_au+(1-mask)*mean_in_box
half_map_data_list_au=[]
for hm in half_map_data_list: # apply mask to half maps
one_d=hm.as_1d()
mean_in_box=one_d.min_max_mean().mean*n_tot/(n_tot-n_zero)
hm_data_ncs_au=hm+(1-mask)*mean_in_box
half_map_data_list_au.append(hm_data_ncs_au)
del one_d,mask
if au_map_output_file and params.output_files.write_output_maps:
# Write out the NCS au of density
write_ccp4_map(tracking_data.crystal_symmetry,au_map_output_file,
map_data_ncs_au)
print("Output NCS AU map: %s" %(au_map_output_file), file=out)
tracking_data.set_output_ncs_au_map_info(
file_name=au_map_output_file,
crystal_symmetry=tracking_data.crystal_symmetry,
origin=map_data_ncs_au.origin(),
all=map_data_ncs_au.all())
# Now box_map of cut out AU
box_mask_ncs_au,box_crystal_symmetry,\
dummy_smoothed_box_mask_data,dummy_original_box_map_data=cut_out_map(
map_data=mask_data_ncs_au.as_double(),
crystal_symmetry=tracking_data.crystal_symmetry,
min_point=lower_bounds, max_point=upper_bounds,out=out)
# Mask
if params.output_files.box_mask_file and params.output_files.write_output_maps:
# write out box_map NCS mask representing one AU of the NCS
write_ccp4_map(
box_crystal_symmetry,
os.path.join(tracking_data.params.output_files.output_directory,params.output_files.box_mask_file),
box_mask_ncs_au)
print("Output NCS au as box (cut out) mask: %s " %(
os.path.join(tracking_data.params.output_files.output_directory,params.output_files.box_mask_file)), file=out)
tracking_data.set_output_box_mask_info(
file_name=os.path.join(tracking_data.params.output_files.output_directory,params.output_files.box_mask_file),
crystal_symmetry=box_crystal_symmetry,
origin=box_mask_ncs_au.origin(),
all=box_mask_ncs_au.all())
# Map
box_map_ncs_au,box_crystal_symmetry,\
dummy_smoothed_box_mask_data,dummy_original_box_map_data=cut_out_map(
soft_mask=tracking_data.params.map_modification.soft_mask,
resolution=tracking_data.params.crystal_info.resolution,
map_data=map_data_ncs_au.as_double(),
crystal_symmetry=tracking_data.crystal_symmetry,
min_point=lower_bounds, max_point=upper_bounds,out=out)
half_map_data_list_au_box=[]
for hmdlu in half_map_data_list_au:
hm_box_map_ncs_au,dummy_box_crystal_symmetry,\
dummy_smoothed_box_mask_data,dummy_original_box_map_data=cut_out_map(
soft_mask=tracking_data.params.map_modification.soft_mask,
resolution=tracking_data.params.crystal_info.resolution,
map_data=hmdlu.as_double(),
crystal_symmetry=tracking_data.crystal_symmetry,
min_point=lower_bounds, max_point=upper_bounds,out=out)
half_map_data_list_au_box.append(hm_box_map_ncs_au)
if params.control.save_box_map_ncs_au:
tracking_data.set_box_map_ncs_au_map_data(
box_map_ncs_au_crystal_symmetry=box_crystal_symmetry,
box_map_ncs_au_map_data=box_map_ncs_au,
box_map_ncs_au_half_map_data_list=half_map_data_list_au_box,
)
write_ccp4_map(tracking_data.crystal_symmetry,'map_data_ncs_au.ccp4',map_data_ncs_au)
write_ccp4_map(box_crystal_symmetry,'box_map_ncs_au.ccp4',box_map_ncs_au)
if params.output_files.box_map_file:
# write out NCS map as box_map (cut out region of map enclosed in box_mask)
if params.output_files.write_output_maps:
write_ccp4_map(box_crystal_symmetry,
os.path.join(tracking_data.params.output_files.output_directory,params.output_files.box_map_file),
box_map_ncs_au)
print("Output NCS au as box (cut out) map: %s " %(
os.path.join(tracking_data.params.output_files.output_directory,params.output_files.box_map_file)), file=out)
tracking_data.set_output_box_map_info(
file_name=os.path.join(tracking_data.params.output_files.output_directory,params.output_files.box_map_file),
crystal_symmetry=box_crystal_symmetry,
origin=box_map_ncs_au.origin(),
all=box_map_ncs_au.all())
# Write out all the selected regions
if params.output_files.write_output_maps:
print("\nWriting out region maps. "+\
"These superimpose on the NCS AU map \nand "+\
"mask %s,%s\n" %(
os.path.join(tracking_data.params.output_files.output_directory,params.output_files.box_map_file),
os.path.join(tracking_data.params.output_files.output_directory,params.output_files.box_mask_file),), file=out)
map_files_written,remainder_regions_written=write_region_maps(params,
map_data=map_data,
tracking_data=tracking_data,
ncs_group_obj=ncs_group_obj,
remainder_ncs_group_obj=remainder_ncs_group_obj,
out=out)
# and pick up the remainder regions not already written
remainder_map_files_written,dummy_remainder=write_region_maps(params,
map_data=map_data,
tracking_data=tracking_data,
ncs_group_obj=remainder_ncs_group_obj,
regions_to_skip=remainder_regions_written,
out=out)
map_files_written+=remainder_map_files_written
else:
map_files_written=[]
return map_files_written
def write_intermediate_maps(params,
map_data=None,
map_data_remaining=None,
ncs_group_obj=None,
tracking_data=None,
out=sys.stdout):
if map_data_remaining and params.output_files.remainder_map_file:
write_ccp4_map(
tracking_data.crystal_symmetry,params.output_files.remainder_map_file,
map_data_remaining)
print("Wrote output remainder map to %s" %(
params.output_files.remainder_map_file), file=out)
if params.segmentation.write_all_regions:
for id in ncs_group_obj.selected_regions:
region_mask=ncs_group_obj.edited_mask.deep_copy()
s = (ncs_group_obj.edited_mask == -1)
s |= (ncs_group_obj.edited_mask==id)
region_mask = region_mask.set_selected(s,1)
region_mask = region_mask.set_selected(~s,0)
write_ccp4_map(tracking_data.crystal_symmetry,
'mask_%d.ccp4' %id, region_mask)
print("Wrote output mask for region %d to %s" %(id,
"mask_%d.ccp4" %(id)), file=out)
def iterate_search(params,
map_data_remaining=None,
map_data=None,
ncs_obj=None,
ncs_group_obj=None,
scattered_points=None,
tracking_data=None,
out=sys.stdout):
# Write out intermediate maps if desired
if params.output_files.write_intermediate_maps:
write_intermediate_maps(params,
map_data=map_data,
map_data_remaining=map_data_remaining,
ncs_group_obj=ncs_group_obj,
tracking_data=tracking_data,
out=out)
new_params=deepcopy(params)
new_params.segmentation.iterate_with_remainder=False
new_params.segmentation.density_threshold=None
new_params.output_files.write_output_maps=False
new_params.output_files.output_info_file=None
if params.output_files.write_intermediate_maps:
new_params.output_files.au_output_file_stem=\
params.output_files.au_output_file_stem+"_cycle_2"
else:
new_params.output_files.au_output_file_stem=None
fraction=params.segmentation.iteration_fraction
if tracking_data.n_residues:
new_n_residues=int(tracking_data.n_residues*fraction)
new_solvent_fraction=max(0.001,min(0.999,
1- (1-tracking_data.solvent_fraction)*fraction))
new_tracking_data=deepcopy(tracking_data)
if new_tracking_data.n_residues:
new_tracking_data.set_n_residues(new_n_residues)
new_tracking_data.set_solvent_fraction(new_solvent_fraction)
new_tracking_data.set_origin_shift() # sets it to zero
new_tracking_data.params.segmentation.starting_density_threshold=new_params.segmentation.starting_density_threshold # this is new
print("\nIterating with remainder density", file=out)
# NOTE: do not include pdb_hierarchy here unless you deep_copy it
remainder_ncs_group_obj,dummy_remainder,remainder_tracking_data=run(
None,params=new_params,
map_data=map_data_remaining,
ncs_obj=ncs_obj,
target_scattered_points=scattered_points,
tracking_data=new_tracking_data,
is_iteration=True,
out=out)
if not remainder_ncs_group_obj: # Nothing to do
return None
# Combine the results to get remainder_id_dict
# remainder_id_dict[id_remainder]=id_nearby
remainder_ncs_group_obj=combine_with_iteration(params,
map_data=map_data,
crystal_symmetry=tracking_data.crystal_symmetry,
ncs_group_obj=ncs_group_obj,
remainder_ncs_group_obj=remainder_ncs_group_obj,
out=out)
return remainder_ncs_group_obj
def bounds_overlap(lower=None,upper=None,
other_lower=None,other_upper=None,tol=1):
for i in range(3):
if upper[i]+tol<other_lower[i]: return False
if other_upper[i]+tol<lower[i]: return False
return True
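# bounds_overlap is a quick axis-aligned box test: two grid boxes count as
# overlapping unless they are separated by more than tol grid points along
# some axis. For example (hypothetical values, not executed):
#   bounds_overlap(lower=[0,0,0], upper=[10,10,10],
#     other_lower=[11,0,0], other_upper=[20,10,10], tol=1)   # -> True
#   ...with other_lower=[12,0,0] it would return False (separated along x).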
def combine_with_iteration(params,
map_data=None,
crystal_symmetry=None,
ncs_group_obj=None,
remainder_ncs_group_obj=None,
out=sys.stdout):
if not ncs_group_obj.selected_regions or not remainder_ncs_group_obj \
or not remainder_ncs_group_obj.selected_regions:
return None
# see if any regions in ncs_obj overlap with remainder_ncs_group_obj...
# If so, combine
remainder_id_dict={}
for id_remainder in remainder_ncs_group_obj.selected_regions:
best_id=None
best_overlaps=None
remainder_centers=\
remainder_ncs_group_obj.region_scattered_points_dict[id_remainder]
# figure out typical distance between scattered_points...
touching_dist=get_touching_dist(remainder_centers)
# Notice bounds of remainder region:
r_lower,r_upper=get_bounds(
ncs_group_obj=remainder_ncs_group_obj,id=id_remainder)
for id in ncs_group_obj.selected_regions:
# Skip if not likely to be very close...
lower,upper=get_bounds(ncs_group_obj=ncs_group_obj,id=id)
if not bounds_overlap(lower=lower,upper=upper,
other_lower=r_lower,other_upper=r_upper):
continue
test_centers=ncs_group_obj.region_scattered_points_dict[id]
dist=get_closest_dist(test_centers,remainder_centers)
if touching_dist is not None and dist>touching_dist:
continue
bool_region_mask = ncs_group_obj.co.expand_mask(
id_to_expand=ncs_group_obj.original_id_from_id[id],
expand_size=params.segmentation.expand_size+1) # just touching
s = (bool_region_mask== True)
s &= (remainder_ncs_group_obj.edited_mask==id_remainder)
overlaps=s.count(True)
if best_overlaps is None or overlaps>best_overlaps:
best_overlaps=overlaps
best_id=id
if best_overlaps:
print("\nCombining remainder id %d with original id %d (overlaps=%d)" %(
id_remainder,best_id,best_overlaps), file=out)
remainder_id_dict[id_remainder]=best_id
remainder_ncs_group_obj.remainder_id_dict=remainder_id_dict
return remainder_ncs_group_obj
def get_touching_dist(centers,default=100.,min_dist=8.):
mean_dist=0.
mean_dist_n=0.
nskip=max(1,len(centers)//10) # try to get 10
for i in range(0,len(centers),nskip):
if i==0:
target=centers[1:]
elif i==len(centers)-1:
target=centers[:-1]
else:
target=centers[:i]
target.extend(centers[i+1:])
other=centers[i:i+1]
if not target or not other: continue
dist=get_closest_dist(target,other)
if dist is not None:
mean_dist+=dist
mean_dist_n+=1.
if mean_dist_n>0:
return max(min_dist,2.0*mean_dist/mean_dist_n)
else:
return default
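# get_touching_dist estimates a typical nearest-neighbor spacing by sampling
# roughly 10 of the centers, averaging the closest distance from each sampled
# center to the remaining centers, and returning twice that mean (never less
# than min_dist). If no distances can be measured it falls back to default.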
def get_grid_units(map_data=None,crystal_symmetry=None,radius=None,
out=sys.stdout):
N_ = map_data.all()
sx,sy,sz= 1/N_[0], 1/N_[1], 1/N_[2]
sx_cart,sy_cart,sz_cart=crystal_symmetry.unit_cell().orthogonalize(
[sx,sy,sz])
grid_spacing=(sx_cart+sy_cart+sz_cart)/3.
grid_units=int(radius/grid_spacing)
min_cell_grid_units=min(N_[0], N_[1], N_[2])
grid_units=max(1,min(grid_units,int(min_cell_grid_units/3)))
print("Grid units representing %7.1f A will be %d" %(
radius,grid_units), file=out)
return grid_units
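# get_grid_units converts a radius in Angstroms to a number of grid points
# using the average grid spacing along the three axes. A rough worked example
# (hypothetical numbers): a 100 A cell sampled on 200 grid points has a
# spacing of ~0.5 A, so a 4 A radius corresponds to about 8 grid units,
# capped at one third of the smallest grid dimension and at least 1.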
def cut_out_map(map_data=None, crystal_symmetry=None,
soft_mask=None,soft_mask_radius=None,resolution=None,
shift_origin=None,
min_point=None,max_point=None,out=sys.stdout):
from cctbx import uctbx
from cctbx import maptbx
na = map_data.all() # tuple with dimensions
for i in range(3):
assert min_point[i] >= 0
assert max_point[i] <= na[i]
new_map_data = maptbx.copy(map_data, tuple(min_point), tuple(max_point))
# NOTE: end point of map is max_point, so size of map (new all()) is
# (max_point-min_point+ (1,1,1))
# shrink unit cell, angles are the same
# NOTE 2: the origin of output map will be min_point (not 0,0,0).
shrunk_uc = []
for i in range(3):
shrunk_uc.append(
crystal_symmetry.unit_cell().parameters()[i]*new_map_data.all()[i]/na[i] )
uc_params=crystal_symmetry.unit_cell().parameters()
new_unit_cell_box = uctbx.unit_cell(
parameters=(shrunk_uc[0],shrunk_uc[1],shrunk_uc[2],
uc_params[3],uc_params[4],uc_params[5]))
new_crystal_symmetry=crystal.symmetry(
unit_cell=new_unit_cell_box,space_group='p1')
if soft_mask:
if soft_mask_radius is None:
soft_mask_radius=resolution
assert soft_mask_radius is not None
original_map_data=new_map_data.deep_copy()
new_map_data,smoothed_mask_data=set_up_and_apply_soft_mask(
map_data=new_map_data,
shift_origin=shift_origin,
crystal_symmetry=new_crystal_symmetry,
resolution=resolution,
radius=soft_mask_radius,out=out)
else:
original_map_data=None
smoothed_mask_data=None
return new_map_data, new_crystal_symmetry,\
smoothed_mask_data,original_map_data
def set_up_and_apply_soft_mask(map_data=None,shift_origin=None,
crystal_symmetry=None,resolution=None,
radius=None,out=sys.stdout):
acc=map_data.accessor()
map_data = map_data.shift_origin()
new_acc=map_data.accessor()
# Add soft boundary to mean around outside of mask
# grid_units is how many grid units are about equal to soft_mask_radius
grid_units=get_grid_units(map_data=map_data,
crystal_symmetry=crystal_symmetry,radius=radius,out=out)
grid_units=int(0.5+0.5*grid_units)
from cctbx import maptbx
zero_boundary_map=maptbx.zero_boundary_box_map(
map_data,grid_units).result()
  # this map is zero around the edge and 1 in the middle
  # multiply the smoothed zero_boundary_map by the map data and return
print("Applying soft mask to boundary of cut out map", file=out)
new_map_data,smoothed_mask_data=apply_soft_mask(map_data=map_data,
mask_data=zero_boundary_map,
rad_smooth=resolution,
crystal_symmetry=crystal_symmetry,
out=out)
if new_acc != acc:
new_map_data.reshape(acc)
smoothed_mask_data.reshape(acc)
return new_map_data,smoothed_mask_data
def apply_shift_to_pdb_hierarchy(
origin_shift=None,
crystal_symmetry=None,
pdb_hierarchy=None,out=sys.stdout):
if origin_shift is not None:
sites_cart=pdb_hierarchy.atoms().extract_xyz()
sites_cart_shifted=sites_cart+\
flex.vec3_double(sites_cart.size(), origin_shift)
pdb_hierarchy.atoms().set_xyz(sites_cart_shifted)
return pdb_hierarchy
def apply_origin_shift(origin_shift=None,
ncs_object=None,
shifted_ncs_object=None,
pdb_hierarchy=None,
target_hierarchy=None,
map_data=None,
shifted_map_file=None,
shifted_pdb_file=None,
shifted_ncs_file=None,
tracking_data=None,
sharpening_target_pdb_inp=None,
out=sys.stdout):
if shifted_map_file:
write_ccp4_map(tracking_data.crystal_symmetry,
shifted_map_file,
map_data)
print("Wrote shifted map to %s" %(
shifted_map_file), file=out)
tracking_data.set_shifted_map_info(file_name=
shifted_map_file,
crystal_symmetry=tracking_data.crystal_symmetry,
origin=map_data.origin(),
all=map_data.all())
if origin_shift: # Note origin shift does not change crystal_symmetry
if pdb_hierarchy:
pdb_hierarchy=apply_shift_to_pdb_hierarchy(
origin_shift=origin_shift,
crystal_symmetry=tracking_data.crystal_symmetry,
pdb_hierarchy=pdb_hierarchy,
out=out)
if sharpening_target_pdb_inp:
sharpening_target_pdb_inp=apply_shift_to_pdb_hierarchy(
origin_shift=origin_shift,
crystal_symmetry=tracking_data.crystal_symmetry,
pdb_hierarchy=sharpening_target_pdb_inp.construct_hierarchy(),
out=out).as_pdb_input()
if target_hierarchy:
target_hierarchy=apply_shift_to_pdb_hierarchy(
origin_shift=origin_shift,
crystal_symmetry=tracking_data.crystal_symmetry,
pdb_hierarchy=target_hierarchy,
out=out)
from scitbx.math import matrix
if ncs_object and not shifted_ncs_object:
      shifted_ncs_object=ncs_object.coordinate_offset(
coordinate_offset=matrix.col(origin_shift))
if shifted_pdb_file and pdb_hierarchy:
import iotbx.pdb
f=open(shifted_pdb_file,'w')
print(iotbx.pdb.format_cryst1_record(
crystal_symmetry=tracking_data.crystal_symmetry), file=f)
print(pdb_hierarchy.as_pdb_string(), file=f)
f.close()
print("Wrote shifted pdb file to %s" %(
shifted_pdb_file), file=out)
tracking_data.set_shifted_pdb_info(file_name=shifted_pdb_file,
n_residues=pdb_hierarchy.overall_counts().n_residues)
if shifted_ncs_file and shifted_ncs_object:
shifted_ncs_object.format_all_for_group_specification(
file_name=shifted_ncs_file)
print("Wrote %s NCS operators for shifted map to %s" %(
shifted_ncs_object.max_operators(),
shifted_ncs_file), file=out)
if tracking_data.input_ncs_info.has_updated_operators():
print("NOTE: these may include additional operators added to fill the cell"+\
" or\nhave fewer operators if not all applied.", file=out)
tracking_data.set_shifted_ncs_info(file_name=shifted_ncs_file,
number_of_operators=shifted_ncs_object.max_operators(),
is_helical_symmetry=tracking_data.input_ncs_info.is_helical_symmetry)
tracking_data.shifted_ncs_info.show_summary(out=out)
return shifted_ncs_object,pdb_hierarchy,target_hierarchy,tracking_data,\
sharpening_target_pdb_inp
def restore_pdb(params,tracking_data=None,out=sys.stdout):
if not params.output_files.restored_pdb:
params.output_files.restored_pdb=\
params.input_files.pdb_to_restore[:-4]+"_restored.pdb"
print("Shifting origin of %s and writing to %s" %(
params.input_files.pdb_to_restore,
params.output_files.restored_pdb), file=out)
os=tracking_data.origin_shift
origin_shift=(-os[0],-os[1],-os[2])
print("Origin shift will be: %.1f %.1f %.1f "%(origin_shift), file=out)
import iotbx.pdb
pdb_inp = iotbx.pdb.input(file_name=params.input_files.pdb_to_restore)
pdb_hierarchy = pdb_inp.construct_hierarchy()
pdb_hierarchy=apply_shift_to_pdb_hierarchy(
origin_shift=origin_shift,
crystal_symmetry=tracking_data.crystal_symmetry,
pdb_hierarchy=pdb_hierarchy,
out=out)
f=open(params.output_files.restored_pdb,'w')
print(iotbx.pdb.format_cryst1_record(
crystal_symmetry=tracking_data.crystal_symmetry), file=f)
print(pdb_hierarchy.as_pdb_string(), file=f)
f.close()
print("Wrote restored pdb file to %s" %(
params.output_files.restored_pdb), file=out)
def find_threshold_in_map(target_points=None,
map_data=None,
iter_max=10):
map_1d=map_data.as_1d()
map_mean=map_1d.min_max_mean().mean
map_max=map_1d.min_max_mean().max
map_min=map_1d.min_max_mean().min
cutoff=map_mean
low=map_min
high=map_max
for iter in range(iter_max):
s = (map_1d >cutoff)
n_cutoff=s.count(True)
if n_cutoff == target_points:
return cutoff
elif n_cutoff < target_points: # lower it
high=cutoff
cutoff=0.5*(cutoff+low)
else: # raise it
low=cutoff
cutoff=0.5*(cutoff+high)
return cutoff
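# find_threshold_in_map does a simple bisection on the density cutoff: start
# at the map mean, count points above the cutoff, then move the cutoff halfway
# toward the map minimum (to include more points) or the maximum (to include
# fewer) until about target_points grid points lie above it, or iter_max
# cycles have been used.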
def remove_points(mask,remove_points=None):
keep_points=(remove_points==False)
new_mask=(mask & keep_points)
return new_mask
def get_ncs_sites_cart(sites_cart=None,ncs_id=None,
ncs_obj=None, unit_cell=None, ncs_in_cell_only=True):
ncs_sites_cart=flex.vec3_double()
if not ncs_obj or not ncs_obj.ncs_groups() or not ncs_obj.ncs_groups()[0] or \
not ncs_obj.ncs_groups()[0].translations_orth():
return ncs_sites_cart
# identify ncs-related points
ncs_group=ncs_obj.ncs_groups()[0]
identity_op=ncs_group.identity_op_id()
ncs_sites_cart=flex.vec3_double()
for xyz_cart in sites_cart:
for i0 in range(len(ncs_group.translations_orth())):
if i0==identity_op: continue
if ncs_id is not None and i0!=ncs_id: continue
r=ncs_group.rota_matrices_inv()[i0] # inverse maps pos 0 on to pos i
t=ncs_group.translations_orth_inv()[i0]
new_xyz_cart=r * matrix.col(xyz_cart) + t
ncs_sites_cart.append(new_xyz_cart)
if ncs_in_cell_only:
new_sites_cart=flex.vec3_double()
ncs_sites_frac=unit_cell.fractionalize(ncs_sites_cart)
for site_frac,site_cart in zip(ncs_sites_frac,ncs_sites_cart):
if site_frac[0]>=0 and site_frac[0]<=1 and \
site_frac[1]>=0 and site_frac[1]<=1 and \
site_frac[2]>=0 and site_frac[2]<=1:
new_sites_cart.append(site_cart)
ncs_sites_cart=new_sites_cart
return ncs_sites_cart
def get_ncs_mask(map_data=None,unit_cell=None,ncs_object=None,
starting_mask=None,radius=None,expand_radius=None,overall_mask=None,
every_nth_point=None):
assert every_nth_point is not None
if not expand_radius: expand_radius=2.*radius
working_au_mask=starting_mask.deep_copy()
working_ncs_mask=mask_from_sites_and_map( # empty ncs mask
map_data=map_data,unit_cell=unit_cell,
sites_cart=flex.vec3_double(),radius=radius,overall_mask=overall_mask)
au_points_last=working_au_mask.count(True)
ncs_points_last=working_ncs_mask.count(True)
max_tries=10000
for ii in range(max_tries): # just a big number; should take just a few
# Find all points in au (sample every_nth_point in grid)
au_sites_cart=get_marked_points_cart(mask_data=working_au_mask,
unit_cell=unit_cell,every_nth_point=every_nth_point,
boundary_radius=radius)
# Find all points ncs-related to marked point in mask
ncs_sites_cart=get_ncs_sites_cart(sites_cart=au_sites_cart,
ncs_obj=ncs_object,unit_cell=unit_cell,ncs_in_cell_only=True)
# Expand au slightly with all points near to au_sites_cart
new_au_mask=mask_from_sites_and_map(
map_data=map_data,unit_cell=unit_cell,
sites_cart=au_sites_cart,radius=radius,overall_mask=overall_mask)
working_au_mask=(working_au_mask | new_au_mask) # add on to existing
keep_points=(working_ncs_mask==False) # cross off those in ncs
working_au_mask=(working_au_mask & keep_points)
# mark ncs au with all points not in au that are close to ncs_sites_cart
new_ncs_mask=mask_from_sites_and_map(
map_data=map_data,unit_cell=unit_cell,
sites_cart=ncs_sites_cart,radius=radius,overall_mask=overall_mask)
keep_points=(working_au_mask==False) # cross off those in au
new_ncs_mask=(new_ncs_mask & keep_points)
working_ncs_mask=(new_ncs_mask | working_ncs_mask) # add on to existing
au_points=working_au_mask.count(True)
ncs_points=working_ncs_mask.count(True)
if au_points==au_points_last and ncs_points==ncs_points_last:
break
au_points_last=au_points
ncs_points_last=ncs_points
# Now expand the au and repeat
working_au_mask=mask_from_sites_and_map(
map_data=map_data,unit_cell=unit_cell,
sites_cart=au_sites_cart,radius=expand_radius,overall_mask=overall_mask)
keep_points=(working_ncs_mask==False) # cross off those in ncs
working_au_mask=(working_au_mask & keep_points)
return working_au_mask,working_ncs_mask
def renormalize_map_data(
map_data=None,solvent_fraction=None):
sd=max(0.0001,map_data.sample_standard_deviation())
if solvent_fraction >= 10.: solvent_fraction=solvent_fraction/100.
assert solvent_fraction > 0 and solvent_fraction < 1
scaled_sd=sd/(1-solvent_fraction)**0.5
map_data=(map_data-map_data.as_1d().min_max_mean().mean)/scaled_sd
return map_data
def mask_from_sites_and_map(
map_data=None,unit_cell=None,
sites_cart=None,radius=None,overall_mask=None):
assert radius is not None
from cctbx import maptbx
sel = maptbx.grid_indices_around_sites(
unit_cell = unit_cell,
fft_n_real = map_data.focus(),
fft_m_real = map_data.all(),
sites_cart = sites_cart,
site_radii = flex.double(sites_cart.size(), radius))
map_data_1d=map_data.as_1d()
  mask=(map_data_1d==0) & (map_data_1d==1) # elementwise AND: 1D bool array, all False
mask.set_selected(sel, True) # mark points around sites
mask.reshape(map_data.accessor())
if overall_mask:
assert overall_mask.all()==mask.all()
mask=(mask & overall_mask)
return mask
def set_radius(unit_cell=None,map_data=None,every_nth_point=None):
# Set radius so that radius will capture all points on grid if sampled
# on every_nth_point
a,b,c = unit_cell.parameters()[:3]
nx,ny,nz=map_data.all()
  # longest possible distance between adjacent sampled grid points (diagonal)
max_diagonal_between_sampled=every_nth_point*(
(a/nx)**2+(b/ny)**2+(c/nz)**2)**0.5
radius=max_diagonal_between_sampled*0.55 # big enough to cover everything
return radius
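# set_radius picks a radius slightly larger than half of the longest diagonal
# between sampled grid points, so that spheres of this radius around points
# sampled every_nth_point cover the whole grid. For example (hypothetical
# numbers): a 100 A cubic cell on a 100-point grid, sampled every 3rd point,
# gives a diagonal of about 3*sqrt(3) ~ 5.2 A and a radius of about 2.9 A.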
def get_marked_points_cart(mask_data=None,unit_cell=None,
every_nth_point=3,boundary_radius=None):
# return list of cartesian coordinates of grid points that are marked
# only sample every every_nth_point in each direction...
assert mask_data.origin() == (0,0,0)
nx,ny,nz=mask_data.all()
if boundary_radius:
# How far from edges shall we stay:
grid_frac=(1./nx,1./ny,1./nz)
grid_orth=unit_cell.orthogonalize(grid_frac)
boundary_grid_points=0
for go in grid_orth:
bgp=int(0.99+boundary_radius/go)
boundary_grid_points=max(boundary_grid_points,bgp)
else:
boundary_grid_points=0
marked_points=maptbx.marked_grid_points(
map_data=mask_data,
every_nth_point=every_nth_point).result()
sites_frac=flex.vec3_double()
boundary_points_skipped=0
for grid_point in marked_points:
if boundary_grid_points:
if \
grid_point[0]<boundary_grid_points or \
grid_point[0]>nx-boundary_grid_points or \
grid_point[1]<boundary_grid_points or \
grid_point[1]>ny-boundary_grid_points or \
grid_point[2]<boundary_grid_points or \
grid_point[2]>nz-boundary_grid_points: # XXX was typo previously
boundary_points_skipped+=1
continue
sites_frac.append(
(grid_point[0]/nx,
grid_point[1]/ny,
grid_point[2]/nz))
sites_cart=unit_cell.orthogonalize(sites_frac)
return sites_cart
def get_overall_mask(
map_data=None,
mask_threshold=None,
fraction_of_max_mask_threshold=None,
mask_padding_fraction=None,
solvent_fraction=None,
crystal_symmetry=None,
radius=None,
resolution=None,
d_max=100000.,
out=sys.stdout):
# Make a local SD map from our map-data
from cctbx.maptbx import crystal_gridding
from cctbx import sgtbx
cg=crystal_gridding(
unit_cell=crystal_symmetry.unit_cell(),
space_group_info=sgtbx.space_group_info(number=1), # Always
pre_determined_n_real=map_data.all())
if not resolution:
from cctbx.maptbx import d_min_from_map
resolution=d_min_from_map(
map_data,crystal_symmetry.unit_cell(), resolution_factor=1./4.)
print("\nEstimated resolution of map: %6.1f A\n" %(
resolution), file=out)
if radius:
smoothing_radius=2.*radius
else:
smoothing_radius=2.*resolution
from mmtbx.command_line.map_to_structure_factors import run as map_to_sf
args=['d_min=None','box=True']
from libtbx.utils import null_out
map_coeffs=map_to_sf(args=args,
space_group_number=1, # always p1 cell for this
ccp4_map=make_ccp4_map(map_data,crystal_symmetry.unit_cell()),
return_as_miller_arrays=True,nohl=True,out=null_out())
if not map_coeffs:
raise Sorry("No map coeffs obtained")
map_coeffs=map_coeffs.resolution_filter(d_min=resolution,d_max=d_max)
complete_set = map_coeffs.complete_set()
stol = flex.sqrt(complete_set.sin_theta_over_lambda_sq().data())
import math
w = 4 * stol * math.pi * smoothing_radius
sphere_reciprocal = 3 * (flex.sin(w) - w * flex.cos(w))/flex.pow(w, 3)
try:
temp = complete_set.structure_factors_from_map(
flex.pow2(map_data-map_data.as_1d().min_max_mean().mean))
except Exception as e:
print(e, file=out)
raise Sorry("The sampling of the map appears to be too low for a "+
"\nresolution of %s. Try using a larger value for resolution" %(
resolution))
fourier_coeff=complete_set.array(data=temp.data()*sphere_reciprocal)
sd_map=fourier_coeff.fft_map(
crystal_gridding=cg,
).apply_volume_scaling().real_map_unpadded()
assert sd_map.all()==map_data.all()
# now use sd_map
# First mask out the map based on threshold
mm=sd_map.as_1d().min_max_mean()
max_in_sd_map=mm.max
mean_in_map=mm.mean
min_in_map=mm.min
print("Highest value in SD map is %7.2f. Mean is %7.2f . Lowest is %7.2f " %(
max_in_sd_map,
mean_in_map,
min_in_map), file=out)
if fraction_of_max_mask_threshold:
mask_threshold=fraction_of_max_mask_threshold*max_in_sd_map
print("Using fraction of max as threshold: %.3f " %(
fraction_of_max_mask_threshold), \
"which is threshold of %.3f" %(mask_threshold), file=out)
if mask_padding_fraction:
# Adjust threshold to increase by mask_padding_fraction, proportional
# to fraction available
overall_mask=(sd_map>= mask_threshold)
current_above_threshold=overall_mask.count(True)/overall_mask.size()
# current+(1-current)*pad
additional_padding=(1-current_above_threshold)*mask_padding_fraction
target_above_threshold=min(
0.99,current_above_threshold+additional_padding)
print("Target with padding of %.2f will be %.2f" %(
mask_padding_fraction,target_above_threshold), file=out)
solvent_fraction=(1-target_above_threshold)
mask_threshold=None
if mask_threshold:
print("Cutoff for mask will be input threshold", file=out)
threshold=mask_threshold
else: # guess based on solvent_fraction
if solvent_fraction is None:
print("Guessing solvent fraction of 0.9", file=out)
solvent_fraction=0.9 # just guess
threshold=find_threshold_in_map(target_points=int(
(1.-solvent_fraction)*sd_map.size()),
map_data=sd_map)
print("Cutoff will be threshold marking about %7.1f%% of cell" %(
100.*(1.-solvent_fraction)), file=out)
overall_mask=(sd_map>= threshold)
print("Model region of map "+\
"(density above %7.3f )" %( threshold) +" includes %7.1f%% of map" %(
100.*overall_mask.count(True)/overall_mask.size()), file=out)
return overall_mask,max_in_sd_map,sd_map
def get_skew(data=None):
mean=data.min_max_mean().mean
sd=data.standard_deviation_of_the_sample()
x=data-mean
return (x**3).min_max_mean().mean/sd**3
def get_kurtosis(data=None):
mean=data.min_max_mean().mean
sd=data.standard_deviation_of_the_sample()
x=data-mean
return (x**4).min_max_mean().mean/sd**4
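# get_skew and get_kurtosis compute the standard third and fourth standardized
# moments of the map values:
#   skew     = mean((x - mean)**3) / sd**3
#   kurtosis = mean((x - mean)**4) / sd**4
# (a normal distribution has skew ~0 and kurtosis ~3).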
def score_map(map_data=None,
sharpening_info_obj=None,
solvent_fraction=None,
fraction_occupied=None,
wrapping=None,
sa_percent=None,
region_weight=None,
max_regions_to_test=None,
scale_region_weight=False,
out=sys.stdout):
if sharpening_info_obj:
solvent_fraction=sharpening_info_obj.solvent_fraction
wrapping=sharpening_info_obj.wrapping
fraction_occupied=sharpening_info_obj.fraction_occupied
sa_percent=sharpening_info_obj.sa_percent
region_weight=sharpening_info_obj.region_weight
max_regions_to_test=sharpening_info_obj.max_regions_to_test
else:
sharpening_info_obj=sharpening_info()
if solvent_fraction is None: # skip SA score
sharpening_info_obj.adjusted_sa=0.
assert sharpening_info_obj.sharpening_target=='kurtosis'
else: # usual
map_data=renormalize_map_data(
map_data=map_data,solvent_fraction=solvent_fraction)
target_in_all_regions=map_data.size()*fraction_occupied*(1-solvent_fraction)
print("\nTarget number of points in all regions: %.0f" %(
target_in_all_regions), file=out)
threshold=find_threshold_in_map(target_points=int(
target_in_all_regions),map_data=map_data)
print("Cutoff will be threshold of %7.2f marking %7.1f%% of cell" %(
threshold,100.*(1.-solvent_fraction)), file=out)
co,sorted_by_volume,min_b,max_b=get_co(
map_data=map_data.deep_copy(),
threshold=threshold,wrapping=wrapping)
if len(sorted_by_volume)<2:
return sharpening_info_obj# skip it, nothing to do
target_sum= sa_percent* target_in_all_regions*0.01
print("Points for %.1f percent of target in all regions: %.1f" %(
sa_percent,target_sum), file=out)
cntr=0
sum_v=0.
sum_new_v=0.
for p in sorted_by_volume[1:max_regions_to_test+2]:
cntr+=1
v,i=p
sum_v+=v
bool_expanded=co.expand_mask(id_to_expand=i,expand_size=1)
new_v=bool_expanded.count(True)-v
sum_new_v+=new_v
sa_ratio=new_v/v
if sum_v>=target_sum: break
sa_ratio=sum_new_v/max(1.,sum_v) # ratio of SA to volume
regions=len(sorted_by_volume[1:])
normalized_regions=regions/max(1,target_in_all_regions)
skew=get_skew(map_data.as_1d())
if scale_region_weight:
solvent_fraction_std=0.85 # typical value, ends up as scale on weight
region_weight_scale=(1.-solvent_fraction)/(1.-solvent_fraction_std)
region_weight_use=region_weight*region_weight_scale
else:
region_weight_use=region_weight
sharpening_info_obj.adjusted_sa=\
sa_ratio - region_weight_use*normalized_regions
sharpening_info_obj.sa_ratio=sa_ratio
sharpening_info_obj.normalized_regions=normalized_regions
sharpening_info_obj.kurtosis=get_kurtosis(map_data.as_1d())
if sharpening_info_obj.sharpening_target=='kurtosis':
sharpening_info_obj.score=sharpening_info_obj.kurtosis
else:
sharpening_info_obj.score=sharpening_info_obj.adjusted_sa
return sharpening_info_obj
def sharpen_map_with_si(sharpening_info_obj=None,
f_array_normalized=None,
f_array=None,phases=None,
map_data=None,
overall_b=None,
resolution=None,
out=sys.stdout):
si=sharpening_info_obj
if si.sharpening_method=='no_sharpening':
return map_and_b_object(map_data=map_data)
if map_data and (not f_array or not phases):
map_coeffs,dummy=get_f_phases_from_map(map_data=map_data,
crystal_symmetry=si.crystal_symmetry,
d_min=si.resolution,
d_min_ratio=si.d_min_ratio,
return_as_map_coeffs=True,
scale_max=si.scale_max,
out=out)
f_array,phases=map_coeffs_as_fp_phi(map_coeffs)
if si.remove_aniso:
if si.use_local_aniso and \
(si.local_aniso_in_local_sharpening or
(si.local_aniso_in_local_sharpening is None and si.ncs_copies==1)) and \
si.original_aniso_obj: # use original
aniso_obj=si.original_aniso_obj
print("\nRemoving aniso from map using saved aniso object before sharpening\n", file=out)
else:
print("\nRemoving aniso from map before sharpening\n", file=out)
aniso_obj=None
from cctbx.maptbx.refine_sharpening import analyze_aniso
f_array,f_array_ra=analyze_aniso(
aniso_obj=aniso_obj,
remove_aniso=si.remove_aniso,
f_array=f_array,resolution=si.resolution,out=out)
if si.is_model_sharpening() or si.is_half_map_sharpening():
from cctbx.maptbx.refine_sharpening import scale_amplitudes
    ff=f_array.phase_transfer(phase_source=phases,deg=True)
    map_and_b=scale_amplitudes(
      map_coeffs=ff,
si=si,overall_b=overall_b,out=out)
return map_and_b
elif si.is_resolution_dependent_sharpening():
if f_array_normalized is None:
from cctbx.maptbx.refine_sharpening import get_sharpened_map,\
quasi_normalize_structure_factors
(d_max,d_min)=f_array.d_max_min()
if not f_array.binner():
f_array.setup_binner(n_bins=si.n_bins,d_max=d_max,d_min=d_min)
f_array_normalized=quasi_normalize_structure_factors(
f_array,set_to_minimum=0.01)
map_data=get_sharpened_map(ma=f_array_normalized,phases=phases,
b=si.resolution_dependent_b,resolution=si.resolution,n_real=si.n_real,
d_min_ratio=si.d_min_ratio)
return map_and_b_object(map_data=map_data)
else:
map_and_b=apply_sharpening(n_real=si.n_real,
f_array=f_array,phases=phases,
sharpening_info_obj=si,
crystal_symmetry=si.crystal_symmetry,
out=null_out())
return map_and_b
def put_bounds_in_range(
lower_bounds=None,upper_bounds=None,
box_size=None,
n_buffer=None,
n_real=None,out=sys.stdout):
# put lower and upper inside (0,n_real) and try to make size at least minimum
new_lb=[]
new_ub=[]
print("Putting bounds in range...(%s,%s,%s) to (%s,%s,%s)" %(
tuple(list(lower_bounds)+list(upper_bounds))), file=out)
if n_buffer:
print("Buffer of %s added" %(n_buffer), file=out)
for lb,ub,ms,nr in zip(lower_bounds,upper_bounds,box_size,n_real):
if ub<lb: ub=lb
if lb >ub: lb=ub
extra=(ms-(ub-lb))//2
lb=lb-extra
ub=ub+extra
if n_buffer:
lb=lb-n_buffer
ub=ub+n_buffer
if lb<0:
shift=-lb
lb+=shift
ub+=shift
boundary=int(ms-(ub-lb+1))//2
if boundary>0:
lb=lb-boundary
ub=ub+boundary
if lb<0: lb=0
if ub>nr: ub=nr
new_lb.append(lb)
new_ub.append(ub)
print("New bounds ...(%s,%s,%s) to (%s,%s,%s)" %(
tuple(list(new_lb)+list(new_ub))), file=out)
return tuple(new_lb),tuple(new_ub)
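# put_bounds_in_range symmetrically grows each (lower, upper) pair toward the
# requested box_size, adds n_buffer if given, then shifts and clamps the
# result into [0, n_real]. A minimal sketch (hypothetical values, not
# executed):
#   put_bounds_in_range(lower_bounds=(10,10,10), upper_bounds=(20,20,20),
#     box_size=(40,40,40), n_buffer=None, n_real=(100,100,100))
# would give (0,0,0) to (40,40,40) along each axis.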
def get_iterated_solvent_fraction(map=None,
verbose=None,
resolve_size=None,
crystal_symmetry=None,
mask_padding_fraction=None,
fraction_of_max_mask_threshold=None,
cell_cutoff_for_solvent_from_mask=None,
mask_resolution=None,
out=sys.stdout):
if cell_cutoff_for_solvent_from_mask and \
crystal_symmetry.unit_cell().volume() > cell_cutoff_for_solvent_from_mask**3:
#go directly to low_res_mask
return get_solvent_fraction_from_low_res_mask(
crystal_symmetry=crystal_symmetry,
map_data=map.deep_copy(),
mask_padding_fraction=mask_padding_fraction,
fraction_of_max_mask_threshold=fraction_of_max_mask_threshold,
mask_resolution=mask_resolution)
try:
from phenix.autosol.map_to_model import iterated_solvent_fraction
solvent_fraction=iterated_solvent_fraction(
crystal_symmetry=crystal_symmetry,
map_as_double=map,
verbose=verbose,
resolve_size=resolve_size,
return_solvent_fraction=True,
out=out)
    if solvent_fraction<=0.989: # below the hard limit of 0.99, so usable
return solvent_fraction
else: # use backup method
return get_solvent_fraction_from_low_res_mask(
crystal_symmetry=crystal_symmetry,
map_data=map.deep_copy(),
mask_padding_fraction=mask_padding_fraction,
fraction_of_max_mask_threshold=fraction_of_max_mask_threshold,
mask_resolution=mask_resolution)
except Exception as e:
# catch case where map was not on proper grid
if str(e).find("sym equiv of a grid point must be a grid point")>-1:
print("\nSpace group:%s \n Unit cell: %s \n Gridding: %s \nError message: %s" %(
crystal_symmetry.space_group().info(),
str(crystal_symmetry.unit_cell().parameters()),
str(map.all()),str(e)), file=out)
raise Sorry(
"The input map seems to be on a grid incompatible with crystal symmetry"+
"\n(symmetry equivalents of a grid point must be on "+
"an integer grid point)")
elif str(e).find("maximum size for resolve is")>-1:
raise Sorry(str(e)+
"\nIt may be possible to go on by supplying solvent content"+
"or molecular_mass")
# Try to get solvent fraction with low_res mask
return get_solvent_fraction_from_low_res_mask(
crystal_symmetry=crystal_symmetry,
map_data=map.deep_copy(),
mask_padding_fraction=mask_padding_fraction,
fraction_of_max_mask_threshold=fraction_of_max_mask_threshold,
mask_resolution=mask_resolution)
def get_solvent_fraction_from_low_res_mask(
crystal_symmetry=None,map_data=None,
fraction_of_max_mask_threshold=None,
mask_padding_fraction=None,
mask_resolution=None,
out=sys.stdout):
overall_mask,max_in_sd_map,sd_map=get_overall_mask(map_data=map_data,
fraction_of_max_mask_threshold=fraction_of_max_mask_threshold,
mask_padding_fraction=mask_padding_fraction,
crystal_symmetry=crystal_symmetry,
resolution=mask_resolution,
out=out)
solvent_fraction=overall_mask.count(False)/overall_mask.size()
print("Solvent fraction from overall mask: %.3f " %(solvent_fraction), file=out)
return solvent_fraction
def get_solvent_fraction_from_molecular_mass(
crystal_symmetry=None,molecular_mass=None,out=sys.stdout):
map_volume=crystal_symmetry.unit_cell().volume()
density_factor=1000*1.23 # just protein density, close enough...
mm=molecular_mass
molecule_fraction= mm*density_factor/map_volume
if molecule_fraction > 1 and mm > 1000: mm=mm/1000 # was in Da
solvent_fraction=max(0.01,min(1.,1 - (
mm*density_factor/map_volume)))
print("Solvent content of %7.2f from molecular mass of %7.1f kDa" %(
solvent_fraction,mm), file=out)
return solvent_fraction
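# get_solvent_fraction_from_molecular_mass assumes a protein-like density
# (the 1000*1.23 factor converts kDa to an approximate volume in A**3) and
# takes solvent = 1 - (mass * factor / cell volume). A rough worked example
# (hypothetical numbers): 100 kDa in a 1,000,000 A**3 cell gives a molecule
# fraction of about 0.12 and therefore a solvent fraction of about 0.88.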
def set_up_si(var_dict=None,crystal_symmetry=None,
is_crystal=None,
ncs_copies=None,n_residues=None,
solvent_fraction=None,molecular_mass=None,pdb_inp=None,map=None,
auto_sharpen=True,half_map_data_list=None,verbose=None,
out=sys.stdout):
si=sharpening_info(n_real=map.all())
args=[]
auto_sharpen_methods=var_dict.get('auto_sharpen_methods')
if auto_sharpen_methods and auto_sharpen_methods != ['None'] and \
len(auto_sharpen_methods)==1:
sharpening_method=auto_sharpen_methods[0]
else:
sharpening_method=None
for param in [
'verbose','resolve_size','seq_file', 'sequence',
'box_size',
'target_n_overlap',
'restrict_map_size',
'box_center','remove_aniso',
'input_weight_map_pickle_file', 'output_weight_map_pickle_file',
'read_sharpened_maps', 'write_sharpened_maps', 'select_sharpened_map',
'output_directory',
'smoothing_radius','use_local_aniso',
'local_aniso_in_local_sharpening',
'overall_before_local',
'local_sharpening',
'box_in_auto_sharpen',
'density_select_in_auto_sharpen',
'density_select_threshold_in_auto_sharpen',
'use_weak_density',
'resolution',
'd_min_ratio',
'scale_max',
'input_d_cut',
'b_blur_hires',
'discard_if_worse',
'mask_atoms','mask_atoms_atom_radius','value_outside_atoms',
'soft_mask',
'tol_r','abs_tol_t',
'rel_tol_t',
'require_helical_or_point_group_symmetry',
'max_helical_operators',
'allow_box_if_b_iso_set',
'max_box_fraction',
'cc_cut',
'max_cc_for_rescale',
'scale_using_last',
'density_select_max_box_fraction',
'k_sharpen',
'optimize_b_blur_hires',
'iterate',
'optimize_d_cut',
'residual_target','sharpening_target',
'search_b_min','search_b_max','search_b_n','adjust_region_weight',
'region_weight_method',
'region_weight_factor',
'region_weight_buffer',
'region_weight_default',
'target_b_iso_ratio',
'signal_min',
'buffer_radius',
'wang_radius',
'pseudo_likelihood',
'target_b_iso_model_scale',
'b_iso','b_sharpen',
'resolution_dependent_b',
'normalize_amplitudes_in_resdep',
'region_weight',
'sa_percent',
'n_bins',
'eps',
'max_regions_to_test',
'regions_to_keep',
'fraction_occupied',
'rmsd',
'rmsd_resolution_factor',
'k_sol',
'b_sol',
'fraction_complete',
'nproc',
'multiprocessing',
'queue_run_command',
'verbose',
]:
x=var_dict.get(param)
if x is not None:
if type(x)==type([1,2,3]):
xx=[]
for k in x:
xx.append(str(k))
args.append("%s=%s" %(param," ".join(xx)))
else:
args.append("%s=%s" %(param,x))
local_params=get_params_from_args(args)
# Set solvent content from molecular_mass if present
if molecular_mass and not solvent_fraction:
solvent_fraction=get_solvent_fraction_from_molecular_mass(
crystal_symmetry=crystal_symmetry,molecular_mass=molecular_mass,
out=out)
if (local_params.input_files.seq_file or
local_params.crystal_info.sequence) and \
not local_params.crystal_info.solvent_content and \
not solvent_fraction: # 2017-12-19
solvent_fraction=get_solvent_fraction(local_params,
crystal_symmetry=crystal_symmetry,
ncs_copies=ncs_copies,out=out)
si.update_with_params(params=local_params,
crystal_symmetry=crystal_symmetry,
is_crystal=is_crystal,
solvent_fraction=solvent_fraction,
ncs_copies=ncs_copies,
n_residues=n_residues,
auto_sharpen=auto_sharpen,
sharpening_method=sharpening_method,
pdb_inp=pdb_inp,
half_map_data_list=half_map_data_list,
)
return si
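# Note on set_up_si() above: var_dict is typically locals() from
# auto_sharpen_map_or_map_coeffs() below, and every recognized keyword that is
# not None is re-serialized as a "name=value" string (list values become
# space-separated) before being re-parsed with get_params_from_args(), so the
# keyword arguments end up in the same params object a command-line run would
# produce. A minimal sketch of the generated args (hypothetical values):
#   args = ['resolution=3.0', 'box_size=40 40 40']
#   local_params = get_params_from_args(args)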
def bounds_to_frac(b,map_data):
a=map_data.all()
return b[0]/a[0],b[1]/a[1], b[2]/a[2]
def bounds_to_cart(b,map_data,crystal_symmetry=None):
bb=bounds_to_frac(b,map_data)
a,b,c=crystal_symmetry.unit_cell().parameters()[:3]
return a*bb[0],b*bb[1], c*bb[2]
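# Sketch of the two conversions above (hypothetical numbers): on a 100x100x100
# grid with an 80 A cubic cell, grid bounds (50,25,10) become fractional
# (0.5,0.25,0.1) and Cartesian (40.,20.,8.) A. Note that bounds_to_cart()
# multiplies by the cell lengths a,b,c only (angles are ignored), so it is
# exact only for orthogonal cells.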
def select_inside_box(lower_bounds=None,upper_bounds=None,xrs=None,
hierarchy=None):
if not hierarchy or not xrs:
return None
selection = flex.bool(xrs.scatterers().size())
for atom_group in hierarchy.atom_groups():
for atom in atom_group.atoms():
if atom.xyz[0]>=lower_bounds[0] and \
atom.xyz[0]<=upper_bounds[0] and \
atom.xyz[1]>=lower_bounds[1] and \
atom.xyz[1]<=upper_bounds[1] and \
atom.xyz[2]>=lower_bounds[2] and \
atom.xyz[2]<=upper_bounds[2]:
selection[atom.i_seq]=True
asc1=hierarchy.atom_selection_cache()
return hierarchy.select(selection)
def make_empty_map(template_map=None,value=0.):
# Create empty map original_map_in_box
empty_map=flex.double(template_map.as_1d().as_double().size(),value)
empty_map.reshape(flex.grid(template_map.all()))
return empty_map
def sum_box_data(starting_map=None,box_map=None,
lower_bounds=None,upper_bounds=None):
# sum box data into starting_map
#Pull out current starting_map data
starting_box_data= maptbx.copy(starting_map, tuple(lower_bounds), tuple(upper_bounds))
assert starting_box_data.all()==box_map.all()
# Add to box_map
starting_box_data=starting_box_data.as_1d()
box_map_data=box_map.as_1d()
starting_box_data+=box_map_data
# put back into shape
starting_box_data.reshape(flex.grid(box_map.all()))
maptbx.set_box(
map_data_from = starting_box_data,
map_data_to = starting_map,
start = lower_bounds,
end = upper_bounds)
return starting_map
def copy_box_data(starting_map=None,box_map=None,
lower_bounds=None,upper_bounds=None):
# Copy box data into original_map_in_box
maptbx.set_box(
map_data_from = box_map,
map_data_to = starting_map,
start = lower_bounds,
end = upper_bounds)
return starting_map
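# The two helpers above implement the accumulate-and-paste pattern used by
# run_local_sharpening() below: maptbx.copy() pulls the current box out of the
# full map, the box contribution is added (sum_box_data) or simply substituted
# (copy_box_data), and maptbx.set_box() writes the region back in place.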
def select_box_map_data(si=None,
map_data=None,
first_half_map_data=None,
second_half_map_data=None,
pdb_inp=None,
get_solvent_fraction=True,# XXX test not doing this...
n_min=30, # at least 30 atoms to run model sharpening
restrict_map_size=None,
out=sys.stdout,local_out=sys.stdout):
box_solvent_fraction=None
solvent_fraction=si.solvent_fraction
crystal_symmetry=si.crystal_symmetry
box_size=si.box_size
lower_bounds=None
upper_bounds=None
smoothed_box_mask_data=None
original_box_map_data=None #
n_buffer=None
if (not pdb_inp and not si.box_in_auto_sharpen) and (
first_half_map_data and second_half_map_data):
print("Creating density-based soft mask and applying to half-map data", file=out)
if not si.soft_mask:
raise Sorry(
"Need to set soft_mask=True for half-map sharpening without model")
# NOTE: could precede this by density_select on map_data, save bounds and
# cut out half-maps with those bounds. That case could
# cover si.soft_mask=False
half_map_data_list=[first_half_map_data,second_half_map_data]
box_mask_data,box_map,half_map_data_list,\
box_solvent_fraction,smoothed_box_mask_data,original_box_map_data=\
get_and_apply_soft_mask_to_maps(
resolution=si.resolution,
wang_radius=si.wang_radius,
buffer_radius=si.buffer_radius,
map_data=map_data,crystal_symmetry=crystal_symmetry,
half_map_data_list=half_map_data_list,
out=out)
box_first_half_map,box_second_half_map=half_map_data_list
box_crystal_symmetry=crystal_symmetry
box_pdb_inp=pdb_inp
elif pdb_inp or (
si.density_select_in_auto_sharpen and not si.box_in_auto_sharpen):
# use map_box for pdb_inp (mask with model)
# also use map_box for density_select_in_auto_sharpen sharpening because
# need to use the same density select for all 3 maps.
    # XXX Perhaps we can use the above method for pdb_inp
assert not si.local_sharpening
if pdb_inp:
print("Using map_box based on input model", file=out)
hierarchy=pdb_inp.construct_hierarchy()
max_box_fraction=si.max_box_fraction
si.density_select_in_auto_sharpen=False
else:
#print >>out,"Using density_select in map_box"
hierarchy=None
assert si.density_select_in_auto_sharpen
max_box_fraction=si.density_select_max_box_fraction
#----------------------trimming model-------------------------------
if si.box_center: # Have model but center at box_center and trim hierarchy
lower_bounds,upper_bounds=box_from_center(si=si,
map_data=map_data,out=out)
if si.soft_mask:
n_buffer=get_grid_units(map_data=map_data,
crystal_symmetry=crystal_symmetry,
radius=si.resolution,out=out)
n_buffer=int(0.5+n_buffer*1.5)
else:
n_buffer=0
lower_bounds,upper_bounds=put_bounds_in_range(
lower_bounds=lower_bounds,upper_bounds=upper_bounds,
box_size=box_size,n_buffer=n_buffer,
n_real=map_data.all(),out=out)
lower_frac=bounds_to_frac(lower_bounds,map_data)
upper_frac=bounds_to_frac(upper_bounds,map_data)
lower_cart=bounds_to_cart(
lower_bounds,map_data,crystal_symmetry=crystal_symmetry)
upper_cart=bounds_to_cart(
upper_bounds,map_data,crystal_symmetry=crystal_symmetry)
if hierarchy: # trimming hierarchy to box and then using trimmed
# hierarchy in map_box to create actual box
xrs=hierarchy.extract_xray_structure(
crystal_symmetry=si.crystal_symmetry)
# find everything in box
sel_hierarchy=select_inside_box(lower_bounds=lower_cart,
upper_bounds=upper_cart, xrs=xrs,hierarchy=hierarchy)
n=sel_hierarchy.overall_counts().n_atoms
print("Selected atoms inside box: %d" %(n), file=out)
if n<n_min:
print("Skipping...using entire structure", file=out)
else:
hierarchy=sel_hierarchy
#----------------------end trimming model-------------------------------
from mmtbx.command_line.map_box import run as run_map_box
args=["keep_input_unit_cell_and_grid=False"]
if si.density_select_in_auto_sharpen:
args.append('density_select=True')
#print >>out,"Using density_select in map_box"
if si.density_select_threshold_in_auto_sharpen is not None:
args.append('density_select_threshold=%s' %(
si.density_select_threshold_in_auto_sharpen))
elif si.box_in_auto_sharpen and not si.mask_atoms:
print("Using map_box with model", file=out)
elif si.mask_atoms:
print("Using map_box with model and mask_atoms", file=out)
args.append('mask_atoms=True')
if si.mask_atoms_atom_radius:
args.append('mask_atoms_atom_radius=%s' %(si.mask_atoms_atom_radius))
if si.value_outside_atoms:
args.append('value_outside_atoms=%s' %(si.value_outside_atoms))
if si.soft_mask:
print("Using soft mask", file=out)
args.append('soft_mask=%s' %(si.soft_mask))
args.append('soft_mask_radius=%s' %(si.resolution))
else:
raise Sorry("Unknown choice in select_box_data")
if restrict_map_size:
args.append('restrict_map_size=True')
print("Getting map as box now", file=out)
local_hierarchy=None
if hierarchy:
local_hierarchy=hierarchy.deep_copy() # run_map_box modifies its argument
box=run_map_box(args,
map_data=map_data,pdb_hierarchy=local_hierarchy,
write_output_files=False,
crystal_symmetry=crystal_symmetry,log=out)
lower_bounds=box.gridding_first
upper_bounds=box.gridding_last
box_map=box.map_box
box_map=scale_map(box_map,out=out)
box_crystal_symmetry=box.box_crystal_symmetry
box_pdb_inp=box.hierarchy.as_pdb_input()
if first_half_map_data:
print("Getting first map as box", file=out)
if hierarchy:
local_hierarchy=hierarchy.deep_copy() # required
box_first=run_map_box(args,
map_data=first_half_map_data,pdb_hierarchy=local_hierarchy,
write_output_files=False,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
crystal_symmetry=crystal_symmetry,
log=out)
box_first_half_map=box_first.map_box.as_double()
else:
box_first_half_map=None
if second_half_map_data:
print("Getting second map as box", file=out)
if hierarchy:
local_hierarchy=hierarchy.deep_copy() # required
box_second=run_map_box(args,
map_data=second_half_map_data,pdb_hierarchy=local_hierarchy,
write_output_files=False,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
crystal_symmetry=crystal_symmetry,
log=out)
box_second_half_map=box_second.map_box.as_double()
else:
box_second_half_map=None
else: # cut out box based on box_center or regions
if si.box_center: # center at box_center
print("Cutting out box based on center at (%.2f,%.2f,%.2f) " %( si.box_center), file=out)
lower_bounds,upper_bounds=box_from_center(si=si,
map_data=map_data,out=out)
elif si.use_weak_density:
print("Cutting out box based on centering on weak density", file=out)
lower_bounds,upper_bounds=box_of_smallest_region(si=si,
map_data=map_data,
out=out)
else:
print("Cutting out box based on centering on strong density", file=out)
lower_bounds,upper_bounds=box_of_biggest_region(si=si,
map_data=map_data,
out=out)
if si.soft_mask:
n_buffer=get_grid_units(map_data=map_data,
crystal_symmetry=crystal_symmetry,
radius=si.resolution,out=out)
n_buffer=int(0.5+n_buffer*1.5)
else:
n_buffer=0
lower_bounds,upper_bounds=put_bounds_in_range(
lower_bounds=lower_bounds,upper_bounds=upper_bounds,
box_size=box_size,n_buffer=n_buffer,
n_real=map_data.all(),out=out)
# select map data inside this box
print("\nSelecting map data inside box", file=out)
box_map,box_crystal_symmetry,\
smoothed_box_mask_data,original_box_map_data=cut_out_map(
map_data=map_data.as_double(),
crystal_symmetry=crystal_symmetry,
soft_mask=si.soft_mask,
soft_mask_radius=si.resolution,
resolution=si.resolution,
shift_origin=True,
min_point=lower_bounds, max_point=upper_bounds,out=out)
box_pdb_inp=None
if first_half_map_data:
box_first_half_map,box_first_crystal_symmetry,\
dummy_smoothed_box_mask_data,dummy_original_box_map_data=cut_out_map(
map_data=first_half_map_data.as_double(),
crystal_symmetry=crystal_symmetry,
soft_mask=si.soft_mask,
soft_mask_radius=si.resolution,
resolution=si.resolution,
shift_origin=True,
min_point=lower_bounds, max_point=upper_bounds,out=local_out)
else:
box_first_half_map=None
if second_half_map_data:
box_second_half_map,box_second_crystal_symmetry,\
dummy_smoothed_box_mask_data,dummy_original_box_map_data=cut_out_map(
map_data=second_half_map_data.as_double(),
crystal_symmetry=crystal_symmetry,
soft_mask=si.soft_mask,
soft_mask_radius=si.resolution,
resolution=si.resolution,
shift_origin=True,
min_point=lower_bounds, max_point=upper_bounds,out=local_out)
else:
box_second_half_map=None
if not box_map or (
(not pdb_inp and not second_half_map_data) and \
box_map.size() > si.max_box_fraction* map_data.size()):
return None,map_data,first_half_map_data,\
second_half_map_data,crystal_symmetry,None,\
smoothed_box_mask_data,None,None # no point
# figure out solvent fraction in this box...
if get_solvent_fraction: #
if box_solvent_fraction is None:
box_solvent_fraction=get_iterated_solvent_fraction(
crystal_symmetry=box_crystal_symmetry,
map=box_map,
fraction_of_max_mask_threshold=si.fraction_of_max_mask_threshold,
mask_resolution=si.resolution,
out=out)
if box_solvent_fraction is None:
box_solvent_fraction=si.solvent_fraction
print("Using overall solvent fraction for box", file=out)
print("Local solvent fraction: %7.2f" %(box_solvent_fraction), file=out)
else:
box_solvent_fraction=None
box_sharpening_info_obj=box_sharpening_info(
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
n_real=box_map.all(),
scale_max=si.scale_max,
wrapping=False,
crystal_symmetry=box_crystal_symmetry,
solvent_fraction=box_solvent_fraction)
return box_pdb_inp,box_map,box_first_half_map,box_second_half_map,\
box_crystal_symmetry,box_sharpening_info_obj,\
smoothed_box_mask_data,original_box_map_data,n_buffer
def inside_zero_one(xyz):
from scitbx.array_family import flex
from scitbx.matrix import col
offset=xyz-col((0.5,0.5,0.5))
lower_int=offset.iround().as_vec3_double()
return xyz-lower_int
def move_xyz_inside_cell(xyz_cart=None,crystal_symmetry=None):
xyz_local=flex.vec3_double()
if type(xyz_cart)==type(xyz_local):
xyz_local=xyz_cart
is_single=False
else:
is_single=True
xyz_local.append(xyz_cart)
xyz_frac=crystal_symmetry.unit_cell().fractionalize(xyz_local)
new_xyz_frac=inside_zero_one(xyz_frac)
new_xyz_cart=crystal_symmetry.unit_cell().orthogonalize(new_xyz_frac)
if is_single:
return new_xyz_cart[0]
else:
return new_xyz_cart
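# Sketch of the wrapping above (hypothetical 100 A cubic cell): a Cartesian
# site at (130,-20,50) fractionalizes to (1.3,-0.2,0.5), inside_zero_one()
# maps it to (0.3,0.8,0.5), and orthogonalization returns (30.,80.,50.),
# back inside the cell. Either a single site or a flex.vec3_double array of
# sites may be supplied.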
def box_from_center( si=None,
map_data=None,
out=sys.stdout):
cx,cy,cz=si.crystal_symmetry.unit_cell().fractionalize(si.box_center)
if cx<0 or cx>1 or cy<0 or cy>1 or cz<0 or cz>1:
print("Moving box center inside (0,1)", file=out)
si.box_center=move_xyz_inside_cell(
xyz_cart=si.box_center,crystal_symmetry=si.crystal_symmetry)
cx,cy,cz=si.crystal_symmetry.unit_cell().fractionalize(si.box_center)
print("\nBox centered at (%7.2f,%7.2f,%7.2f) A" %(
tuple(si.box_center)), file=out)
ax,ay,az=map_data.all()
cgx,cgy,cgz=int(0.5+ax*cx),int(0.5+ay*cy),int(0.5+az*cz),
print("Box grid centered at (%d,%d,%d)\n" %(cgx,cgy,cgz), file=out)
return (cgx,cgy,cgz),(cgx,cgy,cgz)
def box_of_smallest_region(si=None,
map_data=None,
return_as_list=None,
out=sys.stdout):
return box_of_biggest_region(si=si,
map_data=map_data,
return_as_list=return_as_list,
use_smallest=True,
out=out)
def box_of_biggest_region(si=None,
map_data=None,
return_as_list=None,
use_smallest=False,
out=sys.stdout):
n_residues=si.n_residues
ncs_copies=si.ncs_copies
solvent_fraction=si.solvent_fraction
b_vs_region=b_vs_region_info()
co,sorted_by_volume,min_b,max_b,unique_expected_regions,best_score,\
new_threshold,starting_density_threshold=\
get_connectivity(
b_vs_region=b_vs_region,
map_data=map_data,
n_residues=n_residues,
ncs_copies=ncs_copies,
solvent_fraction=solvent_fraction,
min_volume=si.min_volume,
min_ratio=si.min_ratio,
fraction_occupied=si.fraction_occupied,
wrapping=si.wrapping,
residues_per_region=si.residues_per_region,
max_ratio_to_target=si.max_ratio_to_target,
min_ratio_to_target=si.min_ratio_to_target,
min_ratio_of_ncs_copy_to_first=si.min_ratio_of_ncs_copy_to_first,
starting_density_threshold=si.starting_density_threshold,
density_threshold=si.density_threshold,
crystal_symmetry=si.crystal_symmetry,
chain_type=si.chain_type,
verbose=si.verbose,
out=out)
if len(sorted_by_volume)<2:
return # nothing to do
if use_smallest:
small_ratio=0.25
maximum_position_ratio=0.75
v1,i1=sorted_by_volume[1]
v_small=small_ratio*v1
maximum_position_small=maximum_position_ratio*(len(sorted_by_volume)-1)+1
best_pos=1
ii=0
for v,i in sorted_by_volume[1:]:
ii+=1
if v < v_small: continue
if ii > maximum_position_small: continue
best_pos=ii
v,i=sorted_by_volume[best_pos]
print("\nVolume of target region %d: %d grid points: "%(best_pos,v), file=out)
else: # usual
v,i=sorted_by_volume[1]
print("\nVolume of largest region: %d grid points: "%(v), file=out)
print("Region %3d (%3d) volume:%5d X:%6d - %6d Y:%6d - %6d Z:%6d - %6d "%(
1,i,v,
min_b[i][0],max_b[i][0],
min_b[i][1],max_b[i][1],
min_b[i][2],max_b[i][2]), file=out)
if (not return_as_list):
return min_b[i],max_b[i]
else: # return a list of centers
centers_frac=flex.vec3_double()
a1,a2,a3=map_data.all()
for v,i in sorted_by_volume[1:]:
centers_frac.append(
tuple((
(min_b[i][0]+max_b[i][0])/(2.*a1),
(min_b[i][1]+max_b[i][1])/(2.*a2),
(min_b[i][2]+max_b[i][2])/(2.*a3),
))
)
return centers_frac
def get_fft_map(n_real=None,map_coeffs=None):
from cctbx import maptbx
from cctbx.maptbx import crystal_gridding
if n_real:
cg=crystal_gridding(
unit_cell=map_coeffs.crystal_symmetry().unit_cell(),
space_group_info=map_coeffs.crystal_symmetry().space_group_info(),
pre_determined_n_real=n_real)
else:
cg=None
ccs=map_coeffs.crystal_symmetry()
fft_map = map_coeffs.fft_map( resolution_factor = 0.25,
crystal_gridding=cg,
symmetry_flags=maptbx.use_space_group_symmetry)
fft_map.apply_sigma_scaling()
return fft_map
def average_from_bounds(lower,upper,grid_all=None):
avg=[]
for u,l in zip(upper,lower):
avg.append(0.5*(u+l))
if grid_all:
avg_fract=[]
for a,g in zip(avg,grid_all):
avg_fract.append(a/g)
avg=avg_fract
return avg
def get_ncs_copies(site_cart,ncs_object=None,
only_inside_box=None,unit_cell=None):
ncs_group=ncs_object.ncs_groups()[0]
from scitbx.array_family import flex
from scitbx.matrix import col
sites_cart_ncs=flex.vec3_double()
for t,r in zip(ncs_group.translations_orth_inv(),
ncs_group.rota_matrices_inv()):
sites_cart_ncs.append(r * col(site_cart) + t)
if only_inside_box:
assert unit_cell is not None
sites_frac_ncs=unit_cell.fractionalize(sites_cart_ncs)
new_sites_frac=flex.vec3_double()
for x in sites_frac_ncs:
if x[0]>=0 and x[0]<=1 and \
x[1]>=0 and x[1]<=1 and \
x[2]>=0 and x[2]<=1:
new_sites_frac.append(x)
sites_cart_ncs=unit_cell.orthogonalize(new_sites_frac)
return sites_cart_ncs
def fit_bounds_inside_box(lower,upper,box_size=None,all=None):
# adjust bounds so upper>lower and box size is at least box_size
new_lower=[]
new_upper=[]
if box_size:
for u,l,s,a in zip(upper,lower,box_size,all):
ss=u-l+1
delta=int((1+s-ss)/2) # desired increase in size, to subtract from l
l=max(0,l-delta)
ss=u-l+1
delta=(s-ss) # desired increase in size, to add to u
u=min(a-1,u+delta)
l=max(0,l)
new_lower.append(l)
new_upper.append(u)
else:
for u,l,a in zip(upper,lower,all):
u=min(a-1,u)
l=max(0,l)
new_lower.append(l)
new_upper.append(u)
return new_lower,new_upper
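# Worked example for fit_bounds_inside_box (hypothetical numbers): with
# lower=(10,10,10), upper=(20,20,20), box_size=(31,31,31), all=(100,100,100),
# each axis is grown downward by 10 and then upward by 10, giving
# ([0,0,0],[30,30,30]), i.e. a 31-grid-point span per axis clamped to the grid.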
def split_boxes(lower=None,upper=None,target_size=None,target_n_overlap=None,
fix_target_size_and_overlap=None):
# purpose: split the region from lower to upper into overlapping
# boxes of size about target_size
# NOTE: does not actually use target_n_overlap unless
# fix_target_size_and_overlap is set
all_box_list=[]
for l,u,ts in zip (lower,upper,target_size):
n=u+1-l
# offset defined by: ts-offset + ts-offset+...+ts =n
# ts*n_box-offset*(n_box-1)=n approx
# n_box (ts-offset) +offset=n
# n_box= (n-offset)/(ts-offset)
assert ts>target_n_overlap
if fix_target_size_and_overlap:
n_box=(n-target_n_overlap-1)/(ts-target_n_overlap)
if n_box>int(n_box):
n_box=int(n_box)+1
else:
n_box=int(n_box)
new_target_size=ts
offset=ts-target_n_overlap
else: # original version
n_box=max(1,(n+(3*ts//4))//ts)
new_target_size=int(0.9+n/n_box)
offset=int(0.9+(n-new_target_size)/max(1,n_box-1))
box_list=[]
for i in range(n_box):
start_pos=max(l,l+i*offset)
end_pos=min(u,start_pos+new_target_size)
if fix_target_size_and_overlap:
start_pos=max(0,min(start_pos,end_pos-new_target_size))
box_list.append([start_pos,end_pos])
all_box_list.append(box_list)
new_lower_upper_list=[]
for xs,xe in all_box_list[0]:
for ys,ye in all_box_list[1]:
for zs,ze in all_box_list[2]:
new_lower_upper_list.append([(xs,ys,zs,),(xe,ye,ze,)])
return new_lower_upper_list
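# Worked example for split_boxes (hypothetical numbers, default mode with
# fix_target_size_and_overlap unset): lower=(0,0,0), upper=(99,99,99),
# target_size=(40,40,40) gives three segments per axis, approximately
# [0,34], [33,67], [66,99], and hence 27 overlapping (lower,upper) boxes.
# As noted above, the actual overlap (about 2 grid points here) is not set by
# target_n_overlap in this mode.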
def get_target_boxes(si=None,ncs_obj=None,map=None,
pdb_inp=None,out=sys.stdout):
print(80*"-", file=out)
print("Getting segmented map to ID locations for sharpening", file=out)
print(80*"-", file=out)
if si.input_weight_map_pickle_file:
from libtbx import easy_pickle
file_name=si.input_weight_map_pickle_file
print("Loading segmentation data from %s" %(file_name), file=out)
tracking_data=easy_pickle.load(file_name)
else:
args=[
'resolution=%s' %(si.resolution),
'seq_file=%s' %(si.seq_file),
'sequence=%s' %(si.sequence),
'solvent_content=%s' %(si.solvent_fraction),
'auto_sharpen=False', # XXX could sharpen overall
'write_output_maps=True',
'add_neighbors=False',
'density_select=False', ]
if si.is_crystal:
args.append("is_crystal=True")
ncs_group_obj,remainder_ncs_group_obj,tracking_data=run(
args,
map_data=map.deep_copy(),
ncs_obj=ncs_obj,
crystal_symmetry=si.crystal_symmetry)
if si.output_weight_map_pickle_file:
from libtbx import easy_pickle
file_name=os.path.join(si.output_directory,si.output_weight_map_pickle_file)
print("Dumping segmentation data to %s" %(file_name), file=out)
easy_pickle.dump(file_name,tracking_data)
if not ncs_obj or ncs_obj.max_operators()==0:
from mmtbx.ncs.ncs import ncs
ncs_obj=ncs()
ncs_obj.set_unit_ncs()
print("Regions in this map:", file=out)
centers_frac=flex.vec3_double()
upper_bounds_list=[]
lower_bounds_list=[]
if pdb_inp and pdb_inp.atoms().extract_xyz().size()>1:
xyz_list=pdb_inp.atoms().extract_xyz()
i_end=xyz_list.size()
n_centers=min(i_end,max(1,len(tracking_data.output_region_map_info_list)))
n_steps=min(n_centers,xyz_list.size())
i_step=int(0.5+min(i_end/2,i_end/n_steps)) # about n_centers but up to n_atoms
i_start=max(1,int(0.5+i_step/2))
from scitbx.matrix import col
ma=map.all()
for i in range(i_start,i_end,i_step):
lower_cart=col(xyz_list[i])
lower_frac=si.crystal_symmetry.unit_cell().fractionalize(lower_cart)
lower=[
int(0.5+ma[0]*lower_frac[0]),
int(0.5+ma[1]*lower_frac[1]),
int(0.5+ma[2]*lower_frac[2])]
lower,upper=fit_bounds_inside_box(
lower,lower,box_size=si.box_size,all=map.all())
upper_bounds_list.append(upper)
lower_bounds_list.append(lower)
average_fract=average_from_bounds(lower,upper,grid_all=map.all())
centers_frac.append(average_fract)
else:
for map_info_obj in tracking_data.output_region_map_info_list:
lower,upper=map_info_obj.lower_upper_bounds()
lower,upper=fit_bounds_inside_box(
lower,upper,
box_size=None, # take the whole region, not just center
all=map.all())
for lower,upper in split_boxes(lower=lower,upper=upper,
target_size=si.box_size,
target_n_overlap=si.target_n_overlap):
upper_bounds_list.append(upper)
lower_bounds_list.append(lower)
average_fract=average_from_bounds(lower,upper,grid_all=map.all())
centers_frac.append(average_fract)
centers_cart=si.crystal_symmetry.unit_cell().orthogonalize(centers_frac)
# Make ncs-related centers
print("NCS ops:",ncs_obj.max_operators(), file=out)
centers_cart_ncs_list=[]
for i in range(centers_cart.size()):
centers_cart_ncs_list.append(get_ncs_copies(
centers_cart[i],ncs_object=ncs_obj,only_inside_box=True,
unit_cell=si.crystal_symmetry.unit_cell()) )
all_cart=flex.vec3_double()
for center_list in centers_cart_ncs_list:
all_cart.extend(center_list)
sharpening_centers_file=os.path.join(
si.output_directory,"sharpening_centers.pdb")
write_atoms(file_name=sharpening_centers_file,
crystal_symmetry=si.crystal_symmetry,sites=centers_cart)
ncs_sharpening_centers_file=os.path.join(
si.output_directory,"ncs_sharpening_centers.pdb")
write_atoms(file_name=ncs_sharpening_centers_file,
crystal_symmetry=si.crystal_symmetry,sites=all_cart)
print("\nSharpening centers (matching shifted_map_file).\n\n "+\
"Written to: \n%s \n%s\n"%(
sharpening_centers_file,ncs_sharpening_centers_file), file=out)
for i in range(centers_cart.size()):
print("Center: %s (%7.2f,%7.2f,%7.2f) Bounds: %s :: %s " %(
i,centers_cart[i][0],centers_cart[i][1],centers_cart[i][2],
str(lower_bounds_list[i]),str(upper_bounds_list[i])), file=out)
print(80*"-", file=out)
print("Done getting segmented map to ID locations for sharpening", file=out)
print(80*"-", file=out)
return upper_bounds_list,lower_bounds_list,\
centers_cart_ncs_list,centers_cart,all_cart
def get_box_size(lower_bound=None,upper_bound=None):
box_size=[]
for lb,ub in zip(lower_bound,upper_bound):
box_size.append(ub-lb)
return box_size
def mean_dist_to_nearest_neighbor(all_cart):
if all_cart.size()<2: # nothing to check
return None
sum_dist=0.
sum_n=0.
for i in range(all_cart.size()):
xyz=all_cart[i:i+1]
others=all_cart[:i]
others.extend(all_cart[i+1:])
sum_dist+=get_closest_dist(xyz,others)
sum_n+=1.
return sum_dist/max(1.,sum_n)
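# For example, with two sharpening centers 8 A apart each point's nearest
# neighbor is the other, so mean_dist_to_nearest_neighbor() returns 8.0; the
# value is used in run_local_sharpening() below to choose a default
# smoothing_radius of about 2/3 of this distance.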
def run_local_sharpening(si=None,
auto_sharpen_methods=None,
map=None,
ncs_obj=None,
half_map_data_list=None,
pdb_inp=None,
out=sys.stdout):
print(80*"-", file=out)
print("Running local sharpening", file=out)
print(80*"-", file=out)
# run auto_sharpen_map_or_map_coeffs with box_in_auto_sharpen=True and
# centered at different places. Identify the places as centers of regions.
# Run on au of NCS and apply NCS to get remaining positions
if si.overall_before_local:
# first do overall sharpening of the map to get it about right
print(80*"*", file=out)
print("\nSharpening map overall before carrying out local sharpening\n", file=out)
overall_si=deepcopy(si)
overall_si.local_sharpening=False # don't local sharpen
overall_si=auto_sharpen_map_or_map_coeffs(si=overall_si,
auto_sharpen_methods=auto_sharpen_methods,
map=map,
half_map_data_list=half_map_data_list,
pdb_inp=pdb_inp,
out=out)
sharpened_map=overall_si.map_data
print("\nDone sharpening map overall before carrying out local sharpening\n", file=out)
print(80*"*", file=out)
else:
sharpened_map=map
# Accumulate sums
starting_weight=0.01 # put starting map everywhere with low weight
sum_weight_map=make_empty_map(template_map=map,value=starting_weight)
# in case a pixel is not covered...
sum_weight_value_map=starting_weight*sharpened_map.deep_copy()
print("\nUsing overall map for any regions where "+\
"no local information is present", file=out)
id_list=[]
b_iso_list=flex.double()
starting_b_iso_list=flex.double()
# use sharpened map here
upper_bounds_list,lower_bounds_list,\
centers_cart_ncs_list,centers_cart,all_cart=\
get_target_boxes(si=si,map=sharpened_map,ncs_obj=ncs_obj,
pdb_inp=pdb_inp,out=out)
dist=mean_dist_to_nearest_neighbor(all_cart)
if not dist:
dist=10.
if not si.smoothing_radius:
print("No nearest neighbors...best to set smoothing radius", file=out)
print("\nMean distance to nearest center is %7.2f A " %(
dist), file=out)
if not si.smoothing_radius:
si.smoothing_radius=float("%.0f" %(dist*2/3)) # 10A from nearest neighbor
print("Using %s A for smoothing radius" %(si.smoothing_radius), file=out)
i=-1
for ub,lb,centers_ncs_cart,center_cart in zip(
upper_bounds_list,lower_bounds_list,centers_cart_ncs_list,centers_cart):
i+=1
if si.select_sharpened_map is not None and i != si.select_sharpened_map:
continue
map_file_name='sharpened_map_%s.ccp4' %(i)
if 0 and si.read_sharpened_maps: # cannot do this as no bounds
print("\nReading sharpened map directly from %s" %(map_file_name), file=out)
result=get_map_object(file_name=map_file_name,
out=out)
local_map_data=result[0]
else:
local_si=deepcopy(si)
local_si.local_sharpening=False # don't do it again
local_si.box_size=get_box_size(lower_bound=lb,upper_bound=ub)
local_si.box_center=center_cart
local_si.box_in_auto_sharpen=True
local_si.density_select_in_auto_sharpen=False
local_si.use_local_aniso=si.local_aniso_in_local_sharpening
local_si.remove_aniso=si.local_aniso_in_local_sharpening
local_si.max_box_fraction=999 # just bigger than 1
local_si.density_select_max_box_fraction=999
local_si.nproc=1
print(80*"+", file=out)
print("Getting local sharpening for box %s" %(i), file=out)
print(80*"+", file=out)
bsi=auto_sharpen_map_or_map_coeffs(si=local_si,
auto_sharpen_methods=auto_sharpen_methods,
map=sharpened_map,
half_map_data_list=half_map_data_list,
pdb_inp=pdb_inp,
return_bsi=True, # just return the bsi of sharpened data
out=out)
if not bsi.map_data:
print("\nNo result for local map %s ...skipping" %(i), file=out)
continue
# merge with background using bsi.smoothed_box_mask_data
if bsi.smoothed_box_mask_data:
print("Merging small map into overall map in soft-mask region", file=out)
bsi.merge_into_overall_map(overall_map=map) # XXX overall_map not used
# Now remove buffer region
if bsi.n_buffer: # extract just the good part
print("Removing buffer from small map", file=out)
bsi.remove_buffer(out=out)
weight_data=bsi.get_gaussian_weighting(out=out)
weighted_data=bsi.map_data*weight_data
sum_weight_value_map=sum_box_data(starting_map=sum_weight_value_map,
box_map=weighted_data,
lower_bounds=bsi.lower_bounds,
upper_bounds=bsi.upper_bounds)
sum_weight_map=sum_box_data(starting_map=sum_weight_map,
box_map=weight_data,
lower_bounds=bsi.lower_bounds,
upper_bounds=bsi.upper_bounds)
id_list.append(i)
starting_b_iso_list.append(bsi.starting_b_iso)
b_iso_list.append(bsi.b_iso)
print(80*"+", file=out)
print("End of getting local sharpening for small box %s" %(i), file=out)
print(80*"+", file=out)
print("\nOverall map created from total of %s local maps" %(i), file=out)
if si.overall_before_local:
print("Note: overall map already sharpened with global sharpening", file=out)
if starting_b_iso_list.size()<1:
print("No results for local sharpening...", file=out)
else:
print("Summary of b_iso values by local map:", file=out)
print(" ID Starting B-iso Sharpened B-iso", file=out)
for i,starting_b_iso,b_iso in zip(id_list,starting_b_iso_list,b_iso_list):
print(" %4s %7.2f %7.2f" %(i,starting_b_iso,b_iso), file=out)
print("\nMean %7.2f %7.2f" %(
starting_b_iso_list.min_max_mean().mean,
b_iso_list.min_max_mean().mean), file=out)
si.map_data=sum_weight_value_map/sum_weight_map
# Get overall b_iso...
print("\nGetting overall b_iso of composite map...", file=out)
map_coeffs_aa,map_coeffs,f_array,phases=effective_b_iso(
map_data=si.map_data,
resolution=si.resolution,
d_min_ratio=si.d_min_ratio,
scale_max=si.scale_max,
crystal_symmetry=si.crystal_symmetry,
out=out)
print(80*"+", file=out)
print("End of getting local sharpening ", file=out)
print(80*"+", file=out)
return si
def auto_sharpen_map_or_map_coeffs(
si=None,
resolution=None, # resolution is required
crystal_symmetry=None, # supply crystal_symmetry and map or
map=None, # map and n_real
half_map_data_list=None, # two half-maps matching map
is_crystal=None,
map_coeffs=None,
pdb_inp=None,
ncs_obj=None,
seq_file=None,
sequence=None,
rmsd=None,
rmsd_resolution_factor=None,
k_sol=None,
b_sol=None,
fraction_complete=None,
n_real=None,
solvent_content=None,
molecular_mass=None,
region_weight=None,
sa_percent=None,
n_bins=None,
eps=None,
max_regions_to_test=None,
regions_to_keep=None,
fraction_occupied=None,
input_weight_map_pickle_file=None,
output_weight_map_pickle_file=None,
read_sharpened_maps=None,
write_sharpened_maps=None,
select_sharpened_map=None,
output_directory=None,
smoothing_radius=None,
local_sharpening=None,
local_aniso_in_local_sharpening=None,
overall_before_local=None,
use_local_aniso=None,
auto_sharpen=None,
box_in_auto_sharpen=None, # n_residues, ncs_copies required if not False
density_select_in_auto_sharpen=None,
density_select_threshold_in_auto_sharpen=None,
allow_box_if_b_iso_set=None,
use_weak_density=None,
discard_if_worse=None,
n_residues=None,
ncs_copies=None,
box_center=None,
remove_aniso=None,
box_size=None,
target_n_overlap=None,
lower_bounds=None,
upper_bounds=None,
restrict_map_size=None,
auto_sharpen_methods=None,
residual_target=None,
sharpening_target=None,
d_min_ratio=None,
scale_max=None,
input_d_cut=None,
b_blur_hires=None,
max_box_fraction=None,
cc_cut=None,
max_cc_for_rescale=None,
scale_using_last=None,
density_select_max_box_fraction=None,
mask_atoms=None,
mask_atoms_atom_radius=None,
value_outside_atoms=None,
soft_mask=None,
tol_r=None,
abs_tol_t=None,
rel_tol_t=None,
require_helical_or_point_group_symmetry=None,
max_helical_operators=None,
k_sharpen=None,
optimize_d_cut=None,
optimize_b_blur_hires=None,
iterate=None,
search_b_min=None,
search_b_max=None,
search_b_n=None,
adjust_region_weight=None,
region_weight_method=None,
region_weight_factor=None,
region_weight_buffer=None,
region_weight_default=None,
target_b_iso_ratio=None,
signal_min=None,
buffer_radius=None,
wang_radius=None,
pseudo_likelihood=None,
target_b_iso_model_scale=None,
b_iso=None, # if set, use it
b_sharpen=None, # if set, use it
resolution_dependent_b=None, # if set, use it
normalize_amplitudes_in_resdep=None, # if set, use it
return_bsi=False,
verbose=None,
resolve_size=None,
nproc=None,
multiprocessing=None,
queue_run_command=None,
out=sys.stdout):
if si: #
resolution=si.resolution
crystal_symmetry=si.crystal_symmetry
if not auto_sharpen:
auto_sharpen=si.auto_sharpen
if verbose is None:
verbose=si.verbose
if resolve_size is None:
resolve_size=si.resolve_size
if auto_sharpen is None:
auto_sharpen=True
if map_coeffs and not resolution:
resolution=map_coeffs.d_min()
if map_coeffs and not crystal_symmetry:
crystal_symmetry=map_coeffs.crystal_symmetry()
assert resolution is not None
if map:
return_as_map=True
else: # convert from structure factors to create map if necessary
map=get_fft_map(n_real=n_real, map_coeffs=map_coeffs).real_map_unpadded()
return_as_map=False
# Set ncs_copies if possible
if ncs_copies is None and ncs_obj and ncs_obj.max_operators():
ncs_copies=ncs_obj.max_operators()
print("Set ncs copies based on ncs_obj to %s" %(ncs_copies), file=out)
# Determine if we are running model_sharpening
if half_map_data_list and len(half_map_data_list)==2:
auto_sharpen_methods=['half_map_sharpening']
elif pdb_inp:
auto_sharpen_methods=['model_sharpening']
if not si:
# Copy parameters to si (sharpening_info_object)
si=set_up_si(var_dict=locals(),
crystal_symmetry=crystal_symmetry,
is_crystal=is_crystal,
solvent_fraction=solvent_content,
molecular_mass=molecular_mass,
auto_sharpen=auto_sharpen,
map=map,
verbose=verbose,
half_map_data_list=half_map_data_list,
pdb_inp=pdb_inp,
ncs_copies=ncs_copies,
n_residues=n_residues,out=out)
# Figure out solvent fraction
if si.solvent_fraction is None:
si.solvent_fraction=get_iterated_solvent_fraction(
crystal_symmetry=crystal_symmetry,
verbose=si.verbose,
resolve_size=si.resolve_size,
fraction_of_max_mask_threshold=si.fraction_of_max_mask_threshold,
mask_resolution=si.resolution,
map=map,
out=out)
if si.solvent_fraction:
print("Estimated solvent content: %.2f" %(si.solvent_fraction), file=out)
else:
raise Sorry("Unable to estimate solvent content...please supply "+
"solvent_content \nor molecular_mass")
# Determine if we are running half-map or model_sharpening
if half_map_data_list and len(half_map_data_list)==2:
first_half_map_data=half_map_data_list[0]
second_half_map_data=half_map_data_list[1]
else:
first_half_map_data=None
second_half_map_data=None
# Decide if we are running local sharpening (overlapping set of sharpening
# runs at various locations)
libtbx.call_back(message='sharpen',data=None)
if si.local_sharpening:
return run_local_sharpening(si=si,
auto_sharpen_methods=auto_sharpen_methods,
map=map,
ncs_obj=ncs_obj,
half_map_data_list=half_map_data_list,
pdb_inp=pdb_inp,
out=out)
# Get preliminary values of sharpening
working_map=map # use another name for map XXX
if si.iterate and not si.preliminary_sharpening_done:
si.preliminary_sharpening_done=True
si.iterate=False
# first do overall sharpening of the map to get it about right
print(80*"*", file=out)
print("\nSharpening map overall before carrying out final sharpening\n", file=out)
overall_si=deepcopy(si)
overall_si.local_sharpening=False # don't local sharpen
overall_si=auto_sharpen_map_or_map_coeffs(si=overall_si,
auto_sharpen_methods=auto_sharpen_methods,
map=map,
half_map_data_list=half_map_data_list,
pdb_inp=pdb_inp,
out=out)
working_map=overall_si.map_data
# Get solvent content again
overall_si.solvent_content=None
overall_si.solvent_fraction=get_iterated_solvent_fraction(
crystal_symmetry=crystal_symmetry,
verbose=overall_si.verbose,
resolve_size=overall_si.resolve_size,
fraction_of_max_mask_threshold=si.fraction_of_max_mask_threshold,
mask_resolution=si.resolution,
map=working_map,
out=out)
print("Resetting solvent fraction to %.2f " %(
overall_si.solvent_fraction), file=out)
si.solvent_fraction=overall_si.solvent_fraction
print("\nDone sharpening map overall before carrying out final sharpening\n", file=out)
print(80*"*", file=out)
si.b_blur_hires=0. # from now on, don't apply extra blurring
else:
working_map=map
# Now identify optimal sharpening params
print(80*"=", file=out)
print("\nRunning auto_sharpen to get sharpening parameters\n", file=out)
print(80*"=", file=out)
result=run_auto_sharpen( # get sharpening parameters standard run
si=si,
map_data=working_map,
first_half_map_data=first_half_map_data,
second_half_map_data=second_half_map_data,
pdb_inp=pdb_inp,
auto_sharpen_methods=auto_sharpen_methods,
print_result=False,
return_bsi=return_bsi,
out=out)
if return_bsi:
return result # it is a box_sharpening_info object
else:
si=result
print(80*"=", file=out)
print("\nDone running auto_sharpen to get sharpening parameters\n", file=out)
print(80*"=", file=out)
# Apply the optimal sharpening values and save map in si.map_data
# First test without sharpening if sharpening_method is b_iso,b and
# b_iso is not set
if si.sharpening_method in [
'b_iso','b_iso_to_d_cut','resolution_dependent'] and b_iso is None:
local_si=deepcopy(si)
local_si.sharpening_method='no_sharpening'
local_si.sharpen_and_score_map(map_data=working_map,out=null_out())
print("\nScore for no sharpening: %7.2f " %(local_si.score), file=out)
else:
local_si=None
print(80*"=", file=out)
print("\nApplying final sharpening to entire map", file=out)
print(80*"=", file=out)
si.sharpen_and_score_map(map_data=working_map,set_b_iso=True,out=out)
if si.discard_if_worse and local_si and local_si.score > si.score:
print("Sharpening did not improve map "+\
"(%7.2f sharpened, %7.2f unsharpened). Discarding sharpened map" %(
si.score,local_si.score), file=out)
print("\nUse discard_if_worse=False to keep the sharpening", file=out)
local_si.sharpen_and_score_map(map_data=working_map,out=out)
si=local_si
if not si.is_model_sharpening() and not si.is_half_map_sharpening():
si.show_score(out=out)
si.show_summary(out=out)
return si # si.map_data is the sharpened map
def estimate_signal_to_noise(value_list=None,minimum_value_to_include=0):
# get "noise" from rms value of value_list(n) compared with average of n-2,n-1,n+1,n+2
# assumes middle is the high part of the very smooth curve.
# Don't include values < minimum_value_to_include
mean_square_diff=0.
mean_square_diff_n=0.
for b2,b1,value,p1,p2 in zip(value_list,
value_list[1:],
value_list[2:],
value_list[3:],
value_list[4:]):
too_low=False
for xx in [b2,b1,value,p1,p2]:
if xx <minimum_value_to_include:
too_low=True
if not too_low:
mean_square_diff_n+=1
mean_square_diff+=( (b2+b1+p1+p2)*0.25 - value)**2
rmsd=(mean_square_diff/max(1,mean_square_diff_n))**0.5
if value_list.size()>0:
min_adj_sa=max(value_list[0],value_list[-1])
max_adj_sa=value_list.min_max_mean().max
signal_to_noise=(max_adj_sa-min_adj_sa)/max(1.e-10,rmsd)
else:
signal_to_noise=0.
return signal_to_noise
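# Small worked example for estimate_signal_to_noise (hypothetical values):
# value_list=flex.double([0,2,3,2,0]) gives a single 5-point window in which
# the central value 3 differs from the neighbor average 1.0 by 2.0, so
# rmsd=2.0; the signal is the curve maximum (3) minus the larger endpoint (0),
# giving a signal-to-noise of 1.5.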
def optimize_b_blur_or_d_cut_or_b_iso(
optimization_target='b_blur_hires',
local_best_si=None,
local_best_map_and_b=None,
si_id_list=None,
si_score_list=None,
delta_b=None,
original_b_iso=None,
f_array=None,
phases=None,
delta_b_blur_hires=100,
delta_d_cut=0.25,
n_cycle_optimize=5,
min_cycles=2,
n_range=5,
out=sys.stdout):
assert optimization_target in ['b_blur_hires','d_cut','b_iso']
if optimization_target=='b_blur_hires':
print("\nOptimizing b_blur_hires. ", file=out)
elif optimization_target=='d_cut':
print("\nOptimizing d_cut. ", file=out)
local_best_si.input_d_cut=local_best_si.get_d_cut()
elif optimization_target=='b_iso':
print("\nOptimizing b_iso. ", file=out)
local_best_si.show_summary(out=out)
print("Current best score=%7.3f b_iso=%5.1f b_blur_hires=%5.1f d_cut=%5.1f" %(
local_best_si.score,local_best_si.b_iso,
local_best_si.b_blur_hires,
local_best_si.get_d_cut()), file=out)
# existing values:
value_dict={}
for id,score in zip(si_id_list,si_score_list):
value_dict[id]=score
best_score=local_best_si.score
delta_b_iso=delta_b
local_best_score=best_score
improving=True
working_best_si=deepcopy(local_best_si)
for cycle in range(n_cycle_optimize):
if not improving: break
print("Optimization cycle %s" %(cycle), file=out)
print("Current best score=%7.3f b_iso=%5.1f b_blur_hires=%5.1f d_cut=%5.1f" %(
working_best_si.score,working_best_si.b_iso,
working_best_si.b_blur_hires,
working_best_si.get_d_cut()), file=out)
if working_best_si.verbose:
print(" B-sharpen B-iso B-blur Adj-SA "+\
"Kurtosis SA-ratio Regions d_cut b_blur_hires", file=out)
local_best_working_si=deepcopy(working_best_si)
improving=False
for jj in range(-n_range,n_range+1):
if optimization_target=='b_blur_hires': # ZZ try optimizing b_blur_hires
test_b_blur_hires=max(0.,working_best_si.b_blur_hires+jj*delta_b_blur_hires)
test_d_cut=working_best_si.get_d_cut()
test_b_iso=working_best_si.b_iso
elif optimization_target=='d_cut':
test_b_blur_hires=working_best_si.b_blur_hires
test_d_cut=working_best_si.get_d_cut()+jj*delta_d_cut
test_b_iso=working_best_si.b_iso
elif optimization_target=='b_iso':
test_b_blur_hires=working_best_si.b_blur_hires
test_d_cut=working_best_si.get_d_cut()
test_b_iso=working_best_si.b_iso+jj*delta_b_iso
id="%.3f_%.3f_%.3f" %(test_b_iso,test_b_blur_hires,test_d_cut)
if id in value_dict:
score=value_dict[id]
else:
local_si=deepcopy(local_best_si)
local_f_array=f_array
local_phases=phases
local_si.b_blur_hires=test_b_blur_hires
local_si.input_d_cut=test_d_cut
local_si.b_iso=test_b_iso
local_si.b_sharpen=original_b_iso-local_si.b_iso
local_map_and_b=apply_sharpening(
f_array=local_f_array,phases=local_phases,
sharpening_info_obj=local_si,
crystal_symmetry=local_si.crystal_symmetry,
out=null_out())
local_si=score_map(
map_data=local_map_and_b.map_data,sharpening_info_obj=local_si,
out=null_out())
value_dict[id]=local_si.score
if local_si.verbose:
print(" %6.1f %6.1f %5s %7.3f %7.3f" %(
local_si.b_sharpen,local_si.b_iso,
local_si.b_blur_hires,
local_si.adjusted_sa,local_si.kurtosis) + \
" %7.3f %7.3f %7.3f %7.3f " %(
local_si.sa_ratio,local_si.normalized_regions,
test_d_cut,
test_b_blur_hires
), file=out)
if local_si.score > local_best_score:
local_best_score=local_si.score
local_best_working_si=deepcopy(local_si)
if local_best_score > best_score:
best_score=local_best_score
working_best_si=deepcopy(local_best_working_si)
delta_b_iso=delta_b_iso/2
delta_b_blur_hires=delta_b_blur_hires/2
delta_d_cut=delta_d_cut/2
print("Current working best "+\
"score=%7.3f b_iso=%5.1f b_blur_hires=%5.1f d_cut=%5.1f" %(
working_best_si.score,working_best_si.b_iso,
working_best_si.b_blur_hires,
working_best_si.get_d_cut()), file=out)
improving=True
if working_best_si and working_best_si.score > local_best_si.score:
print("Using new values of b_iso and b_blur_hires and d_cut", file=out)
local_best_si=working_best_si
local_best_si.show_summary(out=out)
return local_best_si,local_best_map_and_b
def set_mean_sd_of_map(map_data=None,target_mean=None,target_sd=None):
if not map_data: return None
new_mean=map_data.as_1d().min_max_mean().mean
new_sd=max(1.e-10,map_data.sample_standard_deviation())
map_data=(map_data-new_mean)/new_sd # normalized
return map_data*target_sd + target_mean # restore original
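# set_mean_sd_of_map() rescales a map so that its mean and standard deviation
# match target_mean and target_sd; a typical use is putting a modified map back
# on the scale of the starting map (compare starting_mean/starting_sd recorded
# in run_auto_sharpen() below).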
def run_auto_sharpen(
si=None,
map_data=None,
first_half_map_data=None,
second_half_map_data=None,
pdb_inp=None,
auto_sharpen_methods=None,
print_result=True,
return_bsi=False,
out=sys.stdout):
if si.verbose:
local_out=out
else:
local_out=null_out()
# Identifies parameters for optimal map sharpening using analysis of density,
# model-correlation, or half-map correlation (first_half_map_data vs
  #  second_half_map_data).
# NOTE: We can apply this to any map_data (a part or whole of the map)
# BUT: need to update n_real if we change the part of the map!
  # These change with the map data: crystal_symmetry, solvent_fraction, n_real, wrapping.
smoothed_box_mask_data=None
original_box_map_data=None
if si.auto_sharpen and (
si.box_in_auto_sharpen or si.density_select_in_auto_sharpen or pdb_inp):
original_box_sharpening_info_obj=deepcopy(si) # should really not be box
box_pdb_inp,box_map_data,box_first_half_map_data,\
box_second_half_map_data,\
box_crystal_symmetry,box_sharpening_info_obj,\
smoothed_box_mask_data,original_box_map_data,n_buffer=\
select_box_map_data(si=si,
map_data=map_data,
first_half_map_data=first_half_map_data,
second_half_map_data=second_half_map_data,
pdb_inp=pdb_inp,
restrict_map_size=si.restrict_map_size,
out=out,local_out=local_out)
if box_sharpening_info_obj is None: # did not do it
print("Box map is similar in size to entire map..."+\
"skipping representative box of density", file=out)
original_box_sharpening_info_obj=None
crystal_symmetry=si.crystal_symmetry
else:
print("Using small map to identify optimal sharpening", file=out)
print("Box map grid: %d %d %d" %(
box_map_data.all()), file=out)
print("Box map cell: %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f "%(
box_crystal_symmetry.unit_cell().parameters()), file=out)
original_map_data=map_data
original_crystal_symmetry=si.crystal_symmetry
map_data=box_map_data
pdb_inp=box_pdb_inp
if si.density_select_in_auto_sharpen and ( # catch empty pdb_inp
not pdb_inp or not \
pdb_inp.construct_hierarchy().overall_counts().n_residues):
pdb_inp=None
crystal_symmetry=box_crystal_symmetry
if box_first_half_map_data:
first_half_map_data=box_first_half_map_data
if box_second_half_map_data:
second_half_map_data=box_second_half_map_data
# SET si for box now...
si=deepcopy(si).update_with_box_sharpening_info(
box_sharpening_info_obj=box_sharpening_info_obj)
else:
original_box_sharpening_info_obj=None
box_sharpening_info_obj=None
crystal_symmetry=si.crystal_symmetry
starting_mean=map_data.as_1d().min_max_mean().mean
starting_sd=map_data.sample_standard_deviation()
print("\nGetting original b_iso...", file=out)
map_coeffs_aa,map_coeffs,f_array,phases=effective_b_iso(
map_data=map_data,
resolution=si.resolution,
d_min_ratio=si.d_min_ratio,
scale_max=si.scale_max,
remove_aniso=si.remove_aniso,
crystal_symmetry=si.crystal_symmetry,
out=out)
original_b_iso=map_coeffs_aa.b_iso
if original_b_iso is None:
print("Could not determine original b_iso...setting to 200", file=out)
original_b_iso=200.
si.original_aniso_obj=map_coeffs_aa # set it so we can apply it later if desired
if first_half_map_data:
first_half_map_coeffs,dummy=get_f_phases_from_map(
map_data=first_half_map_data,
crystal_symmetry=si.crystal_symmetry,
d_min=si.resolution,
d_min_ratio=si.d_min_ratio,
remove_aniso=si.remove_aniso,
scale_max=si.scale_max,
return_as_map_coeffs=True,
out=local_out)
else:
first_half_map_coeffs=None
if second_half_map_data:
second_half_map_coeffs,dummy=get_f_phases_from_map(
map_data=second_half_map_data,
crystal_symmetry=si.crystal_symmetry,
d_min=si.resolution,
d_min_ratio=si.d_min_ratio,
scale_max=si.scale_max,
remove_aniso=si.remove_aniso,
return_as_map_coeffs=True,
out=local_out)
else:
second_half_map_coeffs=None
if pdb_inp:
# Getting model information if pdb_inp present ---------------------------
from cctbx.maptbx.refine_sharpening import get_model_map_coeffs_normalized
model_map_coeffs=get_model_map_coeffs_normalized(pdb_inp=pdb_inp,
si=si,
f_array=f_array,
resolution=si.resolution,
out=out)
else:
model_map_coeffs=None
# Try various methods for sharpening. # XXX fix this up
local_si=deepcopy(si).update_with_box_sharpening_info(
box_sharpening_info_obj=box_sharpening_info_obj)
if si.adjust_region_weight and \
(not si.sharpening_is_defined()) and (not si.is_model_sharpening()) \
and (not si.is_half_map_sharpening()) and (
not si.is_target_b_iso_to_d_cut()) and (
si.sharpening_target=='adjusted_sa'):
for iii in range(1): # just so we can break
local_si=deepcopy(si).update_with_box_sharpening_info(
box_sharpening_info_obj=box_sharpening_info_obj)
local_si.sharpening_target='adjusted_sa'
local_si.sharpening_method='b_iso_to_d_cut'
sa_ratio_list=[]
normalized_regions_list=[]
if 0: #si.resolution:
# 2017-07-26 reset b_low,b_mid,b_high, using 5.9*resolution**2 for b_mid
delta_search=si.search_b_max-si.search_b_min
b_mid=si.get_target_b_iso()
b_low=b_mid-150*delta_search/400
b_high=b_mid+250*delta_search/400
print("Centering search on b_iso=%7.2f" %(b_mid), file=out)
else:
b_low=min(original_b_iso,si.search_b_min)
b_high=max(original_b_iso,si.search_b_max)
b_mid=b_low+0.375*(b_high-b_low)
ok_region_weight=True
results_list=[]
kw_list=[]
first=True
id=0
for b_iso in [b_low,b_high,b_mid]:
id+=1
if first and local_si.multiprocessing=='multiprocessing' or \
local_si.nproc==1: # can do anything
local_log=out
else: # skip log entirely
local_log=None # will set this later and return as r.log_as_text
first=False
lsi=deepcopy(local_si)
lsi.b_sharpen=original_b_iso-b_iso
lsi.b_iso=b_iso
# ------ SET UP RUN HERE ----------
kw_list.append(
{
'f_array':f_array,
'phases':phases,
'crystal_symmetry':lsi.crystal_symmetry,
'local_si':lsi,
'id':id,
'out':local_log,
})
# We are going to call autosharpening with this
# ------ END OF SET UP FOR RUN ----------
"""
local_map_and_b=apply_sharpening(
f_array=f_array,phases=phases,
sharpening_info_obj=lsi,
crystal_symmetry=lsi.crystal_symmetry,
out=null_out())
local_si=score_map(map_data=local_map_and_b.map_data,
sharpening_info_obj=local_si,
out=null_out())
"""
# This is the actual run here =============
from libtbx.easy_mp import run_parallel
results_list=run_parallel(
method=si.multiprocessing,
qsub_command=si.queue_run_command,
nproc=si.nproc,
target_function=run_sharpen_and_score,kw_list=kw_list)
    # results_list looks like: [result, result2, ...]
sort_list=[]
for result in results_list:
sort_list.append([result.id,result])
sort_list.sort()
for id,result in sort_list:
local_si=result.local_si
if local_si.sa_ratio is None or local_si.normalized_regions is None:
ok_region_weight=False
sa_ratio_list.append(local_si.sa_ratio)
normalized_regions_list.append(local_si.normalized_regions)
if not ok_region_weight:
break # skip it
# Set region weight so that either:
# (1) delta_sa_ratio==region_weight*delta_normalized_regions
# (2) sa_ratio=region_weight*normalized_regions (at low B)
# region weight from change over entire region
d_sa_ratio=sa_ratio_list[0]-sa_ratio_list[1]
d_normalized_regions=normalized_regions_list[0]-normalized_regions_list[1]
delta_region_weight=si.region_weight_factor*d_sa_ratio/max(
1.e-10,d_normalized_regions)
if d_sa_ratio < 0 or d_normalized_regions < 0:
print("Not using delta_region_weight with unusable values", file=out)
ok_region_weight=False
# region weight from initial values
init_region_weight=si.region_weight_factor* \
sa_ratio_list[0]/max(1.e-10,normalized_regions_list[0])
# Ensure that adjusted_sa at b_mid is > than either end
# adjusted_sa=sa_ratio - region_weight*normalized_regions
# sa[2]=sa_ratio_list[2]-region_weight*normalized_regions[2]
# sa[1]=sa_ratio_list[1]-region_weight*normalized_regions[1]
# sa[0]=sa_ratio_list[0]-region_weight*normalized_regions[0]
# sa[2] >= sa[1] and sa[2] >= sa[0]
# sa_ratio_list[2]-region_weight*normalized_regions[2] >=
# sa_ratio_list[1]-region_weight*normalized_regions[1]
# NOTE: sa_ratio_list and normalized_regions both decrease in order:
# low med high or [0] [2] [1]
max_region_weight = (sa_ratio_list[2]- sa_ratio_list[1])/max(0.001,
normalized_regions_list[2]-normalized_regions_list[1])
min_region_weight = (sa_ratio_list[0]- sa_ratio_list[2])/max(0.001,
normalized_regions_list[0]-normalized_regions_list[2])
min_region_weight=max(1.e-10,min_region_weight) # positive
max_region_weight=max(1.e-10,max_region_weight) # positive
delta_weight=max(0.,max_region_weight-min_region_weight)
min_buffer=delta_weight*si.region_weight_buffer
min_region_weight+=min_buffer
max_region_weight-=min_buffer
min_max_region_weight=True
if min_region_weight >= max_region_weight:
print("Warning: min_region_weight >= max_region_weight...", file=out)
min_max_region_weight=False
#ok_region_weight=False
print("Region weight bounds: Min: %7.1f Max: %7.1f " %(
min_region_weight,max_region_weight), file=out)
print("Region weight estimates:", file=out)
print("From ratio of low-B surface area to regions: %7.1f" %(
init_region_weight), file=out)
print("Ratio of change in surface area to change in regions: %7.1f" %(
delta_region_weight), file=out)
# put them in bounds but note if we did it
out_of_range=False
if ok_region_weight and si.region_weight_method=='initial_ratio':
if min_max_region_weight and (
init_region_weight > max_region_weight or \
init_region_weight<min_region_weight):
init_region_weight=max(
min_region_weight,min(max_region_weight,init_region_weight))
out_of_range=True
print("\nRegion weight adjusted to %7.1f using initial ratio" %(
init_region_weight), file=out)
si.region_weight=init_region_weight
elif ok_region_weight and si.region_weight_method=='delta_ratio':
if min_max_region_weight and (
delta_region_weight > max_region_weight or \
delta_region_weight<min_region_weight):
delta_region_weight=max(
min_region_weight,min(max_region_weight,delta_region_weight))
out_of_range=True
si.region_weight=delta_region_weight
print("\nRegion weight set to %7.1f using overall ratio and " %(
si.region_weight) +\
"\nfactor of %5.1f" %(si.region_weight_factor), file=out)
else: # just use default target for b_iso
si.region_weight=si.region_weight_default
print("Skipping region_weight analysis as signal-to-noise is zero ("+\
"adjusted sa\nvs b_iso does not have low values at extremes and "+\
"clear maximum in the middle.)", file=out)
print("\nUnable to set region_weight ... using value of %7.2f" % (
si.region_weight), file=out)
if si.discard_if_worse:
print("Setting discard_if_worse=False as region_weight failed ", file=out)
si.discard_if_worse=False
if out_of_range and 'resolution_dependent' in auto_sharpen_methods:
new_list=[]
have_something_left=False
for x in auto_sharpen_methods:
if x != 'resolution_dependent':
if str(x) != 'None':
have_something_left=True
new_list.append(x)
if have_something_left:
auto_sharpen_methods=new_list
print("Removed resolution_dependent sharpening ( "+\
"weights were out of range)", file=out)
if box_sharpening_info_obj:
si.local_solvent_fraction=box_sharpening_info_obj.solvent_fraction
else:
si.local_solvent_fraction=si.solvent_fraction
null_si=None
best_si=deepcopy(si).update_with_box_sharpening_info(
box_sharpening_info_obj=box_sharpening_info_obj)
best_map_and_b=map_and_b_object()
if si.sharpening_is_defined(): # Use this if come in with method
print("\nUsing specified sharpening", file=out)
best_si=set_up_sharpening(si=si,map_data=map_data,out=out)
best_si.sharpen_and_score_map(map_data=map_data,
out=out).show_score(out=out)
best_si.show_summary(out=out)
else:
if best_si.is_model_sharpening():
print("\nSetting up model sharpening", file=out)
elif best_si.is_half_map_sharpening():
print("\nSetting up half-map sharpening", file=out)
else:
print("\nTesting sharpening methods with target of %s" %(
best_si.sharpening_target), file=out)
if not auto_sharpen_methods or auto_sharpen_methods==['None']:
auto_sharpen_methods=['no_sharpening']
for m in auto_sharpen_methods:
# ------------------------
if m in ['no_sharpening','resolution_dependent','model_sharpening',
'half_map_sharpening','target_b_iso_to_d_cut']:
if m=='target_b_iso_to_d_cut':
b_min=si.get_target_b_iso()
b_max=si.get_target_b_iso()
else:
b_min=original_b_iso
b_max=original_b_iso
b_n=1
k_sharpen=0.
delta_b=0
if m in ['resolution_dependent','model_sharpening',
'half_map_sharpening']:
pass # print out later
else:
print("\nB-sharpen B-iso k_sharpen SA "+\
"Kurtosis sa_ratio Normalized regions", file=out)
# ------------------------
# ------------------------
else: # ['b_iso','b_iso_to_d_cut']:
if si.search_b_n>1:
b_min=min(original_b_iso,si.search_b_min)
b_max=max(original_b_iso,si.search_b_max)
else: # for just one, take it
b_min=si.search_b_min
b_max=si.search_b_max
b_n=si.search_b_n
delta_b=(b_max-b_min)/max(1,b_n-1)
print("\nTesting %s with b_iso from %7.1f to %7.1f in %d steps of %7.1f" %(
m,b_min,b_max,b_n,delta_b), file=out)
print("(b_sharpen from %7.1f to %7.1f ) " %(
original_b_iso-b_min,original_b_iso-b_max), file=out)
if m=='b_iso':
k_sharpen=0.
else:
k_sharpen=si.k_sharpen
print("\nB-sharpen B-iso k_sharpen SA "+\
"Kurtosis sa_ratio Normalized regions", file=out)
# ------------------------
local_best_map_and_b=map_and_b_object()
local_best_si=deepcopy(si).update_with_box_sharpening_info(
box_sharpening_info_obj=box_sharpening_info_obj)
si_b_iso_list=flex.double()
si_score_list=flex.double()
si_id_list=[]
kw_list=[]
first=True
if return_bsi: assert local_si.nproc==1
results_list=[]
for i in range(b_n):
# ============================================
local_si=deepcopy(si).update_with_box_sharpening_info(
box_sharpening_info_obj=box_sharpening_info_obj)
local_si.sharpening_method=m
local_si.n_real=map_data.all()
local_si.k_sharpen=k_sharpen
if first and local_si.multiprocessing=='multiprocessing' or \
local_si.nproc==1: # can do anything
local_log=out
else: # skip log entirely
local_log=None # will set this later and return as r.log_as_text
first=False
if m=='resolution_dependent':
print("\nRefining resolution-dependent sharpening based on %s" %(
local_si.residual_target), file=out)
local_si.b_sharpen=0
local_si.b_iso=original_b_iso
from cctbx.maptbx.refine_sharpening import run as refine_sharpening
local_f_array,local_phases=refine_sharpening(
map_coeffs=map_coeffs,
sharpening_info_obj=local_si,
out=out)
elif m=='model_sharpening':
print("\nUsing model-based sharpening", file=out)
local_si.b_sharpen=0
local_si.b_iso=original_b_iso
from cctbx.maptbx.refine_sharpening import scale_amplitudes
scale_amplitudes(
model_map_coeffs=model_map_coeffs,map_coeffs=map_coeffs,
si=local_si,out=out)
# local_si contains target_scale_factors now
local_f_array=f_array
local_phases=phases
elif m=='half_map_sharpening':
print("\nUsing half-map-based sharpening", file=out)
local_si.b_sharpen=0
local_si.b_iso=original_b_iso
from cctbx.maptbx.refine_sharpening import scale_amplitudes
scale_amplitudes(
model_map_coeffs=model_map_coeffs,
map_coeffs=map_coeffs,
first_half_map_coeffs=first_half_map_coeffs,
second_half_map_coeffs=second_half_map_coeffs,
si=local_si,out=out)
# local_si contains target_scale_factors now
local_f_array=f_array
local_phases=phases
else:
local_f_array=f_array
local_phases=phases
b_iso=b_min+i*delta_b
local_si.b_sharpen=original_b_iso-b_iso
local_si.b_iso=b_iso
# ------ SET UP RUN HERE ----------
kw_list.append(
{
'f_array':local_f_array,
'phases':local_phases,
'crystal_symmetry':local_si.crystal_symmetry,
'original_b_iso':original_b_iso,
'local_si':local_si,
'm':m,
'return_bsi':return_bsi,
'out':local_log,
'id':i+1,
})
# We are going to call autosharpening with this
# ------ END OF SET UP FOR RUN ----------
# This is the actual run here =============
from libtbx.easy_mp import run_parallel
results_list=run_parallel(
method=si.multiprocessing,
qsub_command=si.queue_run_command,
nproc=si.nproc,
target_function=run_sharpen_and_score,kw_list=kw_list)
# results looks like: [result,result2]
sort_list=[]
for result in results_list:
sort_list.append([result.id,result])
sort_list.sort()
for id,result in sort_list:
local_si=result.local_si
local_map_and_b=result.local_map_and_b
if result.text:
print(result.text)
# Run through all result to get these
if local_si.b_sharpen is not None and local_si.b_iso is not None and\
local_si.k_sharpen is not None and local_si.kurtosis is not None \
and local_si.adjusted_sa is not None and local_si.score is not None:
si_b_iso_list.append(local_si.b_iso)
si_score_list.append(local_si.score)
if local_si.k_sharpen is not None:
si_id_list.append("%.3f_%.3f_%.3f" %(
local_si.b_iso,local_si.k_sharpen,
local_si.get_d_cut()))
if m=='no_sharpening':
null_si=local_si
if local_best_si.score is None or local_si.score>local_best_si.score:
local_best_si=local_si
local_best_map_and_b=local_map_and_b
# ============================================
# DONE WITH ALL RUNS
if not local_best_si.is_model_sharpening() and \
not local_best_si.is_half_map_sharpening():
if local_best_si.sharpening_method=='resolution_dependent':
print("\nBest scores for sharpening with "+\
"b[0]=%6.2f b[1]=%6.2f b[2]=%6.2f: " %(
local_best_si.resolution_dependent_b[0],
local_best_si.resolution_dependent_b[1],
local_best_si.resolution_dependent_b[2]), file=out)
else:
print("\nBest scores for sharpening with "+\
"b_iso=%6.1f b_sharpen=%6.1f k_sharpen=%s: " %(
local_best_si.b_iso,local_best_si.b_sharpen,
local_best_si.k_sharpen), file=out)
if local_best_si.score is not None:
local_best_si.show_summary(out=out)
print("Adjusted surface area: %7.3f Kurtosis: %7.3f Score: %7.3f\n" %(
local_best_si.adjusted_sa,local_best_si.kurtosis,local_best_si.score), file=out)
if si_score_list.size()>1: # test for signal
signal_to_noise=estimate_signal_to_noise(value_list=si_score_list)
print("Estimated signal-to-noise in ID of optimal sharpening: %5.1f" %(
signal_to_noise), file=out)
if signal_to_noise<local_best_si.signal_min and \
'target_b_iso_to_d_cut' in auto_sharpen_methods:
print("Skipping this analysis as signal-to-noise is less than %5.1f " %(
local_best_si.signal_min), file=out)
local_best_si.score=None
optimize_b_blur_hires=False
optimize_d_cut=False
n_cycles=0
if local_best_si.score is not None and local_best_si.optimize_d_cut and \
local_best_si.sharpening_method in ['b_iso_to_d_cut','b_iso']:
optimize_d_cut=True
n_cycles+=1
if local_best_si.score is not None and \
local_best_si.optimize_b_blur_hires and \
local_best_si.k_sharpen is not None and \
local_best_si.sharpening_method in ['b_iso_to_d_cut','b_iso']:
optimize_b_blur_hires=True
n_cycles+=1
##########################################
optimize_b_iso=True
for cycle in range(n_cycles):
if optimize_b_blur_hires:
local_best_si,local_best_map_and_b=optimize_b_blur_or_d_cut_or_b_iso(
#optimization_target='k_sharpen',
optimization_target='b_blur_hires',
local_best_si=local_best_si,
local_best_map_and_b=local_best_map_and_b,
si_id_list=si_id_list,
si_score_list=si_score_list,
delta_b=delta_b,
original_b_iso=original_b_iso,
f_array=f_array,
phases=phases,
out=out)
if optimize_d_cut:
local_best_si,local_best_map_and_b=optimize_b_blur_or_d_cut_or_b_iso(
optimization_target='d_cut',
local_best_si=local_best_si,
local_best_map_and_b=local_best_map_and_b,
si_id_list=si_id_list,
si_score_list=si_score_list,
delta_b=delta_b,
original_b_iso=original_b_iso,
f_array=f_array,
phases=phases,
out=out)
if optimize_b_iso:
local_best_si,local_best_map_and_b=optimize_b_blur_or_d_cut_or_b_iso(
optimization_target='b_iso',
local_best_si=local_best_si,
local_best_map_and_b=local_best_map_and_b,
si_id_list=si_id_list,
si_score_list=si_score_list,
delta_b=delta_b,
original_b_iso=original_b_iso,
f_array=f_array,
phases=phases,
out=out)
##########################################
if (local_best_si.score is not None or
local_best_si.is_model_sharpening()) and (
best_si.score is None or local_best_si.score > best_si.score):
best_si=local_best_si
best_map_and_b=local_best_map_and_b
if not best_si.is_model_sharpening() and \
not best_si.is_half_map_sharpening():
print("This is the current best score\n", file=out)
if (best_si.score is not None ) and (
not best_si.is_model_sharpening() ) and (not best_si.is_half_map_sharpening()):
print("\nOverall best sharpening method: %s Score: %7.3f\n" %(
best_si.sharpening_method,best_si.score), file=out)
best_si.show_summary(out=out)
if (not best_si.is_model_sharpening()) and \
(not best_si.is_half_map_sharpening()) and null_si:
if best_si.score>null_si.score: # we improved them..
print("Improved score with sharpening...", file=out)
else:
print("Did not improve score with sharpening...", file=out)
if return_bsi:
map_data=best_map_and_b.map_data
map_data=set_mean_sd_of_map(map_data=map_data,
target_mean=starting_mean,target_sd=starting_sd)
box_sharpening_info_obj.map_data=map_data
box_sharpening_info_obj.smoothed_box_mask_data=smoothed_box_mask_data
box_sharpening_info_obj.original_box_map_data=original_box_map_data
box_sharpening_info_obj.n_buffer=n_buffer
box_sharpening_info_obj.crystal_symmetry=best_si.crystal_symmetry
box_sharpening_info_obj.resolution=best_si.resolution
box_sharpening_info_obj.d_min_ratio=best_si.d_min_ratio
box_sharpening_info_obj.scale_max=best_si.scale_max
box_sharpening_info_obj.smoothing_radius=best_si.smoothing_radius
box_sharpening_info_obj.b_iso=best_map_and_b.final_b_iso
box_sharpening_info_obj.starting_b_iso=best_map_and_b.starting_b_iso
return box_sharpening_info_obj
if original_box_sharpening_info_obj:
# Put back original crystal_symmetry with original_box_sharpening_info_obj
print("\nRestoring original symmetry to best sharpening info", file=out)
best_si.update_with_box_sharpening_info(
box_sharpening_info_obj=original_box_sharpening_info_obj)
print("(%7.3f, %7.3f, %7.3f, %7.3f, %7.3f, %7.3f) "%(tuple(
best_si.crystal_symmetry.unit_cell().parameters())), file=out)
# and set tracking data with result
return best_si
def run_sharpen_and_score(f_array=None,
phases=None,
local_si=None,
crystal_symmetry=None,
original_b_iso=None,
m=None,
return_bsi=None,
id=None,
out=sys.stdout):
local_map_and_b=apply_sharpening(
f_array=f_array,phases=phases,
sharpening_info_obj=local_si,
crystal_symmetry=crystal_symmetry,
out=null_out())
local_si=score_map(map_data=local_map_and_b.map_data,
sharpening_info_obj=local_si,
out=null_out())
# Record b_iso values
if not local_map_and_b.starting_b_iso:
local_map_and_b.starting_b_iso=original_b_iso
if not local_map_and_b.final_b_iso:
local_map_and_b.final_b_iso=local_si.b_iso
# This is printout below here ===============
if m=='resolution_dependent':
text=\
"\nb[0] b[1] b[2] SA Kurtosis sa_ratio Normalized regions"
text+="\n"+\
"\nB-sharpen B-iso k_sharpen SA "+\
"Kurtosis sa_ratio Normalized regions"
text+="\n"+" %6.2f %6.2f %6.2f " %(
local_si.resolution_dependent_b[0],
local_si.resolution_dependent_b[1],
local_si.resolution_dependent_b[2]) +\
" %7.3f %7.3f " %(
local_si.adjusted_sa,local_si.kurtosis)+\
" %7.3f %7.3f" %(
local_si.sa_ratio,local_si.normalized_regions)
elif local_si.b_sharpen is not None and local_si.b_iso is not None and\
local_si.k_sharpen is not None and local_si.kurtosis is not None \
and local_si.adjusted_sa is not None:
text=\
" %6.1f %6.1f %5s %7.3f %7.3f" %(
local_si.b_sharpen,local_si.b_iso,
local_si.k_sharpen,local_si.adjusted_sa,local_si.kurtosis) + \
" %7.3f %7.3f" %(
local_si.sa_ratio,local_si.normalized_regions)
else:
text=""
if return_bsi:
r=group_args(
local_si=local_si,
local_map_and_b=local_map_and_b,
text=text,
id=id)
else:
r=group_args(
local_si=local_si,
local_map_and_b=None,
text=text,
id=id)
return r
def effective_b_iso(map_data=None,tracking_data=None,
box_sharpening_info_obj=None,
crystal_symmetry=None,
resolution=None,
remove_aniso=None,
d_min_ratio=None,
scale_max=None,
out=sys.stdout):
if not crystal_symmetry:
if box_sharpening_info_obj:
crystal_symmetry=box_sharpening_info_obj.crystal_symmetry
else:
crystal_symmetry=tracking_data.crystal_symmetry
if resolution:
d_min=resolution
else:
d_min=tracking_data.params.crystal_info.resolution
if not d_min_ratio:
d_min_ratio=tracking_data.params.map_modification.d_min_ratio
map_coeffs,map_coeffs_ra=get_f_phases_from_map(map_data=map_data,
crystal_symmetry=crystal_symmetry,
d_min=d_min,
d_min_ratio=d_min_ratio,
scale_max=scale_max,
remove_aniso=remove_aniso,
return_as_map_coeffs=True,
out=out)
f_array,phases=map_coeffs_as_fp_phi(map_coeffs)
b_iso=map_coeffs_ra.b_iso
if b_iso is not None:
print("Effective B-iso = %7.2f\n" %(b_iso), file=out)
else:
print("Effective B-iso not determined\n", file=out)
return map_coeffs_ra,map_coeffs,f_array,phases
def update_tracking_data_with_sharpening(map_data=None,tracking_data=None,
si=None,out=sys.stdout):
# Set shifted_map_info if map_data is new
if tracking_data.params.output_files.shifted_sharpened_map_file:
shifted_sharpened_map_file=os.path.join(
tracking_data.params.output_files.output_directory,
tracking_data.params.output_files.shifted_sharpened_map_file)
else:
shifted_sharpened_map_file=None
from cctbx.maptbx.segment_and_split_map import write_ccp4_map
if shifted_sharpened_map_file:
write_ccp4_map(tracking_data.crystal_symmetry,
shifted_sharpened_map_file,map_data)
print("Wrote shifted, sharpened map to %s" %(
shifted_sharpened_map_file), file=out)
tracking_data.set_shifted_map_info(file_name=
shifted_sharpened_map_file,
crystal_symmetry=tracking_data.crystal_symmetry,
origin=map_data.origin(),
all=map_data.all(),
b_sharpen=None)
def get_high_points_from_map(
map_data=None,
boundary_radius=5.,
unit_cell=None,
out=sys.stdout):
max_in_map_data=map_data.as_1d().min_max_mean().max
for cutoff in [0.99,0.98,0.95,0.90,0.50]:
high_points_mask=(map_data>= cutoff*max_in_map_data)
sda=map_data.as_1d().min_max_mean().max
for nth_point in [4,2,1]:
sites_cart=get_marked_points_cart(mask_data=high_points_mask,
unit_cell=unit_cell,every_nth_point=nth_point,
boundary_radius=boundary_radius)
if sites_cart.size()>0: break
if sites_cart.size()>0: break
assert sites_cart.size()>0
del high_points_mask
sites_cart=sites_cart[:1]
xyz_frac=unit_cell.fractionalize(sites_cart[0])
value=map_data.value_at_closest_grid_point(xyz_frac)
print("High point in map at (%7.2f %7.2f %7.2f) with value of %7.2f " %(
sites_cart[0][0],sites_cart[0][1],sites_cart[0][2],value), file=out)
return sites_cart
def get_one_au(tracking_data=None,
sites_cart=None,
ncs_obj=None,
map_data=None,
starting_mask=None,
radius=None,
every_nth_point=None,
removed_ncs=None,
out=sys.stdout):
unit_cell=tracking_data.crystal_symmetry.unit_cell()
if removed_ncs: # take everything left
mm=map_data.as_1d().min_max_mean()
mask_threshold=mm.min+max(0.00001,0.0001*(mm.mean-mm.min)) # just above min
else:
mask_threshold=tracking_data.params.segmentation.mask_threshold
every_nth_point=tracking_data.params.segmentation.grid_spacing_for_au
radius=tracking_data.params.segmentation.radius
if not radius:
radius=set_radius(unit_cell=unit_cell,map_data=map_data,
every_nth_point=every_nth_point)
tracking_data.params.segmentation.radius=radius
print("\nRadius for AU identification: %7.2f A" %(radius), file=out)
overall_mask,max_in_sd_map,sd_map=get_overall_mask(map_data=map_data,
mask_threshold=mask_threshold,
crystal_symmetry=tracking_data.crystal_symmetry,
resolution=tracking_data.params.crystal_info.resolution,
solvent_fraction=tracking_data.solvent_fraction,
radius=radius,
out=out)
if starting_mask:
print("Points in starting mask:",starting_mask.count(True), file=out)
print("Points in overall mask:",overall_mask.count(True), file=out)
print("Points in both:",(starting_mask & overall_mask).count(True), file=out)
if tracking_data.params.crystal_info.is_crystal:
# take starting mask as overall...
overall_mask= starting_mask
else: # usual
# make sure overall mask is at least as big..
overall_mask=(overall_mask | starting_mask)
print("New size of overall mask: ",overall_mask.count(True), file=out)
else:
if not sites_cart: # pick top of map
sites_cart=get_high_points_from_map(
boundary_radius=radius,
map_data=sd_map,
unit_cell=unit_cell,out=out)
starting_mask=mask_from_sites_and_map( # starting au mask
map_data=sd_map,unit_cell=unit_cell,
sites_cart=sites_cart,radius=radius,
overall_mask=overall_mask)
del sd_map
au_mask,ncs_mask=get_ncs_mask(
map_data=map_data,unit_cell=unit_cell,ncs_object=ncs_obj,
starting_mask=starting_mask,
radius=radius,
overall_mask=overall_mask,
every_nth_point=every_nth_point)
print("Points in au: %d in ncs: %d (total %7.1f%%) both: %d Not marked: %d" %(
au_mask.count(True),ncs_mask.count(True),
100.*float(au_mask.count(True)+ncs_mask.count(True))/au_mask.size(),
(au_mask & ncs_mask).count(True),
au_mask.size()-au_mask.count(True)-ncs_mask.count(True),), file=out)
return au_mask
def set_up_sharpening(si=None,map_data=None,out=sys.stdout):
print("\nCarrying out specified sharpening/blurring of map", file=out)
check_si=si # just use input information
check_si.show_summary(out=out)
if check_si.is_target_b_iso_to_d_cut():
check_si.b_iso=check_si.get_target_b_iso()
check_si.b_sharpen=None
print("Setting target b_iso of %7.1f " %(check_si.b_iso), file=out)
if check_si.b_sharpen is None and check_si.b_iso is not None:
# need to figure out b_sharpen
print("\nGetting b_iso of map", file=out)
b_iso=check_si.get_effective_b_iso(map_data=map_data,out=out)
check_si.b_sharpen=b_iso-check_si.b_iso # sharpen is what to
print("Value of b_sharpen to obtain b_iso of %s is %5.2f" %(
check_si.b_iso,check_si.b_sharpen), file=out)
elif check_si.b_sharpen is not None:
print("Sharpening b_sharpen will be %s" %(check_si.b_sharpen), file=out)
elif check_si.resolution_dependent_b:
print("Resolution-dependent b_sharpening values:" +\
"b0: %7.2f b1: %7.2f b2: %7.2f " %(
tuple(check_si.resolution_dependent_b)), file=out)
elif check_si.target_scale_factors:
print("Model sharpening scale values:", file=out)
for x in check_si.target_scale_factors: print(x, end=' ', file=out)
print(file=out)
return check_si
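# Worked example (illustrative comment only, not from the original source):
# in set_up_sharpening above, when only a target b_iso is given, b_sharpen is
# the difference between the map's current effective B-iso and that target.
# For instance, a map with an effective B-iso of 120 and a target b_iso of 60
# gives b_sharpen = 120 - 60 = 60.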
def run(args,
params=None,
map_data=None,
crystal_symmetry=None,
sequence=None,
half_map_data_list=None,
ncs_obj=None,
tracking_data=None,
target_scattered_points=None,
is_iteration=False,
pdb_hierarchy=None,
target_xyz=None,
target_hierarchy=None,
sharpening_target_pdb_inp=None,
out=sys.stdout):
if is_iteration:
print("\nIteration tracking data:", file=out)
tracking_data.show_summary(out=out)
else:
# get the parameters and map_data (sharpened, magnified, shifted...)
params,map_data,half_map_data_list,pdb_hierarchy,tracking_data,\
shifted_ncs_object=get_params( #
args,map_data=map_data,crystal_symmetry=crystal_symmetry,
half_map_data_list=half_map_data_list,
ncs_object=ncs_obj,
sequence=sequence,
sharpening_target_pdb_inp=sharpening_target_pdb_inp,out=out)
if params.control.shift_only:
return map_data,ncs_obj,tracking_data
elif params.control.check_ncs or \
params.control.sharpen_only:
return None,None,tracking_data
if params.input_files.pdb_to_restore:
restore_pdb(params,tracking_data=tracking_data,out=out)
return None,None,tracking_data
# read and write the ncs (Normally point-group NCS)
ncs_obj,tracking_data=get_ncs(params=params,tracking_data=tracking_data,
ncs_object=shifted_ncs_object,
out=out)
if params.input_files.target_ncs_au_file: # read in target
import iotbx.pdb
target_hierarchy=iotbx.pdb.input(
file_name=params.input_files.target_ncs_au_file).construct_hierarchy()
print("\nShifting model based on origin shift (if any)", file=out)
print("Coordinate shift is (%7.2f,%7.2f,%7.2f)" %(
tuple(tracking_data.origin_shift)), file=out)
if not map_data:
raise Sorry("Need map data for segment_and_split_map")
if params.output_files.shifted_map_file:
shifted_map_file=os.path.join(
tracking_data.params.output_files.output_directory,
params.output_files.shifted_map_file)
else:
shifted_map_file=None
if params.output_files.shifted_ncs_file:
shifted_ncs_file=os.path.join(
tracking_data.params.output_files.output_directory,
params.output_files.shifted_ncs_file)
else:
shifted_ncs_file=None
if params.output_files.shifted_ncs_file:
shifted_pdb_file=os.path.join(
tracking_data.params.output_files.output_directory,
params.output_files.shifted_pdb_file)
else:
shifted_pdb_file=None
ncs_obj,pdb_hierarchy,target_hierarchy,\
tracking_data,sharpening_target_pdb_inp=apply_origin_shift(
shifted_map_file=shifted_map_file,
shifted_pdb_file=shifted_pdb_file,
shifted_ncs_file=shifted_ncs_file,
origin_shift=tracking_data.origin_shift,
shifted_ncs_object=shifted_ncs_object,
pdb_hierarchy=pdb_hierarchy,
target_hierarchy=target_hierarchy,
map_data=map_data,
tracking_data=tracking_data,
sharpening_target_pdb_inp=sharpening_target_pdb_inp,
out=out)
if target_hierarchy:
target_xyz=target_hierarchy.atoms().extract_xyz()
del target_hierarchy
# We can use params.input_files.target_ncs_au_file here to define ncs au
if target_xyz and not target_scattered_points:
target_scattered_points=flex.vec3_double()
target_scattered_points.append(target_xyz.mean())
# get the chain types and therefore (using ncs_copies) volume fraction
tracking_data=get_solvent_fraction(params,
ncs_object=ncs_obj,tracking_data=tracking_data,out=out)
# Done with getting params and maps
# Summarize after any sharpening
tracking_data.show_summary(out=out)
original_ncs_obj=ncs_obj # in case we need it later...
original_input_ncs_info=tracking_data.input_ncs_info
removed_ncs=False
n_residues=tracking_data.n_residues
ncs_copies=tracking_data.input_ncs_info.number_of_operators
if (not tracking_data.solvent_fraction) and \
params.crystal_info.molecular_mass:
tracking_data.solvent_fraction=get_solvent_fraction_from_molecular_mass(
crystal_symmetry=tracking_data.crystal_symmetry,
molecular_mass=params.crystal_info.molecular_mass,
out=out)
if tracking_data.solvent_fraction:
solvent_fraction=tracking_data.solvent_fraction
else:
raise Sorry("Need solvent fraction or molecular mass or sequence file")
# Now usual method, using our new map...should duplicate best result above
for itry in range(2):
# get connectivity (conn=connectivity_object.result)
b_vs_region=b_vs_region_info()
si=sharpening_info(tracking_data=tracking_data)
co,sorted_by_volume,min_b,max_b,unique_expected_regions,best_score,\
new_threshold,starting_density_threshold=\
get_connectivity(
b_vs_region=b_vs_region,
map_data=map_data,
iterate_with_remainder=params.segmentation.iterate_with_remainder,
n_residues=n_residues,
ncs_copies=ncs_copies,
solvent_fraction=solvent_fraction,
fraction_occupied=si.fraction_occupied,
min_volume=si.min_volume,
min_ratio=si.min_ratio,
wrapping=si.wrapping,
residues_per_region=si.residues_per_region,
max_ratio_to_target=si.max_ratio_to_target,
min_ratio_to_target=si.min_ratio_to_target,
min_ratio_of_ncs_copy_to_first=si.min_ratio_of_ncs_copy_to_first,
starting_density_threshold=si.starting_density_threshold,
density_threshold=si.density_threshold,
crystal_symmetry=si.crystal_symmetry,
chain_type=si.chain_type,
verbose=si.verbose,
out=out)
params.segmentation.starting_density_threshold=starting_density_threshold # have to set tracking data as we are passing that above
tracking_data.params.segmentation.starting_density_threshold=starting_density_threshold # have to set tracking data as we are passing that above
if new_threshold:
print("\nNew threshold is %7.2f" %(new_threshold), file=out)
if co is None: # no luck
return None,None,tracking_data
# Check to see which regions are in more than one au of the NCS
# and set them aside. Group ncs-related regions together
ncs_group_obj,tracking_data,equiv_dict_ncs_copy=identify_ncs_regions(
params,sorted_by_volume=sorted_by_volume,
co=co,
min_b=min_b,
max_b=max_b,
ncs_obj=ncs_obj,
tracking_data=tracking_data,
out=out)
if ncs_group_obj and ncs_group_obj.ncs_group_list: # ok
break
elif ncs_obj and itry==0 and not is_iteration:# try again
print("No NCS groups identified on first try...taking entire NCS AU.", file=out)
# Identify ncs au
au_mask=get_one_au(tracking_data=tracking_data,
ncs_obj=ncs_obj,
map_data=map_data,out=out)
s=(au_mask==False)
min_in_map=map_data.as_1d().min_max_mean().min
map_data.set_selected(s,min_in_map) # mask out all but au
from mmtbx.ncs.ncs import ncs
ncs_obj=ncs()
ncs_obj.set_unit_ncs()
tracking_data.set_ncs_obj(ncs_obj=None)
tracking_data.update_ncs_info(number_of_operators=1)
if n_residues:
n_residues=n_residues/ncs_copies
solvent_fraction=max(0.001,min(0.999,
1-((1-solvent_fraction)/ncs_copies)))
ncs_copies=1
params.segmentation.require_complete=False
params.segmentation.iterate_with_remainder=False # so we do not iterate
removed_ncs=True
# Run again
else: # tried twice, give up
return None,None,tracking_data
# Choose one region or group of regions from each ncs_group in the list
# Optimize the closeness of centers
# Select group of regions that are close together and represent one au
ncs_group_obj,scattered_points=\
select_regions_in_au(
params,
ncs_group_obj=ncs_group_obj,
equiv_dict_ncs_copy=equiv_dict_ncs_copy,
tracking_data=tracking_data,
target_scattered_points=target_scattered_points,
unique_expected_regions=unique_expected_regions,
out=out)
# write out mask and map for all the selected regions...
# Iterate if desired
if params.segmentation.iterate_with_remainder and \
ncs_group_obj.selected_regions:
print("\nCreating remaining mask and map", file=out)
map_data_remaining=create_remaining_mask_and_map(params,
ncs_group_obj=ncs_group_obj,
map_data=map_data,
crystal_symmetry=tracking_data.crystal_symmetry,
out=out)
remainder_ncs_group_obj=iterate_search(params,
map_data=map_data,
map_data_remaining=map_data_remaining,
ncs_obj=ncs_obj,
ncs_group_obj=ncs_group_obj,
scattered_points=scattered_points,
tracking_data=tracking_data,
out=out)
else:
remainder_ncs_group_obj=None
# collect all NCS ops that are needed to relate all the regions
# that are used
ncs_ops_used=ncs_group_obj.ncs_ops_used
if remainder_ncs_group_obj and remainder_ncs_group_obj.ncs_ops_used:
for x in remainder_ncs_group_obj.ncs_ops_used:
if not x in ncs_ops_used: ncs_ops_used.append(x)
if ncs_ops_used:
ncs_ops_used.sort()
print("Final NCS ops used: ",ncs_ops_used, file=out)
# Save the used NCS ops
ncs_used_obj=ncs_group_obj.ncs_obj.deep_copy(ops_to_keep=ncs_ops_used)
if params.output_files.shifted_used_ncs_file:
shifted_used_ncs_file=os.path.join(
tracking_data.params.output_files.output_directory,
params.output_files.shifted_used_ncs_file)
ncs_used_obj.format_all_for_group_specification(
file_name=shifted_used_ncs_file)
tracking_data.set_shifted_used_ncs_info(file_name=shifted_used_ncs_file,
number_of_operators=ncs_used_obj.max_operators(),
is_helical_symmetry=tracking_data.input_ncs_info.is_helical_symmetry)
tracking_data.shifted_used_ncs_info.show_summary(out=out)
# Write out final maps and dummy atom files
if params.output_files.write_output_maps:
print("\nWriting output maps", file=out)
else:
print("\nSetting up but not writing output maps", file=out)
map_files_written=write_output_files(params,
tracking_data=tracking_data,
map_data=map_data,
half_map_data_list=half_map_data_list,
ncs_group_obj=ncs_group_obj,
remainder_ncs_group_obj=remainder_ncs_group_obj,
pdb_hierarchy=pdb_hierarchy,
removed_ncs=removed_ncs,
out=out)
ncs_group_obj.set_map_files_written(map_files_written)
# Restore ncs info if we removed it
if removed_ncs:
print("\nRestoring original NCS info to tracking_data", file=out)
tracking_data.input_ncs_info=original_input_ncs_info
if params.output_files.output_info_file and ncs_group_obj:
write_info_file(params=params,tracking_data=tracking_data,out=out)
return ncs_group_obj,remainder_ncs_group_obj,tracking_data
if __name__=="__main__":
run(args=sys.argv[1:])
|
import os
from PIL import Image, ImageDraw
from PIL import ImageFilter, ImageEnhance
from PIL.ImageFilter import (
GaussianBlur, MaxFilter
)
import numpy as np
import PostOnIg
import random
import json
import randomWords
from bing_image_downloader import downloader
import randomObjects
import time
import math
# Resources
# https://pythontic.com/image-processing/pillow/sharpen-filter
paletteRGBCMYK = [
0, 0, 0, # black
255, 0, 0, # R
0, 255, 0, # G
0, 0, 255, # B
255, 255, 0, # Y
0, 255, 255, # C
255, 0, 255, # M
255,255,255
# 180,180,180 # w
]
# monochromatic
noColors = random.randint(2,3)
# original
# noColors = random.randint(2,8)
def quantizetopalette(silf, palette, dither=Image.FLOYDSTEINBERG):
    """Convert an RGB or L mode image to use a given P-mode image's palette.

    Note: 'palette' must be a "P"-mode reference Image (e.g. built with
    putpalette), not a plain list of RGB values.
    """
    silf.load()
    # use palette from the reference "P"-mode image supplied by the caller
    palette.load()
    im = silf.im.convert("P", 0, palette.im)
    # the 0 above means turn OFF dithering, giving solid colors
    return silf._new(im)
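# Hypothetical usage sketch for quantizetopalette (kept as comments so it does
# not run on import); it mirrors the commented-out calls further down in this
# script: build a small "P"-mode reference image carrying the RGBCMYK palette
# above, then quantize another image to it.
#   palimage = Image.new('P', (16, 16))
#   palimage.putpalette(paletteRGBCMYK * 32)   # pad out the palette entries
#   quantized = quantizetopalette(img, palimage, dither=Image.FLOYDSTEINBERG)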
# ------ counter for image ID -----
def get_var_value(filename="varstore.dat"):
with open(filename, "a+") as f:
f.seek(0)
val = int(f.read() or 0) + 1
f.seek(0)
f.truncate()
f.write(str(val))
return val
your_counter = get_var_value()
print("Run No. {}".format(your_counter))
mainSearchWord = randomWords.generateWord()
def bingQuery(searchWord):
downloader.download(searchWord,
limit=1,
output_dir='dataset',
adult_filter_off=False,
force_replace=True)
# image_path = filenames[0]
try:
image_path = './dataset/'+ searchWord + '/Image_1.jpg'
return read_image(image_path)
    except FileNotFoundError:
        image_path = './dataset/'+ searchWord + '/Image_1.png'
        return read_image(image_path)
def pickImage():
    folder = r"/Users/ottobenson/Documents/GitHub/pudding-club-IG2/pudding_club_1"
    # keep picking random files until one opens as an image
    while True:
        a = random.choice(os.listdir(folder))
        img_file = folder + '/' + a
        print("IMAGE = " + img_file)
        image = read_image(img_file)
        if image is not None:
            return image
def read_image(path):
try:
image = Image.open(path)
return image
except Exception as e:
print(e)
# need to figure out a way to loop back
# if random.choice([True,False]):
# # ------- low res ----------
# cropSize = (your_counter % 5) * 50
# else:
# cropSize = 500
cropSize = random.randint(50,750)
# shapeSize = int(cropSize/4)
shapeSize = int(cropSize/random.randint(2,12))
def transformImage(dimg):
    dimg = dimg.resize((random.randint(1, cropSize), random.randint(1, cropSize)))  # resize expects a (w, h) tuple
return dimg
def drawSpine(dimg, color):
xy = [(random.randint(-cropSize,cropSize),\
random.randint(-cropSize,cropSize))
for i in range(random.randint(3,6))]
dctx = ImageDraw.Draw(dimg) # create drawing context
dctx.polygon(xy, fill=color) # draw polygon without outline
del dctx
return dimg
def drawSplotch(dimg, color):
xy = [(random.randint(-cropSize,cropSize),random.randint(-cropSize,cropSize)),\
(random.randint(-cropSize,cropSize),random.randint(-cropSize,cropSize)) ]
dctx = ImageDraw.Draw(dimg)
dctx.ellipse(xy, fill=color)
del dctx
return dimg
def drawScribble(dimg, color, scale):
x, y = random.randint(-cropSize, cropSize), random.randint(-cropSize, cropSize)
xyA = [(x, y)]
status = True
while status:
if x <= 0 or x >= cropSize or y <= 0 or y >= cropSize:
status = False
else:
xDirect = random.randint(-scale, scale)
yDirect = random.randint(-scale, scale)
for i in range(random.randint(0, cropSize)):
x += xDirect
y += yDirect
xyA.append((x, y))
xy = tuple(xyA) # convert array of points to tuple
dctx = ImageDraw.Draw(dimg) # create drawing context
dctx.point(xy, fill=color) # draw points
del dctx # destroy drawing context
return dimg
listOfObjects = []
def drawObjects(dimg, position = [random.randint(-cropSize, cropSize),
random.randint(-cropSize, cropSize)], object="tree-1"):
try:
fgPre = Image.open('./objects/'+ object + '.png')
except Exception as e:
print(e)
x, y= position[0], position[1]
resizeSize = [random.randint(1, int(cropSize/2)),
random.randint(1, int(cropSize/2))]
fg = fgPre.resize(list(resizeSize))
dimg.paste(fg, (x,y), fg)
return dimg
def drawImages(dimg, simg):
x, y= random.randint(-cropSize, cropSize), random.randint(-cropSize, cropSize)
# resizeSize = random.randint(1, int(cropSize/2))
resizeSize = [random.randint(1, int(cropSize/2)),
random.randint(1, int(cropSize/2))]
simg = simg.resize(list(resizeSize))
dimg.paste(simg, (x,y))
return dimg
# ----- main process ------
def process(dimg, blurAmount=random.randint(1,5)):
xsize, ysize = dimg.size
randomAnchorX, randomAnchorY = 0, 0
try:
randomAnchorX = random.randint(cropSize, xsize)
randomAnchorY = random.randint(cropSize, ysize)
except ValueError:
dimg = Image.new('RGB', (cropSize, cropSize), (random.randint(0,255),random.randint(0,255),random.randint(0,255)))
box = (randomAnchorX - cropSize, randomAnchorY - cropSize, randomAnchorX, randomAnchorY)
# dimg = dimg.filter(GaussianBlur(blurAmount))
if random.choice((True, False, False, False)):
dimg = dimg.filter(GaussianBlur(blurAmount))
dimg = dimg.crop(box)
# max filter (bright-colored boxes)
# rimg = simg.filter(MaxFilter(size=9))
# edge enhance
# if random.randint(0,7) == 1:
# dimg = dimg.filter(ImageFilter.EDGE_ENHANCE)
# if random.randint(0,50) == 1:
# dimg = dimg.transpose(Image.ROTATE_90)
# if random.randint(0,50) == 1:
# dimg = dimg.transpose(Image.ROTATE_180)
# if random.randint(0,10) == 1:
dimg = dimg.convert('RGB').convert(mode = "P", matrix = None, dither = Image.FLOYDSTEINBERG,
palette = Image.WEB, colors = noColors)
if dimg.mode != 'RGB': dimg = dimg.convert('RGB')
return dimg
def fuq(dimg):
xsize, ysize = dimg.size
try:
randomAnchorX = random.randint(cropSize, xsize)
randomAnchorY = random.randint(cropSize, ysize)
except ValueError:
dimg = Image.new('RGB', (cropSize, cropSize), (random.randint(0,255),random.randint(0,255),random.randint(0,255)))
# box = (randomAnchorX - cropSize, randomAnchorY - cropSize, randomAnchorX, randomAnchorY)
# p = random.randint(0,3)
# if p == 0:
# resize and paste
im1 = dimg.resize((random.randint(1,cropSize), random.randint(1,cropSize)), Image.NEAREST)
    dimg.paste(im1, (random.randint(0, xsize), random.randint(0, ysize)))
return dimg
runQuery = True
while(runQuery):
img = pickImage()
try:
        testX, testY = img.size
except AttributeError:
runQuery = True
else:
runQuery = False
img = fuq(img)
outputScene = []
script = ""
# procedure
for y in range(random.randint(1, 4)):
colorInstance = randomWords.generateColor()
for x in range(random.randint(1, 3)):
# img = drawSplotch(img, colorInstance)
pick = random.randint(0, 6)
if pick == 0:
img = fuq(img)
# img = drawSpine(img, colorInstance)
# if pick == 1:
# img = drawSplotch(img, colorInstance)
if pick == 2:
img = drawScribble(img, colorInstance, random.randint(1,3))
if pick == 3:
img = process(img)
else:
img = process(img)
# if pick == 5:
# search = randomWords.generateWord()
# img = drawImages(img, bingQuery(search))
# ----- display image ------
# img.show()
# resizeSize = 900
# img = img.resize((resizeSize,resizeSize))
# img = img.convert("1", 8)
# img = img.convert('RGB').convert(mode = "P", matrix = None, dither = Image.FLOYDSTEINBERG,
# palette = random.choice([Image.ADAPTIVE, paletteRGBCMYK]), colors = noColors)
# img = img.convert('RGB').convert(mode = "P", matrix = None, dither = Image.FLOYDSTEINBERG,
# palette = Image.ADAPTIVE, colors = noColors)
#
# if random.randint(0,6) == 1:
# palimage = Image.new('P', (16, 16))
# palimage.putpalette(paletteRGBCMYK * random.randint(6, 32))
# img = quantizetopalette(img, palimage, dither=Image.FLOYDSTEINBERG)
# elif random.randint(0,6) != 1:
# palimage = Image.new('P', (16, 16))
# palimage.putpalette(paletteRGBCMYK *32)
# img = quantizetopalette(img, palimage, dither=Image.FLOYDSTEINBERG)
# elif random.randint(0,6) != 1:
# img = img.convert('RGB').convert(mode = "P", matrix = None, dither = Image.FLOYDSTEINBERG,
# palette = Image.ADAPTIVE, colors = noColors)
# img = img.convert('RGB').convert(mode = "P", matrix = None, dither = Image.FLOYDSTEINBERG,
# palette = Image.ADAPTIVE, colors = noColors)
# img = img.quantize(noColors, Image.MEDIANCUT, random.randint(0,6), paletteRGBCMYK, dither = Image.FLOYDSTEINBERG)
if img.mode != 'RGB':
img = img.convert('RGB')
# caption the image
def createCaption():
hardwareFile = open("hardware.dat", "r")
if hardwareFile.mode == "r":
hardware = hardwareFile.read()
# unused caption content:
# + randomWords.generateFlowers() + "\n-----------------------------------------------------" \
captionString = script + " \n" + \
time.strftime("%H:%M:%S %d-%m-%y ") + \
"\nHardware: " + hardware
print(captionString)
return captionString
# save the image
png_save_name = "pudd" + str(your_counter) + '.png'
img.save('./dataset/'+ png_save_name)
imgRSZ = Image.open('./dataset/'+ png_save_name)
imgRSZ = imgRSZ.resize((700,700))
save_name = "pudd" + str(your_counter) + '.jpg'
imgRSZ.save('./dataset/'+ '/' + save_name, optimize = False, quality = 100)
# img.show()
caption = createCaption()
# upload the image
PostOnIg.upload('./dataset/' + save_name, caption)
saveString = open('./dataset/' + '/' + mainSearchWord + '.txt', 'w+')
saveString.write(caption)
saveString.close()
# os.remove('./dataset/')
|
"""The Weibull distribution."""
from equadratures.distributions.template import Distribution
from equadratures.distributions.recurrence_utils import custom_recurrence_coefficients
import numpy as np
from scipy.stats import weibull_min
RECURRENCE_PDF_SAMPLES = 8000
class Weibull(Distribution):
"""
The class defines a Weibull object. It is the child of Distribution.
    :param double shape:
        Shape parameter of the Weibull distribution.
    :param double scale:
        Scale parameter of the Weibull distribution.
"""
def __init__(self, scale=None, shape=None):
if shape is None:
self.shape = 1.0
else:
self.shape = shape
if scale is None:
self.scale = 1.0
else:
self.scale = scale
self.bounds = np.array([0.0, np.inf])
if self.shape < 0 or self.scale < 0:
raise ValueError('Invalid parameters in Weibull distribution. Shape and Scale should be positive.')
self.parent = weibull_min(c=self.shape, scale=self.scale)
self.mean, self.variance, self.skewness, self.kurtosis = self.parent.stats(moments='mvsk')
self.x_range_for_pdf = np.linspace(0, self.scale*10, RECURRENCE_PDF_SAMPLES)
def get_description(self):
"""
A description of the Weibull distribution.
:param Weibull self:
An instance of the Weibull class.
:return:
A string describing the Weibull distribution.
"""
text = "is a Weibull distribution with a shape parameter of "+str(self.shape)+" and a scale parameter of "+str(self.scale)
return text
def get_pdf(self, points=None):
"""
A Weibull probability density function.
:param Weibull self:
An instance of the Weibull class.
        :param points:
            Points at which to evaluate the probability density function.
        :return:
            Probability density values at the given points.
"""
if points is not None:
#w = self.shape/self.scale * (points/self.scale)**(self.shape-1.0) * np.exp(-1.0 * (points/self.scale)**self.shape )
#return w
return self.parent.pdf(points)
else:
            raise ValueError('Please provide input points to the get_pdf method.')
def get_icdf(self, xx):
"""
An inverse Weibull cumulative density function.
:param Weibull self:
An instance of the Weibull class.
:param array xx:
A numpy array of uniformly distributed samples between [0,1].
:return:
Inverse CDF samples associated with the Weibull distribution.
"""
#return self.scale * (-np.log(1.0 - xx))**(1.0/self.shape)
return self.parent.ppf(xx)
def get_cdf(self, points=None):
"""
A Weibull cumulative density function.
:param Weibull self:
An instance of the Weibull class.
        :param points:
            Points at which to evaluate the cumulative density function.
        :return:
            Cumulative density values at the given points.
"""
if points is not None:
# w = 1 - np.exp(-1.0 * ( (points) / (self.scale * 1.0) )**self.shape)
# return w
return self.parent.cdf(points)
else:
            raise ValueError('Please provide input points to the get_cdf method.')
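# Minimal usage sketch (not part of the library); it assumes equadratures,
# numpy and scipy are installed and simply exercises the methods defined above.
if __name__ == '__main__':
    w = Weibull(scale=2.0, shape=1.5)
    print(w.get_description())
    pts = np.linspace(0.1, 5.0, 5)
    print(w.get_pdf(points=pts))                   # density values at pts
    print(w.get_cdf(points=pts))                   # cumulative values at pts
    print(w.get_icdf(np.array([0.1, 0.5, 0.9])))   # quantiles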
|
"""Show config player."""
from mpf.core.placeholder_manager import ConditionalEvent
from mpf.config_players.device_config_player import DeviceConfigPlayer
RESERVED_KEYS = ["show", "priority", "speed", "block_queue", "start_step", "loops", "sync_ms", "manual_advance",
"key", "show_tokens", "events_when_played", "events_when_stopped", "events_when_looped",
"events_when_paused", "events_when_resumed", "events_when_advanced",
"events_when_stepped_back", "events_when_updated", "events_when_completed"]
class ShowPlayer(DeviceConfigPlayer):
"""Plays, starts, stops, pauses, resumes or advances shows based on config."""
config_file_section = 'show_player'
show_section = 'shows'
allow_placeholders_in_keys = True
__slots__ = ["_actions"]
def __init__(self, machine):
"""Initialise show player."""
super().__init__(machine)
self._actions = {
'play': self._play,
'stop': self._stop,
'pause': self._pause,
'resume': self._resume,
'advance': self._advance,
'step_back': self._step_back,
'update': self._update,
'queue': self._queue
}
# pylint: disable-msg=too-many-arguments
def play(self, settings, context, calling_context, priority=0, **kwargs):
"""Play, start, stop, pause, resume or advance show based on config."""
# make sure all shows play in sync
queue = kwargs.get("queue", None)
start_time = kwargs.get("start_time", None)
show_tokens = kwargs.get("show_tokens", kwargs)
if not start_time:
start_time = self.machine.clock.get_time()
for show, show_settings in settings.items():
# Look for a conditional event in the show name
if show.condition and not show.condition.evaluate(kwargs):
continue
if 'hold' in show_settings and show_settings['hold'] is not None:
raise AssertionError(
"Setting 'hold' is no longer supported for shows. Use duration -1 in your show.")
if priority:
show_settings = dict(show_settings)
try:
show_settings['priority'] += priority
except KeyError:
show_settings['priority'] = priority
self._update_show(show.name, show_settings, context, queue, start_time, show_tokens)
def _expand_device(self, device):
# parse conditionals
devices = super()._expand_device(device)
for index, device_entry in enumerate(devices):
if not isinstance(device_entry, ConditionalEvent):
devices[index] = self.machine.placeholder_manager.parse_conditional_template(device_entry)
return devices
def _expand_device_config(self, device_settings):
"""Validate show_tokens."""
for key in RESERVED_KEYS:
if key in device_settings["show_tokens"]:
self.raise_config_error("Key {} is not allowed in show_tokens of your show_player because it is also "
"an option in show_player. Did you indent that option too far?".format(key), 1)
return device_settings
def handle_subscription_change(self, value, settings, priority, context, key):
"""Handle subscriptions."""
instance_dict = self._get_instance_dict(context)
for show, show_settings in settings.items():
show_settings = dict(show_settings)
show_key = show_settings["key"] if 'key' in show_settings and show_settings['key'] else key
if show_settings['action'] != 'play':
raise AssertionError("Can only use action play with subscriptions.")
if value:
self._play(show_key, instance_dict, show.name, show_settings, False, None, {})
else:
self._stop(show_key, instance_dict, show.name, show_settings, False, None, {})
# pylint: disable-msg=too-many-arguments
def _play(self, key, instance_dict, show, show_settings, queue, start_time, placeholder_args):
stop_callback = None
if show_settings['block_queue']:
if not queue:
raise AssertionError("block_queue can only be used with a queue event.")
queue.wait()
stop_callback = queue.clear
start_step = show_settings['start_step'].evaluate(placeholder_args)
start_running = show_settings['start_running'].evaluate(placeholder_args)
show_tokens = {k: v.evaluate(placeholder_args) for k, v in show_settings['show_tokens'].items()}
show_config = self.machine.show_controller.create_show_config(
show, show_settings['priority'], show_settings['speed'], show_settings['loops'], show_settings['sync_ms'],
show_settings['manual_advance'], show_tokens, show_settings['events_when_played'],
show_settings['events_when_stopped'], show_settings['events_when_looped'],
show_settings['events_when_paused'], show_settings['events_when_resumed'],
show_settings['events_when_advanced'], show_settings['events_when_stepped_back'],
show_settings['events_when_updated'], show_settings['events_when_completed'])
previous_show = instance_dict.get(key, None)
instance_dict[key] = self.machine.show_controller.replace_or_advance_show(previous_show, show_config,
start_step, start_time,
start_running, stop_callback)
# pylint: disable-msg=too-many-arguments
def _queue(self, key, instance_dict, show, show_settings, queue, start_time, placeholder_args):
del queue
del instance_dict
del start_time
del key
if show_settings['block_queue']:
raise AssertionError("Cannot use queue with block_queue.")
start_step = show_settings['start_step'].evaluate(placeholder_args)
show_tokens = {k: v.evaluate(placeholder_args) for k, v in show_settings['show_tokens'].items()}
show_config = self.machine.show_controller.create_show_config(
show, show_settings['priority'], show_settings['speed'], show_settings['loops'], show_settings['sync_ms'],
show_settings['manual_advance'], show_tokens, show_settings['events_when_played'],
show_settings['events_when_stopped'], show_settings['events_when_looped'],
show_settings['events_when_paused'], show_settings['events_when_resumed'],
show_settings['events_when_advanced'], show_settings['events_when_stepped_back'],
show_settings['events_when_updated'], show_settings['events_when_completed'])
show_settings["show_queue"].enqueue_show(show_config, start_step)
@staticmethod
def _stop(key, instance_dict, show, show_settings, queue, start_time, placeholder_args):
del show
del show_settings
del queue
del start_time
del placeholder_args
if key in instance_dict:
instance_dict[key].stop()
del instance_dict[key]
@staticmethod
def _pause(key, instance_dict, show, show_settings, queue, start_time, placeholder_args):
del show
del show_settings
del queue
del start_time
del placeholder_args
if key in instance_dict:
instance_dict[key].pause()
@staticmethod
def _resume(key, instance_dict, show, show_settings, queue, start_time, placeholder_args):
del show
del show_settings
del queue
del start_time
del placeholder_args
if key in instance_dict:
instance_dict[key].resume()
@staticmethod
def _advance(key, instance_dict, show, show_settings, queue, start_time, placeholder_args):
del show
del show_settings
del queue
del start_time
del placeholder_args
if key in instance_dict:
instance_dict[key].advance()
@staticmethod
def _step_back(key, instance_dict, show, show_settings, queue, start_time, placeholder_args):
del show
del show_settings
del queue
del start_time
del placeholder_args
if key in instance_dict:
instance_dict[key].step_back()
@staticmethod
def _update(key, instance_dict, show, show_settings, queue, start_time, placeholder_args):
del show
del queue
del start_time
del placeholder_args
if key in instance_dict:
instance_dict[key].update(
speed=show_settings.get('speed'),
manual_advance=show_settings.get('manual_advance')
)
# pylint: disable-msg=too-many-arguments
def _update_show(self, show, show_settings, context, queue, start_time, placeholder_args):
instance_dict = self._get_instance_dict(context)
if 'key' in show_settings and show_settings['key']:
key = show_settings['key']
else:
key = show
try:
action = self._actions[show_settings['action']]
except KeyError:
raise AssertionError("Invalid action {} in show_player {}".format(
show_settings['action'], key))
if 'show' in show_settings and show_settings['show']:
show_name = show_settings['show']
else:
show_name = show
action(key, instance_dict, show_name, show_settings, queue, start_time, placeholder_args)
def clear_context(self, context):
"""Stop running shows from context."""
for show in self._get_instance_dict(context).values():
show.stop()
self._reset_instance_dict(context)
def get_express_config(self, value):
"""Parse express config."""
return {"action": value}
|
# -*- coding: utf-8 -*-
__author__ = "Konstantin Klementiev"
__date__ = "23 Jul 2021"
# !!! SEE CODERULES.TXT !!!
import numpy as np
import scipy.linalg as spl
import sys; sys.path.append('..') # analysis:ignore
from parseq.core import transforms as ctr
from parseq.utils import math as uma
from parseq.third_party import xrt
cpus = 'half' # can be 'all' or 'half'
def _line(xs, ys):
k = (ys[1] - ys[0]) / (xs[1] - xs[0])
b = ys[1] - k*xs[1]
return k, b
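# Example (comment only): _line([0, 2], [1, 5]) returns (2.0, 1.0), i.e. the
# slope and intercept of the straight line through (0, 1) and (2, 5).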
class Tr0(ctr.Transform):
name = 'mask Eiger'
defaultParams = dict(
cutoffNeeded=True, cutoff=2000, cutoffMaxBelow=0, cutoffMaxFrame=0,
autoVMax=True, fracVMax=0.1, use2Droi=False, roi=[])
nThreads = cpus
# nProcesses = cpus
# inArrays and outArrays needed only for multiprocessing/multithreading:
inArrays = ['elastic3Draw', 'dispersive2Draw']
outArrays = ['elastic3D', 'dispersive2D']
@staticmethod
def run_main(data):
dtparams = data.transformParams
data.elastic3D = np.array(data.elastic3Draw)
data.dispersive2D = np.array(data.dispersive2Draw)
if dtparams['cutoffNeeded']:
cutoff = dtparams['cutoff']
data.elastic3D[data.elastic3D > cutoff] = 0
data.dispersive2D[data.dispersive2D > cutoff] = 0
dtparams['cutoffMaxBelow'] = data.elastic3D.max()
dtparams['cutoffMaxFrame'] = np.unravel_index(
data.elastic3D.argmax(), data.elastic3D.shape)[0]
shape2D = data.dispersive2D.shape
data.elastic3D = np.concatenate(
(data.elastic3D, data.dispersive2D.reshape(1, *shape2D)), axis=0)
roi = dtparams['roi']
if dtparams['use2Droi'] and roi:
cy, cx, dy, dx = roi
xslice = slice(max(cx, 0), dx+cx)
yslice = slice(max(cy, 0), dy+cy)
tmp = np.zeros_like(data.dispersive2D)
tmp[xslice, yslice] = data.dispersive2D[xslice, yslice]
data.dispersive2D = tmp
tmp = np.zeros_like(data.elastic3D)
tmp[:, xslice, yslice] = data.elastic3D[:, xslice, yslice]
data.elastic3D = tmp
return True
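# Illustrative note on the 2D ROI above: roi is given as [cy, cx, dy, dx];
# run_main keeps dispersive2D[cx:cx+dx, cy:cy+dy] (and the same window in every
# elastic3D frame) and zeroes everything outside that window.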
class Tr1(ctr.Transform):
"""Unpublished and therefore obfuscated"""
name = 'project onto basis'
defaultParams = dict(
cutoffMin=None, cutoffMax=None,
# condValue=0,
convolve=True, convolveWith='Si111', convolveShow=True)
nThreads = cpus
# nProcesses = cpus
# inArrays and outArrays needed only for multiprocessing/multithreading:
inArrays = ['energy', 'i0', 'elastic3D', 'dispersive2D']
outArrays = ['xes']
@staticmethod
def run_main (O00OOO000O000OO00 ):
O0O0O00O00O0000OO =O00OOO000O000OO00 .transformParams
O0OOO0O0O0O00OOOO =O0O0O00O00O0000OO ['roi']
if O0O0O00O00O0000OO ['use2Droi']and O0OOO0O0O0O00OOOO :
OO00OOO0O00OO0O0O ,O0OOOO000O0O0OO00 ,O00OOOO00OO00OO00 ,OO000O0OO0OOO0OOO =O0OOO0O0O0O00OOOO
OO0OO0O0O0OO0000O =slice (max (O0OOOO000O0O0OO00 ,0 ),OO000O0OO0OOO0OOO +O0OOOO000O0O0OO00 +1 )
O0O00OOOO0OO0O00O =slice (max (OO00OOO0O00OO0O0O ,0 ),O00OOOO00OO00OO00 +OO00OOO0O00OO0O0O +1 )
OOOOO0OO0O00OO000 =O00OOO000O000OO00 .elastic3D [:-1 ,OO0OO0O0O0OO0000O ,O0O00OOOO0OO0O00O ]
else :
OOOOO0OO0O00OO000 =O00OOO000O000OO00 .elastic3D [:-1 ,:,:]
O0O0OO0O0O0OO0000 =OOOOO0OO0O00OO000 .shape [1 ]*OOOOO0OO0O00OO000 .shape [2 ]
O0OO0O0OO0000OO0O =np .array (OOOOO0OO0O00OO000 ,dtype =np .float64 ).reshape ((-1 ,O0O0OO0O0O0OO0000 ),order ='F').T
try :
O0OO0O0OO0000OO0O *=O00OOO000O000OO00 .i0 .max ()/O00OOO000O000OO00 .i0 [np .newaxis ,:]
except ValueError as OO00000O0OOOOOO00 :
print (OO00000O0OOOOOO00 )
return
if O0O0O00O00O0000OO ['use2Droi']and O0OOO0O0O0O00OOOO :
O00OOO0OOO0OOOO0O =O00OOO000O000OO00 .dispersive2D [OO0OO0O0O0OO0000O ,O0O00OOOO0OO0O00O ].reshape ((-1 ,O0O0OO0O0O0OO0000 ),order ='F').T
else :
O00OOO0OOO0OOOO0O =O00OOO000O000OO00 .dispersive2D .reshape ((-1 ,O0O0OO0O0O0OO0000 ),order ='F').T
OO0OOOOO0O0O0O00O =np .dot (O0OO0O0OO0000OO0O .T ,O00OOO0OOO0OOOO0O )
O00O0O0O0OOO0OOOO =np .dot (OO0OOOOO0O0O0O00O .T ,OO0OOOOO0O0O0O00O )
O0O000O00000OO00O =dict ()
OO0OO00O00000O0O0 ,OO0OOO0OOO00OOOO0 =spl .eigh (O00O0O0O0OOO0OOOO ,**O0O000O00000OO00O )
O0O00000O000OOOOO =np .dot (np .dot (OO0OOO0OOO00OOOO0 ,np .diag (1. /OO0OO00O00000O0O0 )),OO0OOO0OOO00OOOO0 .T )
OO0O0O0O0O0OOOOOO =np .dot (O0OO0O0OO0000OO0O .T *O0O00000O000OOOOO ,O0OO0O0OO0000OO0O *OO0OO00O00000O0O0 )
O0O000O00000OO00O =dict ()
OOOO0OOO0OO0OOO00 ,OO0OOOOO0OO00OO0O =spl .eigh (OO0O0O0O0O0OOOOOO ,**O0O000O00000OO00O )
O0O0O000O000O000O =O0O0O00O00O0000OO ['cutoffMin']
OOO0O0OOOOOOO0OO0 =O0O0O00O00O0000OO ['cutoffMax']
OO0OO0OOOOO00OO0O =OO0OOOOO0OO00OO0O [:,O0O0O000O000O000O :OOO0O0OOOOOOO0OO0 ]
O000OOOOO0000O000 =np .dot (np .dot (OO0OO0OOOOO00OO0O ,np .diag (1. /OOOO0OOO0OO0OOO00 [O0O0O000O000O000O :OOO0O0OOOOOOO0OO0 ])),OO0OO0OOOOO00OO0O .T )
OOO0OOO0000OO0OO0 =np .dot (O000OOOOO0000O000 ,OO0OOOOO0O0O0O00O ).ravel ()
OOO00O000O000OO00 =np .argwhere (OOO0OOO0000OO0OO0 <=0 ).ravel ()
if len (OOO00O000O000OO00 )>1 :
OOO0OOO0000OO0OO0 [:OOO00O000O000OO00 [0 ]]=0
OOO0OOO0000OO0OO0 [OOO00O000O000OO00 [-1 ]:]=0
OOO0OOO0000OO0OO0 [OOO0OOO0000OO0OO0 <0 ]=0
OOO0OOO0000OO0OO0 *=O00OOO0OOO0OOOO0O .sum ()/OOO0OOO0000OO0OO0 .sum ()
O00OOO000O000OO00 .xes =OOO0OOO0000OO0OO0
if O0O0O00O00O0000OO ['convolve']:
OO0O00OOOOO0O0OO0 =O00OOO000O000OO00 .energy [1 ]-O00OOO000O000OO00 .energy [0 ]
OO0O0OO000O0O0OOO =O0O0O00O00O0000OO ['convolveWith']
if xrt .rc [OO0O0OO000O0O0OOO ]is None :
O0OOOO0O00O0O0O0O =xrt .crystals [OO0O0OO000O0O0OOO ]
O0OOOO0O00O00OOO0 =O00OOO000O000OO00 .energy [len (O00OOO000O000OO00 .energy )//2 ]
O00OOO00OO0OO0O00 =O0OOOO0O00O0O0O0O .get_dtheta_symmetric_Bragg (O0OOOO0O00O00OOO0 )
OO00000OO00OO0OOO =O0OOOO0O00O0O0O0O .get_Bragg_angle (O0OOOO0O00O00OOO0 )-O00OOO00OO0OO0O00
OOOO000O00OO0O000 =np .abs (O0OOOO0O00O0O0O0O .get_amplitude (O00OOO000O000OO00 .energy ,np .sin (OO00000OO00OO0OOO ))[0 ])**2
xrt .refl [OO0O0OO000O0O0OOO ]=OOOO000O00OO0O000
xrt .rc [OO0O0OO000O0O0OOO ]=np .convolve (OOOO000O00OO0O000 ,OOOO000O00OO0O000 ,'same')/(OOOO000O00OO0O000 .sum ()*OO0O00OOOOO0O0OO0 )*OO0O00OOOOO0O0OO0
O00OOO000O000OO00 .xes =np .convolve (O00OOO000O000OO00 .xes ,xrt .rc [OO0O0OO000O0O0OOO ],'same')/(xrt .rc [OO0O0OO000O0O0OOO ].sum ()*OO0O00OOOOO0O0OO0 )*OO0O00OOOOO0O0OO0
return True
|
@app.on_message(filters.command(["bandiera","bandiera@NFTlittlebot"]) & ~filters.user(bannati))
def gloria(client, message):
username = message.from_user.username
if player[username]["team"] != None and player[username]["team"] != "nessuno":
team = player[username]["team"]
if "Bandiera" in clan[team]:
testo = "Sventoli con felicità la bandiera del tuo clan davanti a tutti!\n\n\n"
for riga in clan[team]["Bandiera"]:
testo += listToString(riga) + "\n"
message.reply(testo)
else:
message.reply("Nessuno ha mai disegnato la vostra bandiera!")
else:
message.reply("Non hai un team!")
@app.on_message(filters.command(["disegna","disegna@NFTlittlebot"]) & ~filters.user(bannati))
def disegna(client, message):
username = message.from_user.username
if player[username]["team"] != None and player[username]["team"] != "nessuno":
team = player[username]["team"]
if clan[team]["Sarto"] == username:
            if len(message.command) < 4:
                message.reply("Usa /disegna x y emoji per sostituire una casella!")
                return
            x = int(message.command[1]) - 1
            y = int(message.command[2]) - 1
            emoji = message.command[3][0]
try:
clan[team]["Bandiera"][x][y] = emoji
message.reply("Fatto")
except:
message.reply("Mmmm, non credo vada bene così...")
else:
message.reply("Non sei il sarto del clan!")
else:
message.reply("Non sei in un clan!")
|
#!/usr/bin/env python
# coding: utf8
'''
@author: qitan
@contact: [email protected]
@file: myfilter.py
@time: 2017/3/30 15:32
@desc:
'''
from django import template
from django.contrib.auth.models import Group
from userauth.models import User, Department
from django.db.models import Q
from django.shortcuts import get_object_or_404
from deploy.models import SaltGroup
register = template.Library()
@register.filter(name='add_class')
def add_class(value, arg):
return value.as_widget(attrs={'class': arg, 'required':'required'})
@register.filter(name='group_minions')
def minions(value):
'''
    Show all minions of a group (used in the group list).
'''
try:
group_minions = value.minions.all()
return group_minions
except:
return ''
@register.filter(name='group_users')
def all_users(group):
'''
    Show all users of a group (used in the group list).
'''
try:
#all_users = group.user_set.all()
all_users = User.objects.filter(group=group)
return all_users
except:
return ''
@register.filter(name='department_users')
def all_department_users(pk):
'''
    All users of a department.
'''
try:
all_department_users = Department.objects.get(pk=pk).user_set.all()
return all_department_users
except:
return ''
@register.filter(name='user_departments')
def user_departments(user, level):
'''
    Departments (groups) the user belongs to.
'''
try:
#user = User.objects.get(pk=pk)
if level == "1":
department = {i.id:i.deptname for i in user.department.filter(level=1)}
else:
department = {i.id:i.deptname for i in user.department.filter(~Q(level=1))}
return sorted(department.items())
except:
return ''
@register.filter(name='user_groups')
def all_user_groups(pk):
'''
    Groups the user belongs to.
'''
try:
user_group = [i.name for i in Group.objects.filter(user=pk)]
return user_group
except:
return ''
@register.filter(name='department_subs')
def all_dept_subs(pk):
'''
    Sub-departments.
'''
try:
all_depts = ["<li>%s</li>"%i.deptname for i in Department.objects.filter(parent_id=pk)]
return all_depts
except:
return ''
@register.filter(name='getNextDept')
def all_dept_node(pid):
'''
    Department child nodes.
    :param pid:
:return:
'''
try:
return Department.objects.filter(parent_id=pid).values('id', 'deptname', 'parent_id')
except:
return None
@register.filter(name='department_level')
def department_display(level):
try:
return 60 * (int(level) - 1)
except:
return ''
@register.filter(name='is_super')
def user_is_super(pk):
'''
    Whether the user is a superuser.
'''
if pk:
return User.objects.get(pk=pk).is_superuser
else:
return None
@register.filter(name='str_split')
def show_str(value, arg):
'''
    Split the remote command / remote directory lists used in permission control.
'''
if value:
str_list = value.split(arg)
return str_list
else:
return ''
@register.filter(name='list_item')
def show_item(value, arg):
'''
    Get the item at the given index from a list.
'''
if value:
return value[arg]
else:
return ''
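# Hypothetical template usage sketch (filter names are from this module; the
# variable names are made up for illustration):
#   {{ form.username|add_class:"form-control" }}
#   {% for cmd in perm.remote_cmd|str_split:"," %}{{ cmd }}{% endfor %}
#   {{ some_list|list_item:0 }}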
|
"""
Input classes for nimare data.
"""
import json
class Analyzable(object):
def to_array(self):
pass
class Mappable(Analyzable):
def to_vol(self):
pass
class ConnMatrix(Analyzable):
"""Container for connectome data (i.e., connectivity matrices).
"""
def __init__(self, mat):
pass
def to_array(self):
pass
class Image(Mappable):
"""Container for volumetric brain images.
"""
def __init__(self, nimg):
pass
def to_array(self, masker):
pass
def to_vol(self):
pass
class CoordinateSet(Mappable):
"""Container for peak information, with optional additional metadata (e.g.,
intensity values).
"""
def __init__(self, foci):
pass
def to_array(self, method, masker):
pass
def to_vol(self, method, masker):
pass
class Surface(Mappable):
"""Container for surface brain data (i.e., from gifti files).
"""
def __init__(self, gimg):
pass
def to_array(self, masker):
pass
def to_vol(self, masker):
pass
|
print('hello world new!')
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .multiple_pipeline_trigger import MultiplePipelineTrigger
class BlobTrigger(MultiplePipelineTrigger):
"""Trigger that runs every time the selected Blob container changes.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when
Start/Stop APIs are called on the Trigger. Possible values include:
'Started', 'Stopped', 'Disabled'
:vartype runtime_state: str or
~azure.mgmt.datafactory.models.TriggerRuntimeState
:param type: Required. Constant filled by server.
:type type: str
:param pipelines: Pipelines that need to be started.
:type pipelines:
list[~azure.mgmt.datafactory.models.TriggerPipelineReference]
:param folder_path: Required. The path of the container/folder that will
trigger the pipeline.
:type folder_path: str
:param max_concurrency: Required. The max number of parallel files to
handle when it is triggered.
:type max_concurrency: int
:param linked_service: Required. The Azure Storage linked service
reference.
:type linked_service:
~azure.mgmt.datafactory.models.LinkedServiceReference
"""
_validation = {
'runtime_state': {'readonly': True},
'type': {'required': True},
'folder_path': {'required': True},
'max_concurrency': {'required': True},
'linked_service': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'pipelines': {'key': 'pipelines', 'type': '[TriggerPipelineReference]'},
'folder_path': {'key': 'typeProperties.folderPath', 'type': 'str'},
'max_concurrency': {'key': 'typeProperties.maxConcurrency', 'type': 'int'},
'linked_service': {'key': 'typeProperties.linkedService', 'type': 'LinkedServiceReference'},
}
def __init__(self, **kwargs):
super(BlobTrigger, self).__init__(**kwargs)
self.folder_path = kwargs.get('folder_path', None)
self.max_concurrency = kwargs.get('max_concurrency', None)
self.linked_service = kwargs.get('linked_service', None)
self.type = 'BlobTrigger'
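# --- Usage sketch (an assumption, not generated code) ---
# The required properties documented above are supplied as keyword arguments, e.g.:
#
#   from azure.mgmt.datafactory.models import BlobTrigger, LinkedServiceReference
#
#   trigger = BlobTrigger(
#       folder_path='mycontainer/inbox',   # hypothetical container/folder path
#       max_concurrency=10,
#       linked_service=LinkedServiceReference(reference_name='MyAzureStorage'),  # hypothetical name
#   )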
|
mysp = __import__('my-voice-analysis')
p="converted"
c=r"C:\Users\ishan\OneDrive\Documents\EngHack"
import wave
params = ()
rating = mysp.myspgend(p,c)
pauses = mysp.mysppaus(p,c)
speed = mysp.myspsr(p,c)
articulation = mysp.myspatc(p,c)
pronounce = mysp.mysppron(p,c)
pauses_response = []
speed_response = []
articulation_response = []
speed_dict = {
'0': [0,'Too Slow'],
'1': [25, 'Slow'],
'2': [50, 'Slow'],
'3': [75, 'Good Speed, could be a bit faster'],
'4': [100, 'Perfect Speed'],
'5': [75, 'Good speed, could be a little slower'],
'6': [50, 'Fast'],
'7': [25, 'Fast'],
'8': [0, 'Too fast']}
articulation_dict = {
'0': [0,'Too Slow'],
'1': [0, 'Too Slow'],
'2': [25, 'Slow'],
'3': [50, 'Slow'],
    '4': [75, 'Good Articulation, could be a bit faster'],
    '5': [100, 'Perfect Articulation'],
'6': [75, 'Good Articulation, could be a little slower'],
'7': [50, 'Fast'],
'8': [25, 'Fast'],
'9': [0, 'Too fast']}
if pauses > 100:
pauses_response = [0, 'Too many pauses']
elif pauses > 80:
pauses_response = [25, 'Too many pauses']
elif pauses > 60:
pauses_response = [50, 'Just a few too many pauses']
elif pauses > 40:
pauses_response = [75, 'Good, try to reduce the amount of pauses']
else:
pauses_response = [100, 'Very few pauses, well done!']
try:
speed_response = speed_dict[str(speed)]
except KeyError:
speed_response = [0, 'Too fast']
try:
articulation_response = articulation_dict[str(articulation)]
except KeyError:
    articulation_response = [0, 'Too fast']
percentages = [pauses_response[0], speed_response[0], articulation_response[0], pronounce]
mean_percent = sum(percentages)/len(percentages)
feedback_statement = 'Feedback for Speed: ' + speed_response[1] + ', Feedback for Articulation: ' + articulation_response[1] + ', Feedback for Pausing: ' + pauses_response[1] + ', Pronounce Rating (out of 100): ' + str(pronounce)
print('\n'*5)
print(mean_percent, feedback_statement)
print('\n'*5)
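# --- Worked example (illustrative values, not real analysis output) ---
# Suppose the analysis returned pauses=70, speed=4, articulation=5, pronounce=80:
#   pauses=70  -> pauses_response       = [50, 'Just a few too many pauses']
#   speed=4    -> speed_dict['4']       = [100, 'Perfect Speed']
#   artic.=5   -> articulation_dict['5'] = [100, 'Perfect Articulation']
#   percentages = [50, 100, 100, 80]  ->  mean_percent = 330 / 4 = 82.5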
|
r"""
Algebras With Basis
"""
#*****************************************************************************
# Copyright (C) 2008 Teresa Gomez-Diaz (CNRS) <[email protected]>
# 2008-2009 Nicolas M. Thiery <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.misc.abstract_method import abstract_method
from sage.misc.cachefunc import cached_method
from sage.misc.lazy_attribute import lazy_attribute
from sage.categories.all import ModulesWithBasis, Algebras
from sage.categories.tensor import TensorProductsCategory, tensor
from sage.categories.cartesian_product import CartesianProductsCategory
from category_types import Category_over_base_ring
class AlgebrasWithBasis(Category_over_base_ring):
"""
The category of algebras with a distinguished basis
EXAMPLES::
sage: C = AlgebrasWithBasis(QQ); C
Category of algebras with basis over Rational Field
sage: C.super_categories()
[Category of modules with basis over Rational Field, Category of algebras over Rational Field]
We construct a typical parent in this category, and do some computations with it::
sage: A = C.example(); A
An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field
sage: A.category()
Category of algebras with basis over Rational Field
sage: A.one_basis()
word:
sage: A.one()
B[word: ]
sage: A.base_ring()
Rational Field
sage: A.basis().keys()
Words over {'a', 'b', 'c'}
sage: (a,b,c) = A.algebra_generators()
sage: a^3, b^2
(B[word: aaa], B[word: bb])
sage: a*c*b
B[word: acb]
sage: A.product
<bound method FreeAlgebra_with_category._product_from_product_on_basis_multiply of An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field>
sage: A.product(a*b,b)
B[word: abb]
sage: TestSuite(A).run(verbose=True)
running ._test_additive_associativity() . . . pass
running ._test_an_element() . . . pass
running ._test_associativity() . . . pass
running ._test_category() . . . pass
running ._test_characteristic() . . . pass
running ._test_distributivity() . . . pass
running ._test_elements() . . .
Running the test suite of self.an_element()
running ._test_category() . . . pass
running ._test_eq() . . . pass
running ._test_nonzero_equal() . . . pass
running ._test_not_implemented_methods() . . . pass
running ._test_pickling() . . . pass
pass
running ._test_elements_eq_reflexive() . . . pass
running ._test_elements_eq_symmetric() . . . pass
running ._test_elements_eq_transitive() . . . pass
running ._test_elements_neq() . . . pass
running ._test_eq() . . . pass
running ._test_not_implemented_methods() . . . pass
running ._test_one() . . . pass
running ._test_pickling() . . . pass
running ._test_prod() . . . pass
running ._test_some_elements() . . . pass
running ._test_zero() . . . pass
sage: A.__class__
<class 'sage.categories.examples.algebras_with_basis.FreeAlgebra_with_category'>
sage: A.element_class
<class 'sage.combinat.free_module.FreeAlgebra_with_category.element_class'>
Please see the source code of `A` (with ``A??``) for how to
implement other algebras with basis.
TESTS::
sage: TestSuite(AlgebrasWithBasis(QQ)).run()
"""
@cached_method
def super_categories(self):
"""
EXAMPLES::
sage: AlgebrasWithBasis(QQ).super_categories()
[Category of modules with basis over Rational Field, Category of algebras over Rational Field]
"""
R = self.base_ring()
return [ModulesWithBasis(R), Algebras(R)]
def example(self, alphabet = ('a','b','c')):
"""
        Returns an example of an algebra with basis::
sage: AlgebrasWithBasis(QQ).example()
An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field
        Another set of generators can be specified as an optional argument::
sage: AlgebrasWithBasis(QQ).example((1,2,3))
An example of an algebra with basis: the free algebra on the generators (1, 2, 3) over Rational Field
"""
from sage.categories.examples.algebras_with_basis import Example
return Example(self.base_ring(), alphabet)
class ParentMethods:
@abstract_method(optional = True)
def one_basis(self):
"""
When the one of an algebra with basis is an element of
this basis, this optional method can return the index of
this element. This is used to provide a default
implementation of :meth:`.one`, and an optimized default
implementation of :meth:`.from_base_ring`.
EXAMPLES::
sage: A = AlgebrasWithBasis(QQ).example()
sage: A.one_basis()
word:
sage: A.one()
B[word: ]
sage: A.from_base_ring(4)
4*B[word: ]
"""
@cached_method
def one_from_one_basis(self):
"""
Returns the one of the algebra, as per
:meth:`Monoids.ParentMethods.one()
<sage.categories.monoids.Monoids.ParentMethods.one>`
By default, this is implemented from
:meth:`.one_basis`, if available.
EXAMPLES::
sage: A = AlgebrasWithBasis(QQ).example()
sage: A.one_basis()
word:
sage: A.one_from_one_basis()
B[word: ]
sage: A.one()
B[word: ]
TESTS:
Try to check that #5843 Heisenbug is fixed::
sage: A = AlgebrasWithBasis(QQ).example()
sage: B = AlgebrasWithBasis(QQ).example(('a', 'c'))
sage: A == B
False
sage: Aone = A.one_from_one_basis
sage: Bone = B.one_from_one_basis
sage: Aone is Bone
False
            Even if called in the wrong order, they should return their
            respective ones::
sage: Bone().parent() is B
True
sage: Aone().parent() is A
True
"""
return self.monomial(self.one_basis()) #.
@lazy_attribute
def one(self):
r"""
EXAMPLES::
sage: A = AlgebrasWithBasis(QQ).example()
sage: A.one_basis()
word:
sage: A.one()
B[word: ]
"""
if self.one_basis is not NotImplemented:
return self.one_from_one_basis
else:
return NotImplemented
@lazy_attribute
def from_base_ring(self):
"""
TESTS::
sage: A = AlgebrasWithBasis(QQ).example()
sage: A.from_base_ring(3)
3*B[word: ]
"""
if self.one_basis is not NotImplemented:
return self.from_base_ring_from_one_basis
else:
return NotImplemented
def from_base_ring_from_one_basis(self, r):
"""
            INPUT:
            - `r`: an element of the coefficient ring
            Implements the canonical embedding from the ground ring.
EXAMPLES::
sage: A = AlgebrasWithBasis(QQ).example()
sage: A.from_base_ring_from_one_basis(3)
3*B[word: ]
sage: A.from_base_ring(3)
3*B[word: ]
sage: A(3)
3*B[word: ]
"""
return self.term(self.one_basis(), r) #.
@abstract_method(optional = True)
def product_on_basis(self, i, j):
"""
The product of the algebra on the basis (optional)
INPUT:
- ``i``, ``j`` -- the indices of two elements of the basis of self
Returns the product of the two corresponding basis elements
If implemented, :meth:`product` is defined from it by bilinearity.
EXAMPLES::
sage: A = AlgebrasWithBasis(QQ).example()
sage: Word = A.basis().keys()
sage: A.product_on_basis(Word("abc"),Word("cba"))
B[word: abccba]
"""
@lazy_attribute
def product(self):
"""
The product of the algebra, as per
:meth:`Magmas.ParentMethods.product()
<sage.categories.magmas.Magmas.ParentMethods.product>`
By default, this is implemented using one of the following methods,
in the specified order:
- :meth:`.product_on_basis`
- :meth:`._multiply` or :meth:`._multiply_basis`
- :meth:`.product_by_coercion`
EXAMPLES::
sage: A = AlgebrasWithBasis(QQ).example()
sage: a, b, c = A.algebra_generators()
sage: A.product(a + 2*b, 3*c)
3*B[word: ac] + 6*B[word: bc]
"""
if self.product_on_basis is not NotImplemented:
return self._product_from_product_on_basis_multiply
# return self._module_morphism(self._module_morphism(self.product_on_basis, position = 0, codomain=self),
# position = 1)
elif hasattr(self, "_multiply") or hasattr(self, "_multiply_basis"):
return self._product_from_combinatorial_algebra_multiply
elif hasattr(self, "product_by_coercion"):
return self.product_by_coercion
else:
return NotImplemented
# Provides a product using the product_on_basis by calling linear_combination only once
def _product_from_product_on_basis_multiply( self, left, right ):
r"""
Computes the product of two elements by extending
bilinearly the method :meth:`product_on_basis`.
EXAMPLES::
sage: A = AlgebrasWithBasis(QQ).example(); A
An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field
sage: (a,b,c) = A.algebra_generators()
sage: A._product_from_product_on_basis_multiply(a*b + 2*c, a - b)
B[word: aba] - B[word: abb] + 2*B[word: ca] - 2*B[word: cb]
"""
return self.linear_combination( ( self.product_on_basis( mon_left, mon_right ), coeff_left * coeff_right ) for ( mon_left, coeff_left ) in left.monomial_coefficients().iteritems() for ( mon_right, coeff_right ) in right.monomial_coefficients().iteritems() )
        # Backward compatibility temporary cruft to help migrating from CombinatorialAlgebra
def _product_from_combinatorial_algebra_multiply(self,left,right):
"""
Returns left\*right where left and right are elements of self.
            product() uses either _multiply or _multiply_basis to carry out
the actual multiplication.
EXAMPLES::
sage: s = SymmetricFunctions(QQ).schur()
sage: a = s([2])
sage: s._product_from_combinatorial_algebra_multiply(a,a)
s[2, 2] + s[3, 1] + s[4]
sage: s.product(a,a)
s[2, 2] + s[3, 1] + s[4]
"""
A = left.parent()
BR = A.base_ring()
z_elt = {}
#Do the case where the user specifies how to multiply basis elements
if hasattr(self, '_multiply_basis'):
for (left_m, left_c) in left._monomial_coefficients.iteritems():
for (right_m, right_c) in right._monomial_coefficients.iteritems():
res = self._multiply_basis(left_m, right_m)
#Handle the case where the user returns a dictionary
#where the keys are the monomials and the values are
#the coefficients. If res is not a dictionary, then
#it is assumed to be an element of self
if not isinstance(res, dict):
if isinstance(res, self._element_class):
res = res._monomial_coefficients
else:
res = {res: BR(1)}
for m in res:
if m in z_elt:
z_elt[ m ] = z_elt[m] + left_c * right_c * res[m]
else:
z_elt[ m ] = left_c * right_c * res[m]
#We assume that the user handles the multiplication correctly on
#his or her own, and returns a dict with monomials as keys and
#coefficients as values
else:
m = self._multiply(left, right)
if isinstance(m, self._element_class):
return m
if not isinstance(m, dict):
z_elt = m.monomial_coefficients()
else:
z_elt = m
#Remove all entries that are equal to 0
BR = self.base_ring()
zero = BR(0)
del_list = []
for m, c in z_elt.iteritems():
if c == zero:
del_list.append(m)
for m in del_list:
del z_elt[m]
return self._from_dict(z_elt)
#def _test_product(self, **options):
# tester = self._tester(**options)
# tester.assert_(self.product is not None)
# could check that self.product is in Hom( self x self, self)
class ElementMethods:
def __invert__(self):
"""
Returns the inverse of self if self is a multiple of one,
and one is in the basis of this algebra. Otherwise throws
an error.
Caveat: this generic implementation is not complete; there
may be invertible elements in the algebra that can't be
            inverted this way. It is correct though for graded
connected algebras with basis.
EXAMPLES::
sage: C = AlgebrasWithBasis(QQ).example()
sage: x = C(2); x
2*B[word: ]
sage: ~x
1/2*B[word: ]
sage: a = C.algebra_generators().first(); a
B[word: a]
sage: ~a
Traceback (most recent call last):
...
ValueError: cannot invert self (= B[word: a])
"""
# FIXME: make this generic
mcs = self._monomial_coefficients
one = self.parent().one_basis()
if len(mcs) == 1 and one in mcs:
return self.parent()( ~mcs[ one ] )
else:
raise ValueError, "cannot invert self (= %s)"%self
class CartesianProducts(CartesianProductsCategory):
"""
The category of algebras with basis, constructed as cartesian products of algebras with basis
        Note: this construction gives the direct products of algebras with basis.
See comment in :class:`Algebras.CartesianProducts
<sage.categories.algebras.Algebras.CartesianProducts>`
"""
def extra_super_categories(self):
"""
A cartesian product of algebras with basis is endowed with
a natural algebra with basis structure.
EXAMPLES::
sage: AlgebrasWithBasis(QQ).CartesianProducts().extra_super_categories()
[Category of algebras with basis over Rational Field]
sage: AlgebrasWithBasis(QQ).CartesianProducts().super_categories()
[Category of algebras with basis over Rational Field, Category of Cartesian products of algebras over Rational Field, Category of Cartesian products of modules with basis over Rational Field]
"""
return [self.base_category()]
class ParentMethods:
@cached_method # todo: reinstate once #5843 is fixed
def one_from_cartesian_product_of_one_basis(self):
"""
Returns the one of this cartesian product of algebras, as per ``Monoids.ParentMethods.one``
It is constructed as the cartesian product of the ones of the
summands, using their :meth:`.one_basis` methods.
This implementation does not require multiplication by
                scalars nor calling cartesian_product. This might help keep
                things as lazy as possible upon initialization.
EXAMPLES::
sage: A = AlgebrasWithBasis(QQ).example(); A
An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field
sage: A.one_basis()
word:
sage: B = cartesian_product((A, A, A))
sage: B.one_from_cartesian_product_of_one_basis()
B[(0, word: )] + B[(1, word: )] + B[(2, word: )]
sage: B.one()
B[(0, word: )] + B[(1, word: )] + B[(2, word: )]
sage: cartesian_product([SymmetricGroupAlgebra(QQ, 3), SymmetricGroupAlgebra(QQ, 4)]).one()
B[(0, [1, 2, 3])] + B[(1, [1, 2, 3, 4])]
"""
return self.sum_of_monomials( zip( self._sets_keys(), (set.one_basis() for set in self._sets)) )
@lazy_attribute
def one(self):
"""
TESTS::
sage: A = AlgebrasWithBasis(QQ).example(); A
An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field
sage: B = cartesian_product((A, A, A))
sage: B.one()
B[(0, word: )] + B[(1, word: )] + B[(2, word: )]
"""
if all(hasattr(module, "one_basis") for module in self._sets):
return self.one_from_cartesian_product_of_one_basis
else:
return NotImplemented
#def product_on_basis(self, t1, t2):
# would be easy to implement, but without a special
# version of module morphism, this would not take
# advantage of the bloc structure
class TensorProducts(TensorProductsCategory):
"""
The category of algebras with basis constructed by tensor product of algebras with basis
"""
@cached_method
def extra_super_categories(self):
"""
EXAMPLES::
sage: AlgebrasWithBasis(QQ).TensorProducts().extra_super_categories()
[Category of algebras with basis over Rational Field]
sage: AlgebrasWithBasis(QQ).TensorProducts().super_categories()
[Category of algebras with basis over Rational Field,
Category of tensor products of algebras over Rational Field,
Category of tensor products of modules with basis over Rational Field]
"""
return [self.base_category()]
class ParentMethods:
"""
            Implements operations on tensor products of algebras with basis
"""
@cached_method
def one_basis(self):
"""
Returns the index of the one of this tensor product of
algebras, as per ``AlgebrasWithBasis.ParentMethods.one_basis``
It is the tuple whose operands are the indices of the
ones of the operands, as returned by their
:meth:`.one_basis` methods.
EXAMPLES::
sage: A = AlgebrasWithBasis(QQ).example(); A
An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field
sage: A.one_basis()
word:
sage: B = tensor((A, A, A))
sage: B.one_basis()
(word: , word: , word: )
sage: B.one()
B[word: ] # B[word: ] # B[word: ]
"""
                # FIXME: this method should be conditionally defined,
# so that B.one_basis returns NotImplemented if not
# all modules provide one_basis
if all(hasattr(module, "one_basis") for module in self._sets):
return tuple(module.one_basis() for module in self._sets)
else:
raise NotImplementedError
def product_on_basis(self, t1, t2):
"""
The product of the algebra on the basis, as per
``AlgebrasWithBasis.ParentMethods.product_on_basis``.
EXAMPLES::
sage: A = AlgebrasWithBasis(QQ).example(); A
An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field
sage: (a,b,c) = A.algebra_generators()
sage: x = tensor( (a, b, c) ); x
B[word: a] # B[word: b] # B[word: c]
sage: y = tensor( (c, b, a) ); y
B[word: c] # B[word: b] # B[word: a]
sage: x*y
B[word: ac] # B[word: bb] # B[word: ca]
sage: x = tensor( ((a+2*b), c) ) ; x
B[word: a] # B[word: c] + 2*B[word: b] # B[word: c]
sage: y = tensor( (c, a) ) + 1; y
B[word: ] # B[word: ] + B[word: c] # B[word: a]
sage: x*y
B[word: a] # B[word: c] + B[word: ac] # B[word: ca] + 2*B[word: b] # B[word: c] + 2*B[word: bc] # B[word: ca]
TODO: optimize this implementation!
"""
return tensor( (module.monomial(x1)*module.monomial(x2) for (module, x1, x2) in zip(self._sets, t1, t2)) ) #.
class ElementMethods:
"""
Implements operations on elements of tensor products of algebras with basis
"""
pass
|
#!/usr/bin/env python
import numpy
from numpy import *
import mpi
import sys
from time import sleep
sys.argv = mpi.mpi_init(len(sys.argv),sys.argv)
myid=mpi.mpi_comm_rank(mpi.MPI_COMM_WORLD)
numprocs=mpi.mpi_comm_size(mpi.MPI_COMM_WORLD)
print "hello from python main1 myid= ",myid
port_name=mpi.mpi_open_port(mpi.MPI_INFO_NULL);
print "port=",port_name
client=mpi.mpi_comm_accept(port_name,mpi.MPI_INFO_NULL,0,mpi.MPI_COMM_WORLD)
back=mpi.mpi_recv(1,mpi.MPI_INT,0,5678,client)
print "back=",back
back[0]=back[0]+1
mpi.mpi_send(back,1,mpi.MPI_INT,0,1234,client)
sleep(10)
mpi.mpi_close_port(port_name);
mpi.mpi_comm_disconnect(client);
mpi.mpi_finalize()
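# --- Matching client sketch (an assumption: the bindings are presumed to expose
# mpi_comm_connect, mirroring MPI_Comm_connect with the same argument order as
# mpi_comm_accept above; port_name must be the string printed by this server) ---
#
#   sys.argv = mpi.mpi_init(len(sys.argv), sys.argv)
#   server = mpi.mpi_comm_connect(port_name, mpi.MPI_INFO_NULL, 0, mpi.MPI_COMM_WORLD)
#   buf = numpy.array([41], numpy.int32)
#   mpi.mpi_send(buf, 1, mpi.MPI_INT, 0, 5678, server)    # tag 5678 matches the recv above
#   back = mpi.mpi_recv(1, mpi.MPI_INT, 0, 1234, server)  # tag 1234 matches the send above
#   mpi.mpi_comm_disconnect(server)
#   mpi.mpi_finalize()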
|
import aiohttp
import os
from redbot.core import Config, commands, checks
from redbot.core.utils.chat_formatting import box
import xml.etree.ElementTree as ET
class Wolfram(commands.Cog):
"""Ask Wolfram Alpha any question."""
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession()
default_global = {"WOLFRAM_API_KEY": None}
self.config = Config.get_conf(self, 2788801004)
        self.config.register_global(**default_global)
@commands.command(name="wolfram", aliases=["ask"])
async def _wolfram(self, ctx, *question: str):
"""Ask Wolfram Alpha any question."""
api_key = await self.config.WOLFRAM_API_KEY()
if api_key:
url = "http://api.wolframalpha.com/v2/query?"
query = " ".join(question)
payload = {"input": query, "appid": api_key}
headers = {"user-agent": "Red-cog/2.0.0"}
async with self.session.get(url, params=payload, headers=headers) as r:
result = await r.text()
root = ET.fromstring(result)
a = []
for pt in root.findall(".//plaintext"):
if pt.text:
a.append(pt.text.capitalize())
if len(a) < 1:
message = "There is as yet insufficient data for a meaningful answer."
else:
message = "\n".join(a[0:3])
else:
message = "No API key set for Wolfram Alpha. Get one at http://products.wolframalpha.com/api/"
await ctx.send(box(message))
@checks.is_owner()
@commands.command(name="setwolframapi", aliases=["setwolfram"])
async def _setwolframapi(self, ctx, key: str):
"""Set the api-key."""
if key:
await self.config.WOLFRAM_API_KEY.set(key)
await ctx.send("Key set.")
def cog_unload(self):
self.bot.loop.create_task(self.session.close())
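# --- Usage sketch (illustrative; the "!" prefix below is an assumption, Red uses whatever
# prefix the bot owner configured) ---
#   !setwolframapi <your-app-id>     (owner only, stores the key via Config)
#   !wolfram integrate x^2           (or the "ask" alias; replies with up to three plaintext pods)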
|
AMR_HEADERS = ['ORF_ID', 'Start', 'Stop', 'Orientation', 'Best_Identities', 'Best_Hit_ARO']
ARGS_DICT={'disable_serotype':False,'disable_vf':False,'pi':90, 'options':{'vf': True, 'amr': True, 'serotype': True}}
set_spfyids_o157 = set([u'https://www.github.com/superphy#spfy3418', u'https://www.github.com/superphy#spfy3419', u'https://www.github.com/superphy#spfy3412', u'https://www.github.com/superphy#spfy3413', u'https://www.github.com/superphy#spfy3414', u'https://www.github.com/superphy#spfy3415', u'https://www.github.com/superphy#spfy3416', u'https://www.github.com/superphy#spfy3417', u'https://www.github.com/superphy#spfy597', u'https://www.github.com/superphy#spfy596', u'https://www.github.com/superphy#spfy595', u'https://www.github.com/superphy#spfy594', u'https://www.github.com/superphy#spfy593', u'https://www.github.com/superphy#spfy592', u'https://www.github.com/superphy#spfy591', u'https://www.github.com/superphy#spfy590', u'https://www.github.com/superphy#spfy599', u'https://www.github.com/superphy#spfy598', u'https://www.github.com/superphy#spfy93', u'https://www.github.com/superphy#spfy92', u'https://www.github.com/superphy#spfy91', u'https://www.github.com/superphy#spfy90', u'https://www.github.com/superphy#spfy94', u'https://www.github.com/superphy#spfy4709', u'https://www.github.com/superphy#spfy1825', u'https://www.github.com/superphy#spfy1822', u'https://www.github.com/superphy#spfy1820', u'https://www.github.com/superphy#spfy1829', u'https://www.github.com/superphy#spfy5254', u'https://www.github.com/superphy#spfy1458', u'https://www.github.com/superphy#spfy1459', u'https://www.github.com/superphy#spfy2290', u'https://www.github.com/superphy#spfy1452', u'https://www.github.com/superphy#spfy1453', u'https://www.github.com/superphy#spfy1450', u'https://www.github.com/superphy#spfy1451', u'https://www.github.com/superphy#spfy1456', u'https://www.github.com/superphy#spfy1457', u'https://www.github.com/superphy#spfy1454', u'https://www.github.com/superphy#spfy1455', u'https://www.github.com/superphy#spfy1751', u'https://www.github.com/superphy#spfy1753', u'https://www.github.com/superphy#spfy1752', u'https://www.github.com/superphy#spfy1755', u'https://www.github.com/superphy#spfy1754', u'https://www.github.com/superphy#spfy1757', u'https://www.github.com/superphy#spfy1756', u'https://www.github.com/superphy#spfy1697', u'https://www.github.com/superphy#spfy2324', u'https://www.github.com/superphy#spfy4439', u'https://www.github.com/superphy#spfy4438', u'https://www.github.com/superphy#spfy4598', u'https://www.github.com/superphy#spfy3959', u'https://www.github.com/superphy#spfy4599', u'https://www.github.com/superphy#spfy3814', u'https://www.github.com/superphy#spfy1443', u'https://www.github.com/superphy#spfy5248', u'https://www.github.com/superphy#spfy2958', u'https://www.github.com/superphy#spfy4644', u'https://www.github.com/superphy#spfy4645', u'https://www.github.com/superphy#spfy4646', u'https://www.github.com/superphy#spfy4647', u'https://www.github.com/superphy#spfy4640', u'https://www.github.com/superphy#spfy4641', u'https://www.github.com/superphy#spfy4642', u'https://www.github.com/superphy#spfy4643', u'https://www.github.com/superphy#spfy244', u'https://www.github.com/superphy#spfy4648', u'https://www.github.com/superphy#spfy4649', u'https://www.github.com/superphy#spfy2355', u'https://www.github.com/superphy#spfy5253', u'https://www.github.com/superphy#spfy148', u'https://www.github.com/superphy#spfy149', u'https://www.github.com/superphy#spfy1133', u'https://www.github.com/superphy#spfy4310', u'https://www.github.com/superphy#spfy4851', u'https://www.github.com/superphy#spfy4850', u'https://www.github.com/superphy#spfy4853', 
u'https://www.github.com/superphy#spfy4852', u'https://www.github.com/superphy#spfy1783', u'https://www.github.com/superphy#spfy1780', u'https://www.github.com/superphy#spfy1785', u'https://www.github.com/superphy#spfy2776', u'https://www.github.com/superphy#spfy3622', u'https://www.github.com/superphy#spfy3621', u'https://www.github.com/superphy#spfy3620', u'https://www.github.com/superphy#spfy1719', u'https://www.github.com/superphy#spfy1715', u'https://www.github.com/superphy#spfy1714', u'https://www.github.com/superphy#spfy1716', u'https://www.github.com/superphy#spfy1711', u'https://www.github.com/superphy#spfy1712', u'https://www.github.com/superphy#spfy1917', u'https://www.github.com/superphy#spfy1913', u'https://www.github.com/superphy#spfy1858', u'https://www.github.com/superphy#spfy1859', u'https://www.github.com/superphy#spfy1856', u'https://www.github.com/superphy#spfy1857', u'https://www.github.com/superphy#spfy1854', u'https://www.github.com/superphy#spfy1855', u'https://www.github.com/superphy#spfy1852', u'https://www.github.com/superphy#spfy1853', u'https://www.github.com/superphy#spfy1851', u'https://www.github.com/superphy#spfy2913', u'https://www.github.com/superphy#spfy2916', u'https://www.github.com/superphy#spfy2918', u'https://www.github.com/superphy#spfy1769', u'https://www.github.com/superphy#spfy1767', u'https://www.github.com/superphy#spfy2253', u'https://www.github.com/superphy#spfy4446', u'https://www.github.com/superphy#spfy365', u'https://www.github.com/superphy#spfy366', u'https://www.github.com/superphy#spfy4445', u'https://www.github.com/superphy#spfy360', u'https://www.github.com/superphy#spfy361', u'https://www.github.com/superphy#spfy362', u'https://www.github.com/superphy#spfy363', u'https://www.github.com/superphy#spfy4708', u'https://www.github.com/superphy#spfy218', u'https://www.github.com/superphy#spfy2863', u'https://www.github.com/superphy#spfy4688', u'https://www.github.com/superphy#spfy4689', u'https://www.github.com/superphy#spfy5273', u'https://www.github.com/superphy#spfy5280', u'https://www.github.com/superphy#spfy108', u'https://www.github.com/superphy#spfy109', u'https://www.github.com/superphy#spfy103', u'https://www.github.com/superphy#spfy4616', u'https://www.github.com/superphy#spfy106', u'https://www.github.com/superphy#spfy107', u'https://www.github.com/superphy#spfy2225', u'https://www.github.com/superphy#spfy3409', u'https://www.github.com/superphy#spfy1060', u'https://www.github.com/superphy#spfy587', u'https://www.github.com/superphy#spfy588', u'https://www.github.com/superphy#spfy426', u'https://www.github.com/superphy#spfy4868', u'https://www.github.com/superphy#spfy3035', u'https://www.github.com/superphy#spfy4865', u'https://www.github.com/superphy#spfy4867', u'https://www.github.com/superphy#spfy4862', u'https://www.github.com/superphy#spfy4863', u'https://www.github.com/superphy#spfy3668', u'https://www.github.com/superphy#spfy3663', u'https://www.github.com/superphy#spfy3662', u'https://www.github.com/superphy#spfy3661', u'https://www.github.com/superphy#spfy3660', u'https://www.github.com/superphy#spfy5245', u'https://www.github.com/superphy#spfy1449', u'https://www.github.com/superphy#spfy1448', u'https://www.github.com/superphy#spfy1445', u'https://www.github.com/superphy#spfy1444', u'https://www.github.com/superphy#spfy1447', u'https://www.github.com/superphy#spfy1446', u'https://www.github.com/superphy#spfy1441', u'https://www.github.com/superphy#spfy4088', u'https://www.github.com/superphy#spfy1442', 
u'https://www.github.com/superphy#spfy1725', u'https://www.github.com/superphy#spfy1727', u'https://www.github.com/superphy#spfy1720', u'https://www.github.com/superphy#spfy4653', u'https://www.github.com/superphy#spfy1728', u'https://www.github.com/superphy#spfy1684', u'https://www.github.com/superphy#spfy4718', u'https://www.github.com/superphy#spfy4719', u'https://www.github.com/superphy#spfy1968', u'https://www.github.com/superphy#spfy4717', u'https://www.github.com/superphy#spfy1967', u'https://www.github.com/superphy#spfy2949', u'https://www.github.com/superphy#spfy43', u'https://www.github.com/superphy#spfy2940', u'https://www.github.com/superphy#spfy4713', u'https://www.github.com/superphy#spfy1438', u'https://www.github.com/superphy#spfy1439', u'https://www.github.com/superphy#spfy1430', u'https://www.github.com/superphy#spfy1431', u'https://www.github.com/superphy#spfy1432', u'https://www.github.com/superphy#spfy1433', u'https://www.github.com/superphy#spfy1434', u'https://www.github.com/superphy#spfy1435', u'https://www.github.com/superphy#spfy1436', u'https://www.github.com/superphy#spfy1437', u'https://www.github.com/superphy#spfy4657', u'https://www.github.com/superphy#spfy4656', u'https://www.github.com/superphy#spfy4655', u'https://www.github.com/superphy#spfy4654', u'https://www.github.com/superphy#spfy258', u'https://www.github.com/superphy#spfy259', u'https://www.github.com/superphy#spfy4651', u'https://www.github.com/superphy#spfy4650', u'https://www.github.com/superphy#spfy4716', u'https://www.github.com/superphy#spfy255', u'https://www.github.com/superphy#spfy4714', u'https://www.github.com/superphy#spfy257', u'https://www.github.com/superphy#spfy4712', u'https://www.github.com/superphy#spfy2261', u'https://www.github.com/superphy#spfy4710', u'https://www.github.com/superphy#spfy4711', u'https://www.github.com/superphy#spfy5308', u'https://www.github.com/superphy#spfy5258', u'https://www.github.com/superphy#spfy5259', u'https://www.github.com/superphy#spfy1698', u'https://www.github.com/superphy#spfy4652', u'https://www.github.com/superphy#spfy4447', u'https://www.github.com/superphy#spfy4444', u'https://www.github.com/superphy#spfy4442', u'https://www.github.com/superphy#spfy1097', u'https://www.github.com/superphy#spfy4443', u'https://www.github.com/superphy#spfy4440', u'https://www.github.com/superphy#spfy4441', u'https://www.github.com/superphy#spfy476', u'https://www.github.com/superphy#spfy478', u'https://www.github.com/superphy#spfy3612', u'https://www.github.com/superphy#spfy2710', u'https://www.github.com/superphy#spfy3619', u'https://www.github.com/superphy#spfy4285', u'https://www.github.com/superphy#spfy1841', u'https://www.github.com/superphy#spfy1840', u'https://www.github.com/superphy#spfy1843', u'https://www.github.com/superphy#spfy1842', u'https://www.github.com/superphy#spfy1845', u'https://www.github.com/superphy#spfy1844', u'https://www.github.com/superphy#spfy1847', u'https://www.github.com/superphy#spfy600', u'https://www.github.com/superphy#spfy1849', u'https://www.github.com/superphy#spfy1848', u'https://www.github.com/superphy#spfy609', u'https://www.github.com/superphy#spfy608', u'https://www.github.com/superphy#spfy2900', u'https://www.github.com/superphy#spfy5234', u'https://www.github.com/superphy#spfy1576', u'https://www.github.com/superphy#spfy5231', u'https://www.github.com/superphy#spfy1779', u'https://www.github.com/superphy#spfy1778', u'https://www.github.com/superphy#spfy1470', u'https://www.github.com/superphy#spfy1471', 
u'https://www.github.com/superphy#spfy1773', u'https://www.github.com/superphy#spfy1772', u'https://www.github.com/superphy#spfy1771', u'https://www.github.com/superphy#spfy1770', u'https://www.github.com/superphy#spfy1776', u'https://www.github.com/superphy#spfy1775', u'https://www.github.com/superphy#spfy1774', u'https://www.github.com/superphy#spfy359', u'https://www.github.com/superphy#spfy358', u'https://www.github.com/superphy#spfy357', u'https://www.github.com/superphy#spfy170', u'https://www.github.com/superphy#spfy2872', u'https://www.github.com/superphy#spfy2972', u'https://www.github.com/superphy#spfy4690', u'https://www.github.com/superphy#spfy4020', u'https://www.github.com/superphy#spfy2884', u'https://www.github.com/superphy#spfy2886', u'https://www.github.com/superphy#spfy500', u'https://www.github.com/superphy#spfy4623', u'https://www.github.com/superphy#spfy502', u'https://www.github.com/superphy#spfy503', u'https://www.github.com/superphy#spfy3481', u'https://www.github.com/superphy#spfy2988', u'https://www.github.com/superphy#spfy2289', u'https://www.github.com/superphy#spfy2286', u'https://www.github.com/superphy#spfy2283', u'https://www.github.com/superphy#spfy1058', u'https://www.github.com/superphy#spfy137', u'https://www.github.com/superphy#spfy4715', u'https://www.github.com/superphy#spfy4236', u'https://www.github.com/superphy#spfy2053', u'https://www.github.com/superphy#spfy5265', u'https://www.github.com/superphy#spfy1891', u'https://www.github.com/superphy#spfy1896', u'https://www.github.com/superphy#spfy1895', u'https://www.github.com/superphy#spfy1898', u'https://www.github.com/superphy#spfy1899', u'https://www.github.com/superphy#spfy5266', u'https://www.github.com/superphy#spfy1808', u'https://www.github.com/superphy#spfy3022', u'https://www.github.com/superphy#spfy5261', u'https://www.github.com/superphy#spfy3656', u'https://www.github.com/superphy#spfy3657', u'https://www.github.com/superphy#spfy3015', u'https://www.github.com/superphy#spfy3658', u'https://www.github.com/superphy#spfy3659', u'https://www.github.com/superphy#spfy3016', u'https://www.github.com/superphy#spfy336', u'https://www.github.com/superphy#spfy3727', u'https://www.github.com/superphy#spfy3728', u'https://www.github.com/superphy#spfy3729', u'https://www.github.com/superphy#spfy1730', u'https://www.github.com/superphy#spfy1879', u'https://www.github.com/superphy#spfy5278', u'https://www.github.com/superphy#spfy5279', u'https://www.github.com/superphy#spfy1870', u'https://www.github.com/superphy#spfy610', u'https://www.github.com/superphy#spfy1872', u'https://www.github.com/superphy#spfy1873', u'https://www.github.com/superphy#spfy1874', u'https://www.github.com/superphy#spfy1875', u'https://www.github.com/superphy#spfy1877', u'https://www.github.com/superphy#spfy1429', u'https://www.github.com/superphy#spfy1428', u'https://www.github.com/superphy#spfy1423', u'https://www.github.com/superphy#spfy1427', u'https://www.github.com/superphy#spfy1426', u'https://www.github.com/superphy#spfy1425', u'https://www.github.com/superphy#spfy1424', u'https://www.github.com/superphy#spfy4705', u'https://www.github.com/superphy#spfy4704', u'https://www.github.com/superphy#spfy4707', u'https://www.github.com/superphy#spfy261', u'https://www.github.com/superphy#spfy618', u'https://www.github.com/superphy#spfy263', u'https://www.github.com/superphy#spfy262', u'https://www.github.com/superphy#spfy265', u'https://www.github.com/superphy#spfy264', u'https://www.github.com/superphy#spfy267', 
u'https://www.github.com/superphy#spfy266', u'https://www.github.com/superphy#spfy260', u'https://www.github.com/superphy#spfy4720', u'https://www.github.com/superphy#spfy540', u'https://www.github.com/superphy#spfy2842', u'https://www.github.com/superphy#spfy168', u'https://www.github.com/superphy#spfy169', u'https://www.github.com/superphy#spfy167', u'https://www.github.com/superphy#spfy4972', u'https://www.github.com/superphy#spfy2158', u'https://www.github.com/superphy#spfy4107', u'https://www.github.com/superphy#spfy1647', u'https://www.github.com/superphy#spfy1748', u'https://www.github.com/superphy#spfy1749', u'https://www.github.com/superphy#spfy88', u'https://www.github.com/superphy#spfy89', u'https://www.github.com/superphy#spfy85', u'https://www.github.com/superphy#spfy86', u'https://www.github.com/superphy#spfy87', u'https://www.github.com/superphy#spfy1834', u'https://www.github.com/superphy#spfy1835', u'https://www.github.com/superphy#spfy1830', u'https://www.github.com/superphy#spfy1831', u'https://www.github.com/superphy#spfy1832', u'https://www.github.com/superphy#spfy1838', u'https://www.github.com/superphy#spfy3606', u'https://www.github.com/superphy#spfy2640', u'https://www.github.com/superphy#spfy1467', u'https://www.github.com/superphy#spfy1466', u'https://www.github.com/superphy#spfy1465', u'https://www.github.com/superphy#spfy1464', u'https://www.github.com/superphy#spfy1463', u'https://www.github.com/superphy#spfy1462', u'https://www.github.com/superphy#spfy1461', u'https://www.github.com/superphy#spfy1460', u'https://www.github.com/superphy#spfy1746', u'https://www.github.com/superphy#spfy1469', u'https://www.github.com/superphy#spfy1468', u'https://www.github.com/superphy#spfy4583', u'https://www.github.com/superphy#spfy4581', u'https://www.github.com/superphy#spfy15', u'https://www.github.com/superphy#spfy18', u'https://www.github.com/superphy#spfy340', u'https://www.github.com/superphy#spfy2964', u'https://www.github.com/superphy#spfy1513', u'https://www.github.com/superphy#spfy1512', u'https://www.github.com/superphy#spfy2969', u'https://www.github.com/superphy#spfy1516', u'https://www.github.com/superphy#spfy4639', u'https://www.github.com/superphy#spfy3421', u'https://www.github.com/superphy#spfy3420', u'https://www.github.com/superphy#spfy3423', u'https://www.github.com/superphy#spfy3422', u'https://www.github.com/superphy#spfy3425', u'https://www.github.com/superphy#spfy3424', u'https://www.github.com/superphy#spfy3427', u'https://www.github.com/superphy#spfy3426', u'https://www.github.com/superphy#spfy3428', u'https://www.github.com/superphy#spfy120', u'https://www.github.com/superphy#spfy121', u'https://www.github.com/superphy#spfy401', u'https://www.github.com/superphy#spfy150', u'https://www.github.com/superphy#spfy427', u'https://www.github.com/superphy#spfy589', u'https://www.github.com/superphy#spfy1123', u'https://www.github.com/superphy#spfy2747', u'https://www.github.com/superphy#spfy1887', u'https://www.github.com/superphy#spfy1886', u'https://www.github.com/superphy#spfy5033', u'https://www.github.com/superphy#spfy4846', u'https://www.github.com/superphy#spfy4847', u'https://www.github.com/superphy#spfy4845', u'https://www.github.com/superphy#spfy4848', u'https://www.github.com/superphy#spfy4849', u'https://www.github.com/superphy#spfy3649', u'https://www.github.com/superphy#spfy1709', u'https://www.github.com/superphy#spfy971', u'https://www.github.com/superphy#spfy1703', u'https://www.github.com/superphy#spfy4998', 
u'https://www.github.com/superphy#spfy974', u'https://www.github.com/superphy#spfy1871', u'https://www.github.com/superphy#spfy5271', u'https://www.github.com/superphy#spfy1869', u'https://www.github.com/superphy#spfy1868', u'https://www.github.com/superphy#spfy1900', u'https://www.github.com/superphy#spfy1863', u'https://www.github.com/superphy#spfy1862', u'https://www.github.com/superphy#spfy1861', u'https://www.github.com/superphy#spfy1860', u'https://www.github.com/superphy#spfy1867', u'https://www.github.com/superphy#spfy5260', u'https://www.github.com/superphy#spfy1864', u'https://www.github.com/superphy#spfy4788', u'https://www.github.com/superphy#spfy4758', u'https://www.github.com/superphy#spfy2928', u'https://www.github.com/superphy#spfy4785', u'https://www.github.com/superphy#spfy2690', u'https://www.github.com/superphy#spfy4787', u'https://www.github.com/superphy#spfy2692', u'https://www.github.com/superphy#spfy607', u'https://www.github.com/superphy#spfy606', u'https://www.github.com/superphy#spfy605', u'https://www.github.com/superphy#spfy5264', u'https://www.github.com/superphy#spfy604', u'https://www.github.com/superphy#spfy603', u'https://www.github.com/superphy#spfy602', u'https://www.github.com/superphy#spfy601', u'https://www.github.com/superphy#spfy2853', u'https://www.github.com/superphy#spfy2851', u'https://www.github.com/superphy#spfy2856', u'https://www.github.com/superphy#spfy2855', u'https://www.github.com/superphy#spfy575', u'https://www.github.com/superphy#spfy4674', u'https://www.github.com/superphy#spfy278', u'https://www.github.com/superphy#spfy279', u'https://www.github.com/superphy#spfy119', u'https://www.github.com/superphy#spfy4600', u'https://www.github.com/superphy#spfy111', u'https://www.github.com/superphy#spfy110', u'https://www.github.com/superphy#spfy282', u'https://www.github.com/superphy#spfy281', u'https://www.github.com/superphy#spfy280'])
BEAUTIFY_VF_SEROTYPE = [
{
"analysis": "Serotype",
"contigid": "n/a",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": "n/a",
"hitname": "O16:H48",
"hitorientation": "n/a",
"hitstart": "n/a",
"hitstop": "n/a"
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "EC958",
"hitorientation": "+",
"hitstart": 2073473,
"hitstop": 2074658
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ECP",
"hitorientation": "-",
"hitstart": 306807,
"hitstop": 309332
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ECP",
"hitorientation": "-",
"hitstart": 309358,
"hitstop": 310075
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ECP",
"hitorientation": "-",
"hitstart": 310084,
"hitstop": 310700
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ECP",
"hitorientation": "-",
"hitstart": 310746,
"hitstop": 311336
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ECS88",
"hitorientation": "-",
"hitstart": 3308040,
"hitstop": 3308924
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "Z1307",
"hitorientation": "-",
"hitstart": 1019013,
"hitstop": 1020053
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "Z2203",
"hitorientation": "-",
"hitstart": 1588853,
"hitstop": 1590079
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "Z2204",
"hitorientation": "-",
"hitstart": 1588309,
"hitstop": 1588839
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "Z2205",
"hitorientation": "-",
"hitstart": 1587793,
"hitstop": 1588296
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "Z2206",
"hitorientation": "-",
"hitstart": 1586820,
"hitstop": 1587734
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "agn43",
"hitorientation": "+",
"hitstart": 2071539,
"hitstop": 2074658
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "artj",
"hitorientation": "-",
"hitstart": 899844,
"hitstop": 900575
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "aslA",
"hitorientation": "-",
"hitstart": 3984579,
"hitstop": 3986007
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "b2972",
"hitorientation": "-",
"hitstart": 3113543,
"hitstop": 3114352
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cadA",
"hitorientation": "-",
"hitstart": 4356481,
"hitstop": 4358656
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cah",
"hitorientation": "+",
"hitstart": 2073486,
"hitstop": 2074658
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cheA",
"hitorientation": "-",
"hitstart": 1973360,
"hitstop": 1975324
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cheB",
"hitorientation": "-",
"hitstart": 1967452,
"hitstop": 1968501
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cheR",
"hitorientation": "-",
"hitstart": 1968504,
"hitstop": 1969364
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cheW",
"hitorientation": "-",
"hitstart": 1972836,
"hitstop": 1973339
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cheZ",
"hitorientation": "-",
"hitstart": 1966393,
"hitstop": 1967037
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cs3",
"hitorientation": "-",
"hitstart": 2994460,
"hitstop": 2995092
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "csgD",
"hitorientation": "-",
"hitstart": 1102546,
"hitstop": 1103196
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "csgG",
"hitorientation": "-",
"hitstart": 1100851,
"hitstop": 1101684
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "eae",
"hitorientation": "+",
"hitstart": 314420,
"hitstop": 315232
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ecpA",
"hitorientation": "-",
"hitstart": 310084,
"hitstop": 310671
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ecpB",
"hitorientation": "-",
"hitstart": 309358,
"hitstop": 310026
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ecpC",
"hitorientation": "-",
"hitstart": 306807,
"hitstop": 309332
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ecpD",
"hitorientation": "-",
"hitstart": 305174,
"hitstop": 306817
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ecpE",
"hitorientation": "-",
"hitstart": 304497,
"hitstop": 305250
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ecpR",
"hitorientation": "-",
"hitstart": 310746,
"hitstop": 311336
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ehaB",
"hitorientation": "+",
"hitstart": 392973,
"hitstop": 394418
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "entA",
"hitorientation": "+",
"hitstart": 628551,
"hitstop": 629297
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "entB",
"hitorientation": "+",
"hitstart": 627694,
"hitstop": 628551
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "entC",
"hitorientation": "+",
"hitstart": 624873,
"hitstop": 626060
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "entD",
"hitorientation": "-",
"hitstart": 609459,
"hitstop": 610229
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "entE",
"hitorientation": "+",
"hitstart": 626070,
"hitstop": 627680
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "entF",
"hitorientation": "+",
"hitstart": 614157,
"hitstop": 617980
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "entS",
"hitorientation": "+",
"hitstart": 622300,
"hitstop": 623550
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "espL1",
"hitorientation": "+",
"hitstart": 1803439,
"hitstop": 1804993
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "espL3",
"hitorientation": "-",
"hitstart": 3861987,
"hitstop": 3863864
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "espL4",
"hitorientation": "-",
"hitstart": 4221348,
"hitstop": 4222487
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "espR1",
"hitorientation": "-",
"hitstart": 1544385,
"hitstop": 1545447
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "espX4",
"hitorientation": "+",
"hitstart": 4250703,
"hitstop": 4252283
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "espX5",
"hitorientation": "-",
"hitstart": 4281783,
"hitstop": 4283075
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "espY1",
"hitorientation": "+",
"hitstart": 58474,
"hitstop": 59103
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fdeC",
"hitorientation": "+",
"hitstart": 314357,
"hitstop": 315232
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fepA",
"hitorientation": "-",
"hitstart": 610254,
"hitstop": 612494
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fepB",
"hitorientation": "-",
"hitstart": 623554,
"hitstop": 624510
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fepC",
"hitorientation": "-",
"hitstart": 619384,
"hitstop": 620199
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fepD",
"hitorientation": "-",
"hitstart": 621185,
"hitstop": 622201
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fepE",
"hitorientation": "+",
"hitstart": 618254,
"hitstop": 619387
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fepG",
"hitorientation": "-",
"hitstart": 620196,
"hitstop": 621188
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fes",
"hitorientation": "+",
"hitstart": 612737,
"hitstop": 613939
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimA",
"hitorientation": "+",
"hitstart": 4543115,
"hitstop": 4543663
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimB",
"hitorientation": "+",
"hitstart": 4540957,
"hitstop": 4541559
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimC",
"hitorientation": "+",
"hitstart": 4544355,
"hitstop": 4545029
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimD",
"hitorientation": "-",
"hitstart": 1588853,
"hitstop": 1590079
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimD",
"hitorientation": "+",
"hitstart": 4545096,
"hitstop": 4547732
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimE",
"hitorientation": "+",
"hitstart": 4542037,
"hitstop": 4542633
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimF",
"hitorientation": "-",
"hitstart": 1588309,
"hitstop": 1588839
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimF",
"hitorientation": "+",
"hitstart": 4547742,
"hitstop": 4548272
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimG",
"hitorientation": "-",
"hitstart": 1587793,
"hitstop": 1588296
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimG",
"hitorientation": "+",
"hitstart": 4548285,
"hitstop": 4548788
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimH",
"hitorientation": "+",
"hitstart": 4548808,
"hitstop": 4549710
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimI",
"hitorientation": "+",
"hitstart": 4543620,
"hitstop": 4544267
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgA",
"hitorientation": "-",
"hitstart": 1130204,
"hitstop": 1130863
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgD",
"hitorientation": "+",
"hitstart": 1131854,
"hitstop": 1132549
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgE",
"hitorientation": "+",
"hitstart": 1132574,
"hitstop": 1133782
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgF",
"hitorientation": "+",
"hitstart": 1133802,
"hitstop": 1134557
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgG",
"hitorientation": "+",
"hitstart": 1134729,
"hitstop": 1135511
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgH",
"hitorientation": "+",
"hitstart": 1135564,
"hitstop": 1136262
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgI",
"hitorientation": "+",
"hitstart": 1136274,
"hitstop": 1137371
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgJ",
"hitorientation": "+",
"hitstart": 1137371,
"hitstop": 1138312
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgK",
"hitorientation": "+",
"hitstart": 1138378,
"hitstop": 1140021
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgL",
"hitorientation": "+",
"hitstart": 1140033,
"hitstop": 1140986
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flhA",
"hitorientation": "-",
"hitstart": 1962974,
"hitstop": 1965050
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flhB",
"hitorientation": "-",
"hitstart": 1965043,
"hitstop": 1966191
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flhC",
"hitorientation": "-",
"hitstart": 1977266,
"hitstop": 1977844
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliA",
"hitorientation": "-",
"hitstart": 2001070,
"hitstop": 2001789
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliD",
"hitorientation": "+",
"hitstart": 2003872,
"hitstop": 2005278
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliF",
"hitorientation": "+",
"hitstart": 2013229,
"hitstop": 2014887
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliG",
"hitorientation": "+",
"hitstart": 2014880,
"hitstop": 2015875
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliH",
"hitorientation": "+",
"hitstart": 2015868,
"hitstop": 2016554
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliI",
"hitorientation": "+",
"hitstart": 2016554,
"hitstop": 2017927
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliK",
"hitorientation": "+",
"hitstart": 2018386,
"hitstop": 2019513
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliM",
"hitorientation": "+",
"hitstart": 2020087,
"hitstop": 2021091
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliP",
"hitorientation": "+",
"hitstart": 2021869,
"hitstop": 2022606
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliR",
"hitorientation": "+",
"hitstart": 2022893,
"hitstop": 2023678
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliY",
"hitorientation": "-",
"hitstart": 1999585,
"hitstop": 2000385
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliZ",
"hitorientation": "-",
"hitstart": 2000473,
"hitstop": 2001060
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flk",
"hitorientation": "+",
"hitstart": 2437950,
"hitstop": 2438945
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "gadX",
"hitorientation": "-",
"hitstart": 3664986,
"hitstop": 3665618
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "gspC",
"hitorientation": "-",
"hitstart": 3112091,
"hitstop": 3113049
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "gspL",
"hitorientation": "-",
"hitstart": 3111128,
"hitstop": 3112092
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "gspo",
"hitorientation": "+",
"hitstart": 3465543,
"hitstop": 3466220
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "hcp",
"hitorientation": "-",
"hitstart": 115714,
"hitstop": 117099
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "hlye",
"hitorientation": "-",
"hitstart": 1229483,
"hitstop": 1230538
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "hofq",
"hitorientation": "-",
"hitstart": 3519465,
"hitstop": 3520703
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ibeB",
"hitorientation": "+",
"hitstart": 595600,
"hitstop": 596981
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ibeC",
"hitorientation": "-",
"hitstart": 4148532,
"hitstop": 4150309
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "motA",
"hitorientation": "-",
"hitstart": 1976252,
"hitstop": 1977139
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "motB",
"hitorientation": "-",
"hitstart": 1975329,
"hitstop": 1976255
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "nada",
"hitorientation": "+",
"hitstart": 782085,
"hitstop": 783128
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "nadb",
"hitorientation": "+",
"hitstart": 2710420,
"hitstop": 2712042
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ompA",
"hitorientation": "-",
"hitstart": 1019013,
"hitstop": 1020053
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ompt",
"hitorientation": "-",
"hitstart": 584680,
"hitstop": 585633
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ppdb",
"hitorientation": "-",
"hitstart": 2963153,
"hitstop": 2963716
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "tar/cheM",
"hitorientation": "-",
"hitstart": 1971030,
"hitstop": 1972691
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "upaC",
"hitorientation": "+",
"hitstart": 392973,
"hitstop": 394418
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ycbF",
"hitorientation": "+",
"hitstart": 1003920,
"hitstop": 1004657
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ycbQ",
"hitorientation": "+",
"hitstart": 997859,
"hitstop": 998407
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ycbR",
"hitorientation": "+",
"hitstart": 998490,
"hitstop": 999191
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ycbS",
"hitorientation": "+",
"hitstart": 999216,
"hitstop": 1001816
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ycbT",
"hitorientation": "+",
"hitstart": 1001807,
"hitstop": 1002784
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ycbV",
"hitorientation": "+",
"hitstart": 1003391,
"hitstop": 1003954
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ycfz",
"hitorientation": "-",
"hitstart": 1180479,
"hitstop": 1181267
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ygeH",
"hitorientation": "+",
"hitstart": 2992094,
"hitstop": 2993470
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "yggr",
"hitorientation": "-",
"hitstart": 3094100,
"hitstop": 3095080
}
]
BEAUTIFY_SEROTYPE = [
{
"analysis": "Serotype",
"contigid": "n/a",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": "n/a",
"hitname": "O16:H48",
"hitorientation": "n/a",
"hitstart": "n/a",
"hitstop": "n/a"
}
]
BEAUTIFY_VF = [
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "EC958",
"hitorientation": "+",
"hitstart": 2073473,
"hitstop": 2074658
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ECP",
"hitorientation": "-",
"hitstart": 306807,
"hitstop": 309332
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ECP",
"hitorientation": "-",
"hitstart": 309358,
"hitstop": 310075
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ECP",
"hitorientation": "-",
"hitstart": 310084,
"hitstop": 310700
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ECP",
"hitorientation": "-",
"hitstart": 310746,
"hitstop": 311336
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ECS88",
"hitorientation": "-",
"hitstart": 3308040,
"hitstop": 3308924
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "Z1307",
"hitorientation": "-",
"hitstart": 1019013,
"hitstop": 1020053
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "Z2203",
"hitorientation": "-",
"hitstart": 1588853,
"hitstop": 1590079
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "Z2204",
"hitorientation": "-",
"hitstart": 1588309,
"hitstop": 1588839
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "Z2205",
"hitorientation": "-",
"hitstart": 1587793,
"hitstop": 1588296
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "Z2206",
"hitorientation": "-",
"hitstart": 1586820,
"hitstop": 1587734
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "agn43",
"hitorientation": "+",
"hitstart": 2071539,
"hitstop": 2074658
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "artj",
"hitorientation": "-",
"hitstart": 899844,
"hitstop": 900575
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "aslA",
"hitorientation": "-",
"hitstart": 3984579,
"hitstop": 3986007
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "b2972",
"hitorientation": "-",
"hitstart": 3113543,
"hitstop": 3114352
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cadA",
"hitorientation": "-",
"hitstart": 4356481,
"hitstop": 4358656
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cah",
"hitorientation": "+",
"hitstart": 2073486,
"hitstop": 2074658
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cheA",
"hitorientation": "-",
"hitstart": 1973360,
"hitstop": 1975324
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cheB",
"hitorientation": "-",
"hitstart": 1967452,
"hitstop": 1968501
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cheR",
"hitorientation": "-",
"hitstart": 1968504,
"hitstop": 1969364
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cheW",
"hitorientation": "-",
"hitstart": 1972836,
"hitstop": 1973339
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cheZ",
"hitorientation": "-",
"hitstart": 1966393,
"hitstop": 1967037
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "cs3",
"hitorientation": "-",
"hitstart": 2994460,
"hitstop": 2995092
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "csgD",
"hitorientation": "-",
"hitstart": 1102546,
"hitstop": 1103196
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "csgG",
"hitorientation": "-",
"hitstart": 1100851,
"hitstop": 1101684
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "eae",
"hitorientation": "+",
"hitstart": 314420,
"hitstop": 315232
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ecpA",
"hitorientation": "-",
"hitstart": 310084,
"hitstop": 310671
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ecpB",
"hitorientation": "-",
"hitstart": 309358,
"hitstop": 310026
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ecpC",
"hitorientation": "-",
"hitstart": 306807,
"hitstop": 309332
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ecpD",
"hitorientation": "-",
"hitstart": 305174,
"hitstop": 306817
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ecpE",
"hitorientation": "-",
"hitstart": 304497,
"hitstop": 305250
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ecpR",
"hitorientation": "-",
"hitstart": 310746,
"hitstop": 311336
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ehaB",
"hitorientation": "+",
"hitstart": 392973,
"hitstop": 394418
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "entA",
"hitorientation": "+",
"hitstart": 628551,
"hitstop": 629297
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "entB",
"hitorientation": "+",
"hitstart": 627694,
"hitstop": 628551
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "entC",
"hitorientation": "+",
"hitstart": 624873,
"hitstop": 626060
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "entD",
"hitorientation": "-",
"hitstart": 609459,
"hitstop": 610229
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "entE",
"hitorientation": "+",
"hitstart": 626070,
"hitstop": 627680
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "entF",
"hitorientation": "+",
"hitstart": 614157,
"hitstop": 617980
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "entS",
"hitorientation": "+",
"hitstart": 622300,
"hitstop": 623550
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "espL1",
"hitorientation": "+",
"hitstart": 1803439,
"hitstop": 1804993
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "espL3",
"hitorientation": "-",
"hitstart": 3861987,
"hitstop": 3863864
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "espL4",
"hitorientation": "-",
"hitstart": 4221348,
"hitstop": 4222487
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "espR1",
"hitorientation": "-",
"hitstart": 1544385,
"hitstop": 1545447
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "espX4",
"hitorientation": "+",
"hitstart": 4250703,
"hitstop": 4252283
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "espX5",
"hitorientation": "-",
"hitstart": 4281783,
"hitstop": 4283075
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "espY1",
"hitorientation": "+",
"hitstart": 58474,
"hitstop": 59103
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fdeC",
"hitorientation": "+",
"hitstart": 314357,
"hitstop": 315232
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fepA",
"hitorientation": "-",
"hitstart": 610254,
"hitstop": 612494
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fepB",
"hitorientation": "-",
"hitstart": 623554,
"hitstop": 624510
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fepC",
"hitorientation": "-",
"hitstart": 619384,
"hitstop": 620199
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fepD",
"hitorientation": "-",
"hitstart": 621185,
"hitstop": 622201
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fepE",
"hitorientation": "+",
"hitstart": 618254,
"hitstop": 619387
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fepG",
"hitorientation": "-",
"hitstart": 620196,
"hitstop": 621188
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fes",
"hitorientation": "+",
"hitstart": 612737,
"hitstop": 613939
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimA",
"hitorientation": "+",
"hitstart": 4543115,
"hitstop": 4543663
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimB",
"hitorientation": "+",
"hitstart": 4540957,
"hitstop": 4541559
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimC",
"hitorientation": "+",
"hitstart": 4544355,
"hitstop": 4545029
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimD",
"hitorientation": "-",
"hitstart": 1588853,
"hitstop": 1590079
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimD",
"hitorientation": "+",
"hitstart": 4545096,
"hitstop": 4547732
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimE",
"hitorientation": "+",
"hitstart": 4542037,
"hitstop": 4542633
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimF",
"hitorientation": "-",
"hitstart": 1588309,
"hitstop": 1588839
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimF",
"hitorientation": "+",
"hitstart": 4547742,
"hitstop": 4548272
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimG",
"hitorientation": "-",
"hitstart": 1587793,
"hitstop": 1588296
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimG",
"hitorientation": "+",
"hitstart": 4548285,
"hitstop": 4548788
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimH",
"hitorientation": "+",
"hitstart": 4548808,
"hitstop": 4549710
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fimI",
"hitorientation": "+",
"hitstart": 4543620,
"hitstop": 4544267
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgA",
"hitorientation": "-",
"hitstart": 1130204,
"hitstop": 1130863
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgD",
"hitorientation": "+",
"hitstart": 1131854,
"hitstop": 1132549
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgE",
"hitorientation": "+",
"hitstart": 1132574,
"hitstop": 1133782
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgF",
"hitorientation": "+",
"hitstart": 1133802,
"hitstop": 1134557
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgG",
"hitorientation": "+",
"hitstart": 1134729,
"hitstop": 1135511
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgH",
"hitorientation": "+",
"hitstart": 1135564,
"hitstop": 1136262
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgI",
"hitorientation": "+",
"hitstart": 1136274,
"hitstop": 1137371
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgJ",
"hitorientation": "+",
"hitstart": 1137371,
"hitstop": 1138312
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgK",
"hitorientation": "+",
"hitstart": 1138378,
"hitstop": 1140021
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flgL",
"hitorientation": "+",
"hitstart": 1140033,
"hitstop": 1140986
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flhA",
"hitorientation": "-",
"hitstart": 1962974,
"hitstop": 1965050
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flhB",
"hitorientation": "-",
"hitstart": 1965043,
"hitstop": 1966191
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flhC",
"hitorientation": "-",
"hitstart": 1977266,
"hitstop": 1977844
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliA",
"hitorientation": "-",
"hitstart": 2001070,
"hitstop": 2001789
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliD",
"hitorientation": "+",
"hitstart": 2003872,
"hitstop": 2005278
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliF",
"hitorientation": "+",
"hitstart": 2013229,
"hitstop": 2014887
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliG",
"hitorientation": "+",
"hitstart": 2014880,
"hitstop": 2015875
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliH",
"hitorientation": "+",
"hitstart": 2015868,
"hitstop": 2016554
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliI",
"hitorientation": "+",
"hitstart": 2016554,
"hitstop": 2017927
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliK",
"hitorientation": "+",
"hitstart": 2018386,
"hitstop": 2019513
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliM",
"hitorientation": "+",
"hitstart": 2020087,
"hitstop": 2021091
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliP",
"hitorientation": "+",
"hitstart": 2021869,
"hitstop": 2022606
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliR",
"hitorientation": "+",
"hitstart": 2022893,
"hitstop": 2023678
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliY",
"hitorientation": "-",
"hitstart": 1999585,
"hitstop": 2000385
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "fliZ",
"hitorientation": "-",
"hitstart": 2000473,
"hitstop": 2001060
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "flk",
"hitorientation": "+",
"hitstart": 2437950,
"hitstop": 2438945
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "gadX",
"hitorientation": "-",
"hitstart": 3664986,
"hitstop": 3665618
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "gspC",
"hitorientation": "-",
"hitstart": 3112091,
"hitstop": 3113049
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "gspL",
"hitorientation": "-",
"hitstart": 3111128,
"hitstop": 3112092
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "gspo",
"hitorientation": "+",
"hitstart": 3465543,
"hitstop": 3466220
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "hcp",
"hitorientation": "-",
"hitstart": 115714,
"hitstop": 117099
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "hlye",
"hitorientation": "-",
"hitstart": 1229483,
"hitstop": 1230538
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "hofq",
"hitorientation": "-",
"hitstart": 3519465,
"hitstop": 3520703
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ibeB",
"hitorientation": "+",
"hitstart": 595600,
"hitstop": 596981
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ibeC",
"hitorientation": "-",
"hitstart": 4148532,
"hitstop": 4150309
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "motA",
"hitorientation": "-",
"hitstart": 1976252,
"hitstop": 1977139
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "motB",
"hitorientation": "-",
"hitstart": 1975329,
"hitstop": 1976255
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "nada",
"hitorientation": "+",
"hitstart": 782085,
"hitstop": 783128
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "nadb",
"hitorientation": "+",
"hitstart": 2710420,
"hitstop": 2712042
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ompA",
"hitorientation": "-",
"hitstart": 1019013,
"hitstop": 1020053
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ompt",
"hitorientation": "-",
"hitstart": 584680,
"hitstop": 585633
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ppdb",
"hitorientation": "-",
"hitstart": 2963153,
"hitstop": 2963716
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "tar/cheM",
"hitorientation": "-",
"hitstart": 1971030,
"hitstop": 1972691
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "upaC",
"hitorientation": "+",
"hitstart": 392973,
"hitstop": 394418
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ycbF",
"hitorientation": "+",
"hitstart": 1003920,
"hitstop": 1004657
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ycbQ",
"hitorientation": "+",
"hitstart": 997859,
"hitstop": 998407
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ycbR",
"hitorientation": "+",
"hitstart": 998490,
"hitstop": 999191
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ycbS",
"hitorientation": "+",
"hitstart": 999216,
"hitstop": 1001816
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ycbT",
"hitorientation": "+",
"hitstart": 1001807,
"hitstop": 1002784
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ycbV",
"hitorientation": "+",
"hitstart": 1003391,
"hitstop": 1003954
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ycfz",
"hitorientation": "-",
"hitstart": 1180479,
"hitstop": 1181267
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "ygeH",
"hitorientation": "+",
"hitstart": 2992094,
"hitstop": 2993470
},
{
"analysis": "Virulence Factors",
"contigid": "U00096.3",
"filename": "GCA_000005845.2_ASM584v2_genomic.fna",
"hitcutoff": 90,
"hitname": "yggr",
"hitorientation": "-",
"hitstart": 3094100,
"hitstop": 3095080
}
]
BEAUTIFY_AMR = [
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000001.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "Escherichia coli gyrA conferring resistance to fluoroquinolones",
"hitorientation": "+",
"hitstart": 159252,
"hitstop": 161879
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000001.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "PmrE",
"hitorientation": "+",
"hitstart": 388190,
"hitstop": 389356
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000001.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "PmrF",
"hitorientation": "-",
"hitstart": 134984,
"hitstop": 135952
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000001.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "arnA",
"hitorientation": "-",
"hitstart": 133002,
"hitstop": 134984
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000001.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "baeR",
"hitorientation": "-",
"hitstart": 323408,
"hitstop": 324130
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000001.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "baeS",
"hitorientation": "-",
"hitstart": 324127,
"hitstop": 325530
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000001.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "mdtB",
"hitorientation": "-",
"hitstart": 330021,
"hitstop": 333143
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000001.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "mdtD",
"hitorientation": "-",
"hitstart": 325527,
"hitstop": 326942
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000001.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "mexN",
"hitorientation": "-",
"hitstart": 326943,
"hitstop": 330020
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000003.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "PmrA",
"hitorientation": "+",
"hitstart": 28893,
"hitstop": 29561
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000003.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "PmrB",
"hitorientation": "+",
"hitstart": 29562,
"hitstop": 30662
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000003.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "PmrC",
"hitorientation": "+",
"hitstart": 27253,
"hitstop": 28896
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000003.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "mdtN",
"hitorientation": "+",
"hitstart": 58230,
"hitstop": 59261
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000003.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "mdtO",
"hitorientation": "+",
"hitstart": 59261,
"hitstop": 61312
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000003.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "mdtP",
"hitorientation": "+",
"hitstart": 61309,
"hitstop": 62775
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000004.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "mdtK",
"hitorientation": "+",
"hitstart": 126030,
"hitstop": 127403
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000005.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "ACT-7",
"hitorientation": "-",
"hitstart": 4604,
"hitstop": 5737
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000005.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "mdtM",
"hitorientation": "-",
"hitstart": 187550,
"hitstop": 188782
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000005.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "robA",
"hitorientation": "-",
"hitstart": 251658,
"hitstop": 252527
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000006.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "CRP",
"hitorientation": "-",
"hitstart": 176803,
"hitstop": 177435
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000006.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "gadX",
"hitorientation": "+",
"hitstart": 397,
"hitstop": 1221
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000006.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "mdtE",
"hitorientation": "-",
"hitstart": 5818,
"hitstop": 6975
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000006.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "mexD",
"hitorientation": "-",
"hitstart": 2680,
"hitstop": 5793
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000007.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "H-NS",
"hitorientation": "-",
"hitstart": 187722,
"hitstop": 188135
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000007.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "mdtG",
"hitorientation": "-",
"hitstart": 25571,
"hitstop": 26797
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000007.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "mdtH",
"hitorientation": "-",
"hitstart": 35428,
"hitstop": 36636
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000007.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "phoP",
"hitorientation": "-",
"hitstart": 101156,
"hitstop": 101827
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000007.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "phoQ",
"hitorientation": "-",
"hitstart": 99696,
"hitstop": 101156
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000008.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "emrK",
"hitorientation": "-",
"hitstart": 9140,
"hitstop": 10303
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000008.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "emrY",
"hitorientation": "-",
"hitstart": 7602,
"hitstop": 9140
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000008.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "evgA",
"hitorientation": "+",
"hitstart": 10719,
"hitstop": 11333
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000008.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "evgS",
"hitorientation": "+",
"hitstart": 11338,
"hitstop": 14931
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000008.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "mexD",
"hitorientation": "+",
"hitstart": 104776,
"hitstop": 107889
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000009.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "cpxA",
"hitorientation": "+",
"hitstart": 22429,
"hitstop": 23802
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000009.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "cpxR",
"hitorientation": "+",
"hitstart": 21734,
"hitstop": 22432
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000011.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "Escherichia coli marR mutant resulting in antibiotic resistance",
"hitorientation": "+",
"hitstart": 51100,
"hitstop": 51534
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000011.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "marA",
"hitorientation": "+",
"hitstart": 51554,
"hitstop": 51937
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000012.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "emrA",
"hitorientation": "-",
"hitstart": 312493,
"hitstop": 313665
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000012.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "emrB",
"hitorientation": "-",
"hitstart": 310938,
"hitstop": 312476
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000012.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "emrR",
"hitorientation": "-",
"hitstart": 313792,
"hitstop": 314322
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000013.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "Staphylococcus aureus gyrB conferring resistance to aminocoumarin",
"hitorientation": "-",
"hitstart": 131568,
"hitstop": 133982
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000013.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "emrD",
"hitorientation": "+",
"hitstart": 107782,
"hitstop": 108966
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000013.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "mdtL",
"hitorientation": "+",
"hitstart": 145479,
"hitstop": 146654
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000015.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "Escherichia coli parC conferring resistance to fluoroquinolone",
"hitorientation": "-",
"hitstart": 68709,
"hitstop": 70967
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000015.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "bacA",
"hitorientation": "-",
"hitstart": 104717,
"hitstop": 105538
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000015.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "tolC",
"hitorientation": "+",
"hitstart": 80879,
"hitstop": 82360
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000016.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "ACT-7",
"hitorientation": "+",
"hitstart": 286,
"hitstop": 1431
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000022.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "Mycobacterium tuberculosis rpoB mutants conferring resistance to rifampicin",
"hitorientation": "-",
"hitstart": 22720,
"hitstop": 26748
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000023.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "macA",
"hitorientation": "-",
"hitstart": 5642,
"hitstop": 6757
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000023.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "macB",
"hitorientation": "-",
"hitstart": 3699,
"hitstop": 5645
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000023.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "mdfA",
"hitorientation": "-",
"hitstart": 39796,
"hitstop": 41028
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000024.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "CTX-M-55",
"hitorientation": "-",
"hitstart": 37702,
"hitstop": 38577
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000026.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "Mycobacterium tuberculosis katG mutations conferring resistance to isoniazid",
"hitorientation": "+",
"hitstart": 8536,
"hitstop": 10716
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000027.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "APH(3'')",
"hitorientation": "-",
"hitstart": 10215,
"hitstop": 11018
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000027.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "APH(6)",
"hitorientation": "-",
"hitstart": 9379,
"hitstop": 10215
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000027.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "floR",
"hitorientation": "+",
"hitstart": 5030,
"hitstop": 6244
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000027.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "sul2",
"hitorientation": "-",
"hitstart": 11079,
"hitstop": 11894
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000027.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "tetG",
"hitorientation": "-",
"hitstart": 6844,
"hitstop": 8043
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000028.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "emrE",
"hitorientation": "+",
"hitstart": 30648,
"hitstop": 30980
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000032.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "acrE",
"hitorientation": "+",
"hitstart": 32702,
"hitstop": 33859
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000032.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "acrS",
"hitorientation": "-",
"hitstart": 31641,
"hitstop": 32303
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000032.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "mexD",
"hitorientation": "+",
"hitstart": 33871,
"hitstop": 36975
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000036.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "Klebsiella pneumoniae acrR mutant resulting in high level antibiotic resistance",
"hitorientation": "+",
"hitstart": 107902,
"hitstop": 108495
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000036.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "acrE",
"hitorientation": "-",
"hitstart": 106513,
"hitstop": 107706
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000036.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "mexD",
"hitorientation": "-",
"hitstart": 103341,
"hitstop": 106490
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000036.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "rosA",
"hitorientation": "-",
"hitstart": 125513,
"hitstop": 126733
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000036.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "rosB",
"hitorientation": "-",
"hitstart": 123599,
"hitstop": 125275
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000036.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "vanG",
"hitorientation": "-",
"hitstart": 19876,
"hitstop": 20970
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000037.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "MCR-1",
"hitorientation": "+",
"hitstart": 13553,
"hitstop": 15178
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000050.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "FosA3",
"hitorientation": "+",
"hitstart": 4459,
"hitstop": 4875
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000050.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "mphA",
"hitorientation": "+",
"hitstart": 89,
"hitstop": 994
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000053.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "ErmB",
"hitorientation": "-",
"hitstart": 1455,
"hitstop": 2192
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000062.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "sul1",
"hitorientation": "+",
"hitstart": 452,
"hitstop": 1291
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000064.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "TEM-1",
"hitorientation": "+",
"hitstart": 3455,
"hitstop": 4315
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000080.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "NDM-1",
"hitorientation": "+",
"hitstart": 724,
"hitstop": 1536
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000090.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "aadA11",
"hitorientation": "+",
"hitstart": 690,
"hitstop": 1535
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000090.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Strict",
"hitname": "dfrA25",
"hitorientation": "+",
"hitstart": 36,
"hitstop": 509
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000098.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "catI",
"hitorientation": "-",
"hitstart": 166,
"hitstop": 825
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000101.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "arr-3",
"hitorientation": "+",
"hitstart": 37,
"hitstop": 489
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000104.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "rmtB",
"hitorientation": "+",
"hitstart": 43,
"hitstop": 798
},
{
"analysis": "Antimicrobial Resistance",
"contigid": "MOHB01000106.1",
"filename": "GCA_001891995.1_ASM189199v1_genomic.fna",
"hitcutoff": "Perfect",
"hitname": "aadA5",
"hitorientation": "-",
"hitstart": 115,
"hitstop": 903
}
]
BEAUTIFY_STX1 = [
{
"contig": "lcl|ECI-2644|NODE_8_length_178521_cov_25.218_ID_15",
"genome": "ECI-2644_lcl.fasta",
"probability": 0.9561446,
"start": 174535,
"stop": 175491,
"subtype": "a",
"subtype_gene": "stx1A"
},
{
"contig": "lcl|ECI-2644|NODE_8_length_178521_cov_25.218_ID_15",
"genome": "ECI-2644_lcl.fasta",
"probability": 0.9561446,
"start": 175501,
"stop": 175770,
"subtype": "a",
"subtype_gene": "stx1B"
},
{
"contig": "lcl|ECI-2644|NODE_8_length_178521_cov_25.218_ID_15",
"genome": "ECI-2644_lcl.fasta",
"probability": 0.9561446,
"start": 174544,
"stop": 175491,
"subtype": "a",
"subtype_gene": "stx1A"
},
{
"contig": "lcl|ECI-2644|NODE_8_length_178521_cov_25.218_ID_15",
"genome": "ECI-2644_lcl.fasta",
"probability": 0.9561446,
"start": 175501,
"stop": 175770,
"subtype": "a",
"subtype_gene": "stx1B"
}
]
BEAUTIFY_STX2 = [
{
"contig": "lcl|ECI-2644|NODE_51_length_5713_cov_24.063_ID_101",
"genome": "ECI-2644_lcl.fasta",
"probability": 0.9460619,
"start": 4390,
"stop": 5349,
"subtype": "a",
"subtype_gene": "stx2A"
},
{
"contig": "lcl|ECI-2644|NODE_51_length_5713_cov_24.063_ID_101",
"genome": "ECI-2644_lcl.fasta",
"probability": 0.9460619,
"start": 4109,
"stop": 4378,
"subtype": "a",
"subtype_gene": "stx2B"
}
]
BEAUTIFY_EAE = [
{
"contig": "N/A",
"genome": "GCA_000005845.2_ASM584v2_genomic.fna",
"probability": "N/A",
"start": "N/A",
"stop": "N/A",
"subtype": "Subtype loci not found in genome",
"subtype_gene": "N/A"
}
]
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
from Layer import Layer
class ImageDataSource(Layer):
    srcDimList = [(0,), (0, 1), (0, 1, 2)]  # axes averaged over when computing normalisation stats, chosen by input rank
def __init__(self,para):
Layer.__init__(self,para)
self.lastMiniBatch = True
def createAndInitializeStruct(self):
if self.top.isPadded():
(pHOut,pWOut) = self.top.getPadShape()
(hIn,wIn,cIn) = self.imageDim
# out_shape is padded
outShape = (Layer.batchSize,hIn+2*pHOut,wIn+2*pWOut,cIn)
self.pOut = np.zeros(outShape,dtype=float) # padded output
# out is actual output. slice of pOut
self.out = self.pOut[:,pHOut:-pHOut,pWOut:-pWOut,:]
else:
self.out = np.zeros((Layer.batchSize,)+self.imageDim,dtype=float)
            # no padding: out and pOut alias the same array
self.pOut = self.out
self.label = np.zeros((Layer.batchSize,)+self.labelDim,dtype=float)
self.zeros = np.zeros(self.imageDim)
self.zerosL = np.zeros(self.labelDim)
def setTrainData(self,image,label):
self.trainImage = image
self.trainLabel = label
self.trainNum = self.trainImage.shape[0]
self.imageDim = self.trainImage.shape[1:]
self.labelDim = self.trainLabel.shape[1:]
self.sumAxis = ImageDataSource.srcDimList[len(self.trainImage.shape)-2]
self.trainImageMean = np.mean(self.trainImage,axis=self.sumAxis)
self.trainImageStd = np.std(self.trainImage,axis=self.sumAxis)
self.trainImageNorm = (self.trainImage - self.trainImageMean)/self.trainImageStd
def setTestData(self,image,label):
self.testImage = image
self.testLabel = label
self.testNum = self.testImage.shape[0]
self.testImageNorm = (self.testImage - self.trainImageMean)/self.trainImageStd
def useTrainData(self):
self.currentImageNum = self.trainNum
self.currentImage = self.trainImageNorm
self.currentLabel = self.trainLabel
self.lastMiniBatch = True
def useTestData(self):
self.currentImageNum = self.testNum
self.currentImage = self.testImageNorm
self.currentLabel = self.testLabel
self.lastMiniBatch = True
def getY(self):
return self.label
def forward(self):
Layer.currentBatchSize = Layer.batchSize
if self.lastMiniBatch:
self.lastMiniBatch = False
if not Layer.inferenceMode:
self.perm = np.random.permutation(self.currentImageNum)
else:
self.perm = np.arange(self.currentImageNum)
self.currentStart = 0
self.sampleCount = 0
if (self.currentStart + Layer.batchSize) <= self.currentImageNum:
j = 0
for i in self.perm[self.currentStart:self.currentStart+Layer.batchSize]:
self.out[j] = self.currentImage[i]
self.label[j] = self.currentLabel[i]
j = j+1
self.currentStart = self.currentStart + Layer.batchSize
if self.currentStart == self.currentImageNum:
self.lastMiniBatch = True
else:
j = 0
for i in self.perm[self.currentStart:]:
self.out[j] = self.currentImage[i]
self.label[j] = self.currentLabel[i]
j = j+1
Layer.currentBatchSize = j
for k in range(j,Layer.batchSize):
self.out[k] = self.zeros
self.label[k] = self.zerosL
self.lastMiniBatch = True
self.sampleCount = self.sampleCount + Layer.currentBatchSize
def currentDataSetSize(self):
return self.currentImageNum
def endOfEpoch(self):
return self.lastMiniBatch
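# --- Illustration (not part of the original file) ---------------------------
# A minimal, self-contained sketch of the mini-batch scheme used in forward():
# shuffle the sample indices once per epoch, emit full batches, and zero-pad the
# final partial batch while remembering how many samples are real.  The helper
# name toy_batches is hypothetical and is not used by ImageDataSource itself.
def toy_batches(images, batch_size, shuffle=True):
    n = images.shape[0]
    perm = np.random.permutation(n) if shuffle else np.arange(n)
    for start in range(0, n, batch_size):
        idx = perm[start:start + batch_size]
        batch = np.zeros((batch_size,) + images.shape[1:], dtype=images.dtype)
        batch[:len(idx)] = images[idx]   # real samples first, zeros as padding
        yield batch, len(idx)            # len(idx) plays the role of currentBatchSize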
|
default_app_config = "oscarbluelight.dashboard.offers.apps.OffersDashboardConfig"
|
from cli.application import CliApplication
if __name__ == "__main__" or __name__ == "src.cli.__main__":
cli = CliApplication()
cli.run()
|
from typing import Any, Union
import numpy as np
import biorbd
from bioptim import Solution
def compute_error_single_shooting(
time: Union[np.ndarray, list],
n_shooting: int,
model: biorbd.Model,
q: np.ndarray,
q_integrated: np.ndarray,
duration: float = None,
):
"""
Compute the error between the solution of the OCP and the solution of the integrated OCP
Parameters
----------
time : np.ndarray
Time vector
n_shooting : int
Number of shooting points
model : biorbd.Model
Model
q : np.ndarray
ocp generalized coordinates
q_integrated : np.ndarray
integrated generalized coordinates
duration: float
        The duration over which to report the error in states between the two solutions
Returns
-------
    tuple
        The error between the two solutions (translation error, rotation error)
"""
duration = time[-1] if duration is None else duration
if time[-1] < duration:
raise ValueError(
f"Single shooting integration duration must be smaller than ocp duration :{time[-1]} s"
)
# get the index of translation and rotation dof
trans_idx = []
rot_idx = []
for i in range(model.nbQ()):
if model.nameDof()[i].to_string()[-4:-1] == "Rot":
rot_idx += [i]
else:
trans_idx += [i]
rot_idx = np.array(rot_idx)
trans_idx = np.array(trans_idx)
sn_1s = int(n_shooting / time[-1] * duration) # shooting node at {duration} second
single_shoot_error_r = (
rmse(q[rot_idx, sn_1s], q_integrated[rot_idx, sn_1s]) * 180 / np.pi if len(rot_idx) > 0 else np.nan
)
single_shoot_error_t = (
(rmse(q[trans_idx, sn_1s], q_integrated[trans_idx, sn_1s]) / 1000) if len(trans_idx) > 0 else np.nan
)
return (
single_shoot_error_t,
single_shoot_error_r,
)
def stack_states(states: list[dict], key: str = "q"):
"""
    Stack the states of all phases into one array
Parameters
----------
states : list[dict]
List of dictionaries containing the states
key : str
Key of the states to stack such as "q" or "qdot"
"""
    the_tuple = (s[key][:, :-1] if i < len(states) - 1 else s[key][:, :] for i, s in enumerate(states))
return np.hstack(the_tuple)
def stack_controls(controls: list[dict], key: str = "tau"):
"""
Stack the controls in one vector
Parameters
----------
controls : list[dict]
List of dictionaries containing the controls
key : str
Key of the controls to stack such as "tau" or "qddot"
"""
    the_tuple = (c[key][:, :-1] if i < len(controls) - 1 else c[key][:, :] for i, c in enumerate(controls))
return np.hstack(the_tuple)
def define_time(time: list, n_shooting: tuple):
"""
Create the time vector
Parameters
----------
time : list
List of duration of each phase of the simulation
n_shooting : tuple
Number of shooting points for each phase
"""
the_tuple = (
np.linspace(0, float(time[i]) - 1 / n_shooting[i] * float(time[i]), n_shooting[i])
if i < len(time)
else np.linspace(float(time[i]), float(time[i]) + float(time[i + 1]), n_shooting[i] + 1)
for i, t in enumerate(time)
)
return np.hstack(the_tuple)
def rmse(predictions, targets) -> float:
"""
Compute the Root Mean Square Error
Parameters
----------
predictions : numpy.array
Predictions
targets : numpy.array
Targets
Returns
-------
rmse : float
Root Mean Square Error
"""
return np.sqrt(((predictions - targets) ** 2).mean())
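# --- Usage sketch (illustrative, not part of the original file) -------------
# Dummy two-phase data, not an actual OCP solution: stack_states drops the
# duplicated boundary node of every phase except the last, and rmse compares
# two stacked trajectories.
if __name__ == "__main__":
    phase_1 = {"q": np.array([[0.0, 0.1, 0.2]])}
    phase_2 = {"q": np.array([[0.2, 0.3, 0.4, 0.5]])}
    q_stacked = stack_states([phase_1, phase_2], key="q")
    print(q_stacked)                           # the boundary node 0.2 appears once
    print(rmse(q_stacked, q_stacked + 0.01))   # ~0.01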
|
'''
Created on Dec 4, 2018
@author: gsnyder
Get a list of user group objects
'''
import json
from blackduck.HubRestApi import HubInstance
hub = HubInstance()
user_groups = hub.get_user_groups()
if 'totalCount' in user_groups and user_groups['totalCount'] > 0:
print(json.dumps(user_groups))
else:
print("No user_groups found") |
# $Id$
#
# Copyright (C) 2008 Greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit.sping import pid
import math, re
from rdkit.sping.PIL.pidPIL import PILCanvas
from rdkit.Chem.Draw.canvasbase import CanvasBase
faceMap = {'sans': 'helvetica', 'serif': 'times'}
def convertColor(color):
color = pid.Color(color[0], color[1], color[2])
return color
class Canvas(CanvasBase):
def __init__(self, size, name, imageType='png'):
if imageType == "pdf":
from rdkit.sping.PDF.pidPDF import PDFCanvas as _Canvas
elif imageType == "ps":
from rdkit.sping.PS.pidPS import PSCanvas as _Canvas #@UnresolvedImport
elif imageType == "svg":
from rdkit.sping.SVG.pidSVG import SVGCanvas as _Canvas
elif imageType == "png":
from rdkit.sping.PIL.pidPIL import PILCanvas as _Canvas
else:
raise ValueError('unrecognized format: %s' % imageType)
self.canvas = _Canvas(size=size, name=name)
if hasattr(self.canvas, '_image'):
self._image = self.canvas._image
else:
self._image = None
self.size = size
def addCanvasLine(self, p1, p2, color=(0, 0, 0), color2=None, **kwargs):
if color2 and color2 != color:
mp = (p1[0] + p2[0]) / 2., (p1[1] + p2[1]) / 2.
color = convertColor(color)
self.canvas.drawLine(p1[0], p1[1], mp[0], mp[1], color=color,
width=int(kwargs.get('linewidth', 1)), dash=kwargs.get('dash', None))
color2 = convertColor(color2)
self.canvas.drawLine(mp[0], mp[1], p2[0], p2[1], color=color2,
width=int(kwargs.get('linewidth', 1)), dash=kwargs.get('dash', None))
else:
color = convertColor(color)
width = kwargs.get('linewidth', 1)
self.canvas.drawLine(p1[0], p1[1], p2[0], p2[1], color=color, width=int(width),
dash=kwargs.get('dash', None))
def addCanvasText(self, text, pos, font, color=(0, 0, 0), **kwargs):
text = re.sub(r'\<.+?\>', '', text)
font = pid.Font(face=faceMap[font.face], size=font.size)
txtWidth, txtHeight = self.canvas.stringBox(text, font)
bw, bh = txtWidth + txtHeight * 0.4, txtHeight * 1.4
offset = txtWidth * pos[2]
labelP = pos[0] - txtWidth / 2 + offset, pos[1] + txtHeight / 2
color = convertColor(color)
self.canvas.drawString(text, labelP[0], labelP[1], font, color=color)
return (bw, bh, offset)
def addCanvasPolygon(self, ps, color=(0, 0, 0), fill=True, stroke=False, **kwargs):
if not fill and not stroke:
return
edgeWidth = kwargs.get('lineWidth', 0)
edgeColor = pid.transparent
color = convertColor(color)
if not stroke:
edgeWidth = 0
edgeColor = pid.transparent
else:
edgeWidth = kwargs.get('lineWidth', 1)
edgeColor = color
if not fill:
fillColor = pid.transparent
else:
fillColor = color
self.canvas.drawPolygon(ps, edgeColor=edgeColor, edgeWidth=int(edgeWidth), fillColor=fillColor,
closed=1)
def addCanvasDashedWedge(self, p1, p2, p3, dash=(2, 2), color=(0, 0, 0), color2=None, **kwargs):
color = convertColor(color)
dash = (4, 4)
pts1 = self._getLinePoints(p1, p2, dash)
pts2 = self._getLinePoints(p1, p3, dash)
if len(pts2) < len(pts1):
pts2, pts1 = pts1, pts2
for i in range(len(pts1)):
self.canvas.drawLine(pts1[i][0], pts1[i][1], pts2[i][0], pts2[i][1], color=color, width=1)
def flush(self):
self.canvas.flush()
def save(self):
self.canvas.save()
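# --- Usage sketch (illustrative, not part of the original file) -------------
# Assuming the sping PIL backend is installed, the Canvas can be driven
# directly; RDKit's depiction code is what normally issues these calls.
if __name__ == '__main__':
  demo = Canvas(size=(200, 200), name='demo.png', imageType='png')
  demo.addCanvasLine((10, 10), (190, 190), color=(1, 0, 0), linewidth=2)
  demo.addCanvasPolygon([(20, 180), (100, 20), (180, 180)], color=(0, 0, 1),
                        fill=False, stroke=True)
  demo.save()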
|
import json
import pprint
import sys
from .backuptools import BackupResource, BackupTools
from .googledriveclient import GoogleDriveClient
if __name__ == '__main__':
config_file = sys.argv[1]
args = sys.argv[2:]
config: dict = None
with open(config_file) as f:
config = json.load(f)
tools = BackupTools(config)
result = tools.exec(*args)
pprint.pprint(result)
|
from django.contrib import messages
from django.contrib.auth import login, logout
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from core.email_backend import EmailBackend
# Create your views here.
def go_index(request):
return render(request, 'index.html')
def go_index2(request):
return render(request, 'index2.html')
def go_index3(request):
return render(request, 'index3.html')
# LOGIN
def go_login_page(request):
return render(request, 'login/login.html')
def go_login_forgot(request):
return render(request, 'login/forgot-password.html')
def go_register(request):
return render(request, 'login/register.html')
def go_recovery(request):
return render(request, 'login/recovery-password.html')
def do_login(request):
if request.method != 'POST':
        return HttpResponse('<h2>Method Not Allowed</h2>')
else:
user = EmailBackend.authenticate(
request,
username=request.POST.get('email'),
password=request.POST.get('password')
)
if user is not None:
login(request, user)
if user.user_type == '1':
return HttpResponseRedirect(reverse('admin_home'))
elif user.user_type == '2':
return HttpResponseRedirect(reverse('staff_home'))
elif user.user_type == '3':
return HttpResponseRedirect(reverse('student_home'))
else:
                return HttpResponse('<h2>Something went wrong during login</h2>')
else:
messages.error(request, 'Invalid Login Details')
return HttpResponseRedirect('/')
def get_user_detail(request):
    if request.user.is_authenticated:
return HttpResponse(f'<h2>Email: {request.user.email} | UserType: {request.user.user_type}</h2>')
else:
        return HttpResponse('<h2>Please log in!</h2>')
def do_logout(request):
logout(request)
return HttpResponseRedirect('/')
|
import fnmatch
import importlib
import os
from setuptools import setup
import jenkins_monitor.version
# Let's add this later
# long_description = open('README.txt').read()
def discover_packages(base):
"""
Discovers all sub-packages for a base package
Note: does not work with namespaced packages (via pkg_resources
or similar)
"""
mod = importlib.import_module(base)
mod_fname = mod.__file__
mod_dirname = os.path.normpath(os.path.dirname(mod_fname))
for root, _dirnames, filenames in os.walk(mod_dirname):
for _ in fnmatch.filter(filenames, '__init__.py'):
yield '.'.join(os.path.relpath(root).split(os.sep))
def load_requirements(fname):
with open(fname, 'r') as reqfile:
reqs = reqfile.read()
return list(filter(None, reqs.strip().splitlines()))
REQUIREMENTS = dict()
REQUIREMENTS['install'] = load_requirements('requirements.txt')
setup_args = dict(
name='monitor_jenkins',
version=jenkins_monitor.version.__version__,
description='Monitor basic Jenkins functions',
# long_description = long_description,
author='Couchbase Build and Release Team',
author_email='[email protected]',
license='Apache License, Version 2.0',
packages=list(discover_packages('jenkins_monitor')),
include_package_data=True,
install_requires=REQUIREMENTS['install'],
entry_points={
'console_scripts': [
'monitor_jenkins = jenkins_monitor.scripts.monitor_jenkins_prog:main',
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 3.6',
]
)
if __name__ == '__main__':
setup(**setup_args)
|
# Check that -vv makes the line number of the failing RUN command clear.
# (-v is actually sufficient in the case of the internal shell.)
#
# RUN: env -u FILECHECK_OPTS not %{lit} -j 1 -vv %{inputs}/shtest-run-at-line > %t.out
# RUN: FileCheck --input-file %t.out %s
#
# END.
# CHECK: Testing: 4 tests
# In the case of the external shell, we check for only RUN lines in stderr in
# case some shell implementations format "set -x" output differently.
# CHECK-LABEL: FAIL: shtest-run-at-line :: external-shell/basic.txt
# CHECK: Script:
# CHECK: RUN: at line 4{{.*}} true
# CHECK-NEXT: RUN: at line 5{{.*}} false
# CHECK-NEXT: RUN: at line 6{{.*}} true
# CHECK: RUN: at line 4
# CHECK: RUN: at line 5
# CHECK-NOT: RUN
# CHECK-LABEL: FAIL: shtest-run-at-line :: external-shell/line-continuation.txt
# CHECK: Script:
# CHECK: RUN: at line 4{{.*}} echo 'foo bar' | FileCheck
# CHECK-NEXT: RUN: at line 6{{.*}} echo 'foo baz' | FileCheck
# CHECK-NEXT: RUN: at line 9{{.*}} echo 'foo bar' | FileCheck
# CHECK: RUN: at line 4
# CHECK: RUN: at line 6
# CHECK-NOT: RUN
# CHECK-LABEL: FAIL: shtest-run-at-line :: internal-shell/basic.txt
# CHECK: Script:
# CHECK: : 'RUN: at line 1'; true
# CHECK-NEXT: : 'RUN: at line 2'; false
# CHECK-NEXT: : 'RUN: at line 3'; true
# CHECK: Command Output (stdout)
# CHECK: $ ":" "RUN: at line 1"
# CHECK-NEXT: $ "true"
# CHECK-NEXT: $ ":" "RUN: at line 2"
# CHECK-NEXT: $ "false"
# CHECK-NOT: RUN
# CHECK-LABEL: FAIL: shtest-run-at-line :: internal-shell/line-continuation.txt
# CHECK: Script:
# CHECK: : 'RUN: at line 1'; : first line continued to second line
# CHECK-NEXT: : 'RUN: at line 3'; echo 'foo bar' | FileCheck
# CHECK-NEXT: : 'RUN: at line 5'; echo 'foo baz' | FileCheck
# CHECK-NEXT: : 'RUN: at line 8'; echo 'foo bar' | FileCheck
# CHECK: Command Output (stdout)
# CHECK: $ ":" "RUN: at line 1"
# CHECK-NEXT: $ ":" "first" "line" "continued" "to" "second" "line"
# CHECK-NEXT: $ ":" "RUN: at line 3"
# CHECK-NEXT: $ "echo" "foo bar"
# CHECK-NEXT: $ "FileCheck" "{{.*}}"
# CHECK-NEXT: $ ":" "RUN: at line 5"
# CHECK-NEXT: $ "echo" "foo baz"
# CHECK-NEXT: $ "FileCheck" "{{.*}}"
# CHECK-NOT: RUN
|
# -*- coding: utf-8 -*-
# Peter Zatka-Haas - April 2009
import os
import pickle
import random
quotes_file = "commands/quotes.db"
class Quotes:
"""
Users assign quotes, which momo stores.
Momo can output the quote in channel later.
Momo will respond:
!setquote ..
Momo registers the quote .. in a dictionary
where the KEY is the sender of the command. Any previous quote
already assigned will be overwritten
!quote
Momo says the quote assigned to the sender of this command
If no quote has been assigned then momo responds accordingly
!quote <user>
Momo says the quote assigned to the <user> name given.
If no quote has been assigned then momo responds accordingly
!quote NICKNAME
Momo will return a random quote from the dictionary
"""
def __init__(self, bot):
self.bot = bot
self.quotes = {}
self.__load_quotes()
bot.register_command('setquote', self.set_quote)
bot.register_command('quote', self.get_quote)
def set_quote(self, data):
self.quotes[data['username']] = data['message']
self.bot.say("Okay, %s" % data['username'], data['channel'])
self.__store_quotes()
def get_quote(self, data):
requester = data['username']
quotee = data['message']
quotee = quotee.strip()
if len(quotee) == 0:
quotee = requester
if quotee == self.bot.nickname and self.quotes:
            quotee = random.choice(list(self.quotes.keys()))
        self.bot.say(self.__get_quote(requester, quotee), data['channel'])
def __get_quote(self, requester, quotee):
if quotee in self.quotes:
return '"%s" - %s' % (self.quotes[quotee], quotee)
elif quotee == requester:
return 'Sorry, you never set a quote. Try the setquote command!'
elif quotee == self.bot.nickname:
return 'Sorry, but there are no quotes. Try setting one!'
else:
return "%s hasn't set a quote yet." % quotee
def __store_quotes(self):
f = open(quotes_file, 'wb')
pickle.dump(self.quotes, f)
f.close()
def __load_quotes(self):
if os.path.exists(quotes_file):
f = open(quotes_file, 'rb')
self.quotes = pickle.load(f)
f.close()
else:
self.quotes = {}
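# --- Usage sketch (illustrative, not part of the original file) -------------
# A minimal fake bot is enough to exercise the handlers above; FakeBot is
# hypothetical and only mirrors the two methods Quotes relies on.
class FakeBot:
    nickname = "momo"
    def register_command(self, name, handler):
        print("registered command: %s" % name)
    def say(self, text, channel=None):
        print("say: %s" % text)
if __name__ == "__main__":
    q = Quotes(FakeBot())
    q.get_quote({'username': 'alice', 'message': '', 'channel': '#demo'})
    # -> "Sorry, you never set a quote. Try the setquote command!"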
|
#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(name='screenAlign',
version='1.5',
py_modules=['screenAlign'],
author='Fahrstuhl',
author_email='[email protected]',
url='https://github.com/fahrstuhl/screenAlign.py',
)
|
# ☆ omajinai (good-luck template boilerplate) ☆ #
import sys
import math
from math import ceil, floor
import itertools
from functools import lru_cache
from collections import deque
sys.setrecursionlimit(10000000)
input=lambda : sys.stdin.readline().rstrip()
'''''✂'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
n,m=map(int,input().split())
l_max=1
r_min=10**5
for i in range(m):
l,r=map(int,input().split())
l_max=max(l_max,l)
r_min=min(r_min,r)
print(max(r_min-l_max+1,0))
|
import math
N=int(input("Enter values up to which you need to find prime numbers"))
primeflag=[0 for i in range(N+1)]
for i in range(2,int(math.sqrt(N))+1):
for j in range(i*i,N+1,i):
primeflag[j]=1
primeflag[0]=1
primeflag[1]=1
for i in range(N + 1):
    if primeflag[i] == 0:
        print(str(i) + " is prime")
|
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# This is a very simple example of some code
# which will use files and will try to call an external REST API
# so - we will try to create mocks for these methods
import requests
class MySuperProgram():
def read_string_from_file(self):
""" This function reads first string in file and
returns this string. """
with open('my_file.txt', 'r') as f:
result = f.readline()
return result
def read_and_sort_all_strings(self):
""" This function reads all strings from file and returns
sorted list of strings.
"""
with open('my_file.txt', 'r') as f:
result = f.readlines()
return sorted(result)
def title_all_words_in_line(self):
""" This function reads all strings from file and returns
sorted list of strings.
"""
with open('my_file.txt', 'r') as f:
result = f.readline()
return result.title()
def get_current_ip(self):
""" This function returns the current external ip of the host. """
res = requests.get('https://api.ipify.org/?format=json')
data = res.json()
return data['ip']
def get_current_country(self):
""" This function returns the current external ip of the host. """
res = requests.get('https://api.ipify.org/?format=json')
data = res.json()
return data['country']
def get_current_status_code(self):
res = requests.get('https://api.ipify.org/?format=json')
return res
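# --- Test sketch (illustrative, not part of the original file) --------------
# As the header notes, these methods are meant to be mocked; one way, assuming
# the standard library's unittest.mock is acceptable, is to patch requests.get
# so that no real HTTP request is made.
if __name__ == '__main__':
    from unittest.mock import patch, MagicMock
    fake_response = MagicMock()
    fake_response.json.return_value = {'ip': '203.0.113.7'}
    with patch('requests.get', return_value=fake_response):
        print(MySuperProgram().get_current_ip())   # -> 203.0.113.7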
|
import pyxb
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
schema_path = '%s/../schemas/xsi-type.xsd' % (os.path.dirname(__file__),)
code = pyxb.binding.generate.GeneratePython(schema_location=schema_path)
rv = compile(code, 'test', 'exec')
eval(rv)
originalOneFloor = oneFloor
def oneFloorCtor (*args, **kw):
return restaurant(*args, **kw)
originalOneFloor._SetAlternativeConstructor(oneFloorCtor)
from pyxb.exceptions_ import *
import unittest
class TestXSIType (unittest.TestCase):
def testFailsNoType (self):
xml = '<elt/>'
doc = pyxb.utils.domutils.StringToDOM(xml)
self.assertRaises(pyxb.AbstractInstantiationError, CreateFromDOM, doc.documentElement)
def testDirect (self):
xml = '<notAlt attrOne="low"><first>content</first></notAlt>'
doc = pyxb.utils.domutils.StringToDOM(xml)
instance = CreateFromDOM(doc.documentElement)
self.assertEqual('content', instance.first)
self.assertEqual('low', instance.attrOne)
def testSubstitutions (self):
xml = '<elt attrOne="low" xsi:type="alt1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><first>content</first></elt>'
doc = pyxb.utils.domutils.StringToDOM(xml)
instance = CreateFromDOM(doc.documentElement)
self.assertEqual('content', instance.first)
self.assertEqual('low', instance.attrOne)
xml = '<elt attrTwo="hi" xsi:type="alt2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><second/></elt>'
doc = pyxb.utils.domutils.StringToDOM(xml)
instance = CreateFromDOM(doc.documentElement)
self.assertTrue(instance.second is not None)
self.assertEqual('hi', instance.attrTwo)
def testMultilevel (self):
xml = '<concreteBase><basement>dirt floor</basement></concreteBase>'
doc = pyxb.utils.domutils.StringToDOM(xml)
instance = CreateFromDOM(doc.documentElement)
self.assertEqual('dirt floor', instance.basement)
xml = '<oneFloor xsi:type="restaurant" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><basement>concrete</basement><lobby>tiled</lobby><room>eats</room></oneFloor>'
doc = pyxb.utils.domutils.StringToDOM(xml)
instance = CreateFromDOM(doc.documentElement)
self.assertEqual(concreteBase_.basement, instance.__class__.basement)
self.assertEqual(oneFloor_.lobby, instance.__class__.lobby)
self.assertEqual(restaurant_.room, instance.__class__.room)
self.assertEqual('tiled', instance.lobby)
self.assertEqual('eats', instance.room)
def testConstructor (self):
kw = { 'basement' : 'concrete',
'lobby' : 'tiled',
'room' : 'eats' }
ctd = restaurant_(**kw)
dom = ctd.toDOM().documentElement
xml = '<restaurant><basement>concrete</basement><lobby>tiled</lobby><room>eats</room></restaurant>'
self.assertEqual(xml, dom.toxml())
rest = restaurant(**kw)
dom = rest.toDOM().documentElement
self.assertEqual(xml, dom.toxml())
self.assertRaises(pyxb.AbstractInstantiationError, originalOneFloor, **kw)
def testNesting (self):
instance = block(oneFloor=[ restaurant(basement="dirt", lobby="tile", room="messy"),
restaurant(basement="concrete", lobby="carpet", room="tidy")])
self.assertEqual('dirt', instance.oneFloor[0].basement)
self.assertEqual('messy', instance.oneFloor[0].room)
self.assertEqual('concrete', instance.oneFloor[1].basement)
self.assertEqual('tidy', instance.oneFloor[1].room)
xml = instance.toxml()
dom = pyxb.utils.domutils.StringToDOM(xml)
instance2 = CreateFromDOM(dom.documentElement)
r2 = instance2.toxml()
r3 = instance2.toxml()
self.assertEqual(r2, r3)
self.assertEqual(xml, r2)
if __name__ == '__main__':
unittest.main()
|
import logging
from collections import namedtuple
import numpy as np
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
import torch.optim as optim
HIDDEN_SIZE = 128
BATCH_SIZE = 16
PERCENTILE = 70
from cartpole import CartPole
class Net(nn.Module):
def __init__(self, obs_size, hidden_size, n_actions):
super(Net, self).__init__()
self.net = nn.Sequential(
nn.Linear(obs_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, n_actions)
)
def forward(self, x):
return self.net(x)
class CartPoleTraining:
""" Training cartpole using cross entropy agoritham based on the code from the book
'Deep Reinforcement Learning Hands-On'
"""
Episode = namedtuple('Episode', field_names=['reward', 'steps'])
EpisodeStep = namedtuple('EpisodeStep', field_names=['observation', 'action'])
def __init__(self) -> None:
self.cartpole = CartPole()
def iterate_batches(self, net, batch_size):
batch = []
episode_reward = 0.0
episode_steps = []
#start the episode
self.cartpole.episode_start()
state = self.cartpole.get_state()
obs = self.cartpole.state_to_gym(state)
sm = nn.Softmax(dim=1)
while True:
obs_v = torch.FloatTensor([obs])
act_probs_v = sm(net(obs_v))
act_probs = act_probs_v.data.numpy()[0]
action = np.random.choice(len(act_probs), p=act_probs)
bonsai_action = self.cartpole.gym_to_action(action)
self.cartpole.episode_step(bonsai_action)
is_done = self.cartpole.halted()
reward = self.cartpole.get_last_reward()
next_obs = self.cartpole.state_to_gym(self.cartpole.get_state())
episode_reward += reward
step = self.EpisodeStep(observation=obs, action=action)
episode_steps.append(step)
if is_done:
e = self.Episode(reward=episode_reward, steps=episode_steps)
batch.append(e)
episode_reward = 0.0
episode_steps = []
self.cartpole.episode_finish("")
self.cartpole.episode_start()
state = self.cartpole.get_state()
next_obs = self.cartpole.state_to_gym(state)
if len(batch) == batch_size:
yield batch
batch = []
obs = next_obs
def filter_batch(self, batch, percentile):
rewards = list(map(lambda s: s.reward, batch))
reward_bound = np.percentile(rewards, percentile)
reward_mean = float(np.mean(rewards))
train_obs = []
train_act = []
for reward, steps in batch:
if reward < reward_bound:
continue
train_obs.extend(map(lambda step: step.observation, steps))
train_act.extend(map(lambda step: step.action, steps))
train_obs_v = torch.FloatTensor(train_obs)
train_act_v = torch.LongTensor(train_act)
return train_obs_v, train_act_v, reward_bound, reward_mean
def train(self):
obs_size = self.cartpole._env.unwrapped.observation_space.shape[0]
n_actions = self.cartpole._env.unwrapped.action_space.n
net = Net(obs_size, HIDDEN_SIZE, n_actions)
objective = nn.CrossEntropyLoss()
optimizer = optim.Adam(params=net.parameters(), lr=0.01)
writer = SummaryWriter(comment="-cartpole")
for iter_no, batch in enumerate(self.iterate_batches(net, BATCH_SIZE)):
obs_v, acts_v, reward_b, reward_m = self.filter_batch(batch, PERCENTILE)
optimizer.zero_grad()
action_scores_v = net(obs_v)
loss_v = objective(action_scores_v, acts_v)
loss_v.backward()
optimizer.step()
#env.render()
print("%d: loss=%.3f, reward_mean=%.1f, rw_bound=%.1f" % (
iter_no, loss_v.item(), reward_m, reward_b))
writer.add_scalar("loss", loss_v.item(), iter_no)
writer.add_scalar("reward_bound", reward_b, iter_no)
writer.add_scalar("reward_mean", reward_m, iter_no)
if reward_m > 199:
print("Solved!")
break
writer.close()
if __name__ == '__main__':
logging.basicConfig()
log = logging.getLogger("cartpole")
log.setLevel(level='INFO')
cross_entropy_agent = CartPoleTraining()
cross_entropy_agent.train()
#TODO save the model after training and load it in agent
# we will use our environment (wrapper of OpenAI env)
cartpole = CartPole()
|
from dateutils import DateUtils
import datetime
import unittest
CURRENT_YEAR = datetime.datetime.now().year
class TestDateUtils(unittest.TestCase):
def test_last_day_of_month(self):
self.assertEqual(DateUtils.last_day_of_month(2019, 3), 31)
self.assertEqual(DateUtils.last_day_of_month(2018, 7), 31)
self.assertEqual(DateUtils.last_day_of_month(2016, 2), 29)
self.assertEqual(DateUtils.last_day_of_month(2017, 2), 28)
def test_date_from_string_exact(self):
dates = DateUtils.date_from_string("01/01/2018")
self.assertEqual(dates[0], datetime.date(2018, 1, 1))
self.assertEqual(dates[1], datetime.date(2018, 1, 1))
dates = DateUtils.date_from_string("31/01/2018")
self.assertEqual(dates[0], datetime.date(2018, 1, 31))
self.assertEqual(dates[1], datetime.date(2018, 1, 31))
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("01/0/2018")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("01/13/2018")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("01/99/2018")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("32/1/2018")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("0/1/2018")
dates = DateUtils.date_from_string("12/06/2323")
self.assertEqual(dates[0], datetime.date(2323, 6, 12))
self.assertEqual(dates[1], datetime.date(2323, 6, 12))
def test_date_from_string_year(self):
dates = DateUtils.date_from_string("2018")
self.assertEqual(dates[0], datetime.date(2018, 1, 1))
self.assertEqual(dates[1], datetime.date(2018, 12, 31))
dates = DateUtils.date_from_string("3000")
self.assertEqual(dates[0], datetime.date(3000, 1, 1))
self.assertEqual(dates[1], datetime.date(3000, 12, 31))
dates = DateUtils.date_from_string("1950")
self.assertEqual(dates[0], datetime.date(1950, 1, 1))
self.assertEqual(dates[1], datetime.date(1950, 12, 31))
# We don't support years that don't have four digits.
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("659")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("23")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("1")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("65900")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("100000000000")
def test_date_from_string_month(self):
dates = DateUtils.date_from_string("12/2018")
self.assertEqual(dates[0], datetime.date(2018, 12, 1))
self.assertEqual(dates[1], datetime.date(2018, 12, 31))
dates = DateUtils.date_from_string("2/2016")
self.assertEqual(dates[0], datetime.date(2016, 2, 1))
self.assertEqual(dates[1], datetime.date(2016, 2, 29))
dates = DateUtils.date_from_string("02/2016")
self.assertEqual(dates[0], datetime.date(2016, 2, 1))
self.assertEqual(dates[1], datetime.date(2016, 2, 29))
# We don't support years that don't have four digits.
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("02/232")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("111/2012")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("0/2012")
def test_date_from_string_week(self):
dates = DateUtils.date_from_string("w1/2019")
self.assertEqual(dates[0], datetime.date(2018, 12, 31))
self.assertEqual(dates[1], datetime.date(2019, 1, 6))
dates = DateUtils.date_from_string("w01/2019")
self.assertEqual(dates[0], datetime.date(2018, 12, 31))
self.assertEqual(dates[1], datetime.date(2019, 1, 6))
dates = DateUtils.date_from_string("w01")
self.assertEqual(dates[0], DateUtils.from_week_number(CURRENT_YEAR, 1))
self.assertEqual(dates[1], DateUtils.from_week_number(CURRENT_YEAR, 1, end=True))
dates = DateUtils.date_from_string("w52/2016")
self.assertEqual(dates[0], datetime.date(2016, 12, 26))
self.assertEqual(dates[1], datetime.date(2017, 1, 1))
dates = DateUtils.date_from_string("w1/2017")
self.assertEqual(dates[0], datetime.date(2017, 1, 2))
self.assertEqual(dates[1], datetime.date(2017, 1, 8))
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("w02/232")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("w111/2012")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("w0/2012")
def test_date_from_string_quarter(self):
dates = DateUtils.date_from_string("q1/2019")
self.assertEqual(dates[0], datetime.date(2019, 1, 1))
self.assertEqual(dates[1], datetime.date(2019, 3, 31))
dates = DateUtils.date_from_string("Q1/2019")
self.assertEqual(dates[0], datetime.date(2019, 1, 1))
self.assertEqual(dates[1], datetime.date(2019, 3, 31))
dates = DateUtils.date_from_string("Q2")
self.assertEqual(dates[0], datetime.date(CURRENT_YEAR, 4, 1))
self.assertEqual(dates[1], datetime.date(CURRENT_YEAR, 6, 30))
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("Q2/232")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("Q2/00232")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("Q5/2012")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("Q0/2012")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("Q0234/2012")
def test_date_from_string_half(self):
dates = DateUtils.date_from_string("h1/2019")
self.assertEqual(dates[0], datetime.date(2019, 1, 1))
self.assertEqual(dates[1], datetime.date(2019, 6, 30))
dates = DateUtils.date_from_string("H1/2019")
self.assertEqual(dates[0], datetime.date(2019, 1, 1))
self.assertEqual(dates[1], datetime.date(2019, 6, 30))
dates = DateUtils.date_from_string("h2/2019")
self.assertEqual(dates[0], datetime.date(2019, 7, 1))
self.assertEqual(dates[1], datetime.date(2019, 12, 31))
dates = DateUtils.date_from_string("H2/2019")
self.assertEqual(dates[0], datetime.date(2019, 7, 1))
self.assertEqual(dates[1], datetime.date(2019, 12, 31))
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("H2/232")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("H2/00232")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("H5/2012")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("H0/2012")
with self.assertRaises(ValueError):
dates = DateUtils.date_from_string("H0234/2012")
def test_date_range(self):
dates = DateUtils.date_range_from_string("w11-")
self.assertEqual(dates[0], DateUtils.date_from_string("w11")[0])
self.assertEqual(dates[1], datetime.date.today())
if __name__ == '__main__':
unittest.main()
|
import logging
class GlobalRouting:
def __init__(self, floorplan, top_rtl_parser, slot_manager):
self.floorplan = floorplan
self.top_rtl_parser = top_rtl_parser
self.slot_manager = slot_manager
self.v2s = floorplan.getVertexToSlot()
self.s2e = floorplan.getSlotToEdges()
self.e_name2path = {} # from edge to all slots passed
self.naiveGlobalRouting()
self.updateEdgePipelineLevel()
def updateEdgePipelineLevel(self):
"""
        update the pipeline_level field based on the routing results
"""
for e_list in self.s2e.values():
for e in e_list:
slot_path = []
src_slot = self.v2s[e.src]
dst_slot = self.v2s[e.dst]
slot_path = self.e_name2path[e.name]
# 2 levels of pipelining for each slot crossing
if src_slot == dst_slot:
e.pipeline_level = 0
else:
e.pipeline_level = (len(slot_path) + 1) * 2
def naiveGlobalRouting(self):
"""
        each edge first goes in the X direction, then in the Y direction
        assume all slots are of the same size and are aligned
        the slot_path excludes the src slot and the dst slot
"""
for e_list in self.s2e.values():
for e in e_list:
slot_path = []
src_slot = self.v2s[e.src]
dst_slot = self.v2s[e.dst]
slot_path.append(src_slot)
curr = src_slot
len_x = src_slot.getLenX()
len_y = src_slot.getLenY()
# first go in X direction
x_diff = curr.getPositionX() - dst_slot.getPositionX()
if x_diff:
dir = 'LEFT' if x_diff > 0 else 'RIGHT'
for i in range(int(abs(x_diff/len_x))):
curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))
slot_path.append(curr)
y_diff = curr.getPositionY() - dst_slot.getPositionY()
if y_diff:
dir = 'DOWN' if y_diff > 0 else 'UP'
for i in range(int(abs(y_diff/len_y))):
curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))
slot_path.append(curr)
assert curr == dst_slot
slot_path = slot_path[1:-1] # exclude the src and the dst
logging.info(f'{e.name}: {self.v2s[e.src].getName()} -> {self.v2s[e.dst].getName()} : ' + ' '.join(s.getName() for s in slot_path))
self.e_name2path[e.name] = slot_path
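# --- Illustration (not part of the original file) ---------------------------
# A self-contained sketch of the routing order used above, on an idealised unit
# grid: step towards the destination along X first, then along Y.  The helper
# naive_route is hypothetical and does not use the Slot API.
def naive_route(src, dst):
    """Return the intermediate grid cells between src and dst (both excluded)."""
    path = []
    x, y = src
    step_x = 1 if dst[0] > x else -1
    while x != dst[0]:
        x += step_x
        path.append((x, y))
    step_y = 1 if dst[1] > y else -1
    while y != dst[1]:
        y += step_y
        path.append((x, y))
    return path[:-1]  # drop the destination, mirroring slot_path[1:-1]
# naive_route((0, 0), (2, 1)) -> [(1, 0), (2, 0)]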
|
import datetime
from .. import db
from sqlalchemy.dialects import postgresql
class BookCarts(db.Model):
""" BookCarts model for storing book details selected"""
__tablename__ = "book_carts"
id = db.Column(db.Integer, autoincrement=True, primary_key=True)
book_id = db.Column(db.Integer)
cart_id = db.Column(db.Integer)
price = db.Column(db.Numeric(), nullable=False)
quantity = db.Column(db.Integer)
updated_at = db.Column(db.DateTime())
|
'''
Created on Feb 27, 2012
@author: IslamM
'''
import web
from web.wsgiserver.ssl_builtin import BuiltinSSLAdapter
from logging import getLogger
log = getLogger(__name__)
web.config.debug = False
class UIMWEBApp(web.application):
def run(self, certinfo, server_address=('0.0.0.0', 8080), timeout=900, rqs=10, nthreads=20):
from web.wsgiserver import CherryPyWSGIServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import BaseHTTPRequestHandler
class StaticApp(SimpleHTTPRequestHandler):
"""WSGI application for serving static files."""
def __init__(self, environ, start_response):
self.headers = []
self.environ = environ
self.start_response = start_response
def send_response(self, status, msg=""):
self.status = str(status) + " " + msg
def send_header(self, name, value):
self.headers.append((name, value))
def end_headers(self):
pass
def log_message(self, *a): pass
def __iter__(self):
environ = self.environ
self.path = environ.get('PATH_INFO', '')
self.client_address = environ.get('REMOTE_ADDR','-'), \
environ.get('REMOTE_PORT','-')
self.command = environ.get('REQUEST_METHOD', '-')
from cStringIO import StringIO
self.wfile = StringIO() # for capturing error
f = self.send_head()
self.start_response(self.status, self.headers)
if f:
block_size = 16 * 1024
while True:
buf = f.read(block_size)
if not buf:
break
yield buf
f.close()
else:
value = self.wfile.getvalue()
yield value
class WSGIWrapper(BaseHTTPRequestHandler):
"""WSGI wrapper for logging the status and serving static files."""
def __init__(self, app):
self.app = app
self.format = '%s - - [%s] "%s %s %s" - %s'
def __call__(self, environ, start_response):
def xstart_response(status, response_headers, *args):
write = start_response(status, response_headers, *args)
self.log(status, environ)
return write
path = environ.get('PATH_INFO', '').lower()
if path.startswith('/static/') and not ('..' in path or '%2f' in path or '%2e' in path or '%5c' in path) and not path.endswith('/'):
return StaticApp(environ, xstart_response)
else:
return self.app(environ, xstart_response)
def log(self, status, environ):
req = environ.get('PATH_INFO', '_')
protocol = environ.get('ACTUAL_SERVER_PROTOCOL', '-')
method = environ.get('REQUEST_METHOD', '-')
host = '%s:%s' % (environ.get('REMOTE_ADDR', '-'),
environ.get('REMOTE_PORT', '-'))
time = self.log_date_time_string()
msg = self.format % (host, time, protocol, method, req, status)
log.debug(msg)
#wrapper for the CherryPyWSGIServer to enable certificates
#force the server address so the port is available
func = WSGIWrapper(self.wsgifunc())
self.server = CherryPyWSGIServer(server_address, func, timeout = timeout, request_queue_size=rqs, numthreads=nthreads)
protocol = 'http'
if certinfo:
protocol = 'https'
adapter = BuiltinSSLAdapter(certinfo['cert'], certinfo['key'])
self.server.ssl_adapter = adapter
self.server.thread_name = 'HPIC4VCUIM'
log.info('%s://%s:%d/', protocol, server_address[0], server_address[1])
try:
self.server.start()
except KeyboardInterrupt:
log.info('Exception starting uim...')
self.server.stop()
log.info('Started uim...')
|
import pandas as pd
import re  # used by makeHref below
import requests
import time
import folium
import pathlib
from folium.plugins import MarkerCluster
from matplotlib import pylab
from pylab import *
from opencage.geocoder import OpenCageGeocode
from pprint import pprint
key = 'add your own Open Cage Code key here'
geocoder = OpenCageGeocode(key)
file = pathlib.Path('South_Baltimore_Business_Status_COVID19_Update.csv')
if not file.exists():
lat = []
lng = []
df0 = pd.read_csv('South_Baltimore_Business_Status_COVID19.csv',encoding = "ISO-8859-1")
df1 = df0.loc[:, ['Business_Name', 'Address']]
df1['Address'] = df1['Address']+', Baltimore, MD'
for i in range(len(df1)):
ADDRESS = df1['Address'][i]
result = geocoder.geocode(ADDRESS, no_annotations='1')
temp = result[0]['geometry']['lat']
lat.append(result[0]['geometry']['lat'])
lng.append(result[0]['geometry']['lng'])
time.sleep(0.5)
lat = pd.DataFrame(lat)
lng = pd.DataFrame(lng)
df0['Latitude'] = lat
df0['Longitude'] = lng
df0.to_csv('South_Baltimore_Business_Status_COVID19_Update.csv')
df2 = pd.read_csv('South_Baltimore_Business_Status_COVID19_Update.csv',encoding = "ISO-8859-1")
df2 = df2[df2.Status != 'CLOSED']
# df2['category'] = 'other'
# central coordinates of Baltimore
EDI_COORDINATES = (39.2904, -76.6122)
# create empty map zoomed in on Baltimore
map = folium.Map(location=EDI_COORDINATES, zoom_start=12)
spots = MarkerCluster(name='spots').add_to(map)
def makeHref(url,link_text = None):
    if link_text is None:
link_text = str(url)
return '<a href="' + url + '"target="_blank">' + re.sub(r"[']+", "\\\\'", link_text[:45]) +'</a>'
def popopHTMLString(df2):
'''input: a series that contains a url somewhere in it and generate html'''
html = makeHref(df2.Website, df2.Business_Name)
return html
def plotDot(df2):
htmlString = folium.Html(popopHTMLString(df2), script=True)
folium.Marker(location=[df2.Latitude, df2.Longitude],
popup = folium.Popup(htmlString),
fill_color='#000000').add_to(spots)
#use df.apply(,axis=1) to iterate through every row in your dataframe
df2.apply(plotDot, axis = 1)
map.fit_bounds(map.get_bounds())
folium.LayerControl().add_to(map)
fn = 'south_baltimore_covid.html'
map.save(fn)
|
from .. modello_lineare import RegressioneLineare
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
import numpy as np
X, y = make_regression(n_samples=100, n_features=10, bias=5, random_state=42)
rl_fit_intercept_true = RegressioneLineare(fit_intercept=True)
lr_fit_intercept_true = LinearRegression(fit_intercept=True)
rl_fit_intercept_true.fit(X, y)
lr_fit_intercept_true.fit(X, y)
rl_fit_intercept_false = RegressioneLineare(fit_intercept=False)
lr_fit_intercept_false = LinearRegression(fit_intercept=False)
rl_fit_intercept_false.fit(X, y)
lr_fit_intercept_false.fit(X, y)
def test_fit_intercept_true_intercept_():
assert np.allclose(rl_fit_intercept_true.intercept_,
lr_fit_intercept_true.intercept_)
def test_fit_intercept_true_coef_():
assert np.allclose(rl_fit_intercept_true.coef_,
lr_fit_intercept_true.coef_)
def test_fit_intercept_false_intercept_():
assert np.allclose(rl_fit_intercept_false.intercept_,
lr_fit_intercept_false.intercept_)
def test_fit_intercept_false_coef_():
assert np.allclose(rl_fit_intercept_false.coef_,
lr_fit_intercept_false.coef_)
|
cc_library(
name = "zlib",
hdrs = glob(["include/*.h"]),
srcs = ["lib/libz.a"],
includes = ["include"],
visibility = ["//visibility:public"],
)
|
import os
import os.path
import pathlib
import tempfile
import logging
import warnings
from collections import namedtuple
from joblib import Parallel, delayed, dump, load
import pandas as pd
import numpy as np
try:
import dask.dataframe as ddf
except ImportError:
ddf = None
from ..algorithms import Recommender
from .. import util
from ..sharing import sharing_mode
_logger = logging.getLogger(__name__)
_AlgoKey = namedtuple('AlgoKey', ['type', 'data'])
@util.last_memo(check_type='equality')
def __load_algo(path):
return load(path, mmap_mode='r')
def _recommend_user(algo, user, n, candidates):
dask = False
if type(algo).__name__ == 'AlgoKey': # pickling doesn't preserve isinstance
if algo.type == 'file':
algo = __load_algo(algo.data)
elif algo.type == 'future':
algo = algo.data.result()
dask = True
else:
            raise ValueError('unknown algorithm key type %s' % algo.type)
_logger.debug('generating recommendations for %s', user)
watch = util.Stopwatch()
res = algo.recommend(user, n, candidates)
_logger.debug('%s recommended %d/%s items for %s in %s', algo, len(res), n, user, watch)
res['user'] = user
res['rank'] = np.arange(1, len(res) + 1)
if dask:
res = ddf.from_pandas(res, npartitions=1)
return res
def __standard_cand_fun(candidates):
"""
    Convert candidates from the forms accepted by :py:func:`recommend` into
a standard form, a function that takes a user and returns a candidate
list.
"""
if isinstance(candidates, dict):
return candidates.get
elif candidates is None:
return lambda u: None
else:
return candidates
def recommend(algo, users, n, candidates=None, *, n_jobs=None, dask_result=False, **kwargs):
"""
Batch-recommend for multiple users. The provided algorithm should be a
:py:class:`algorithms.Recommender`.
Args:
algo: the algorithm
users(array-like): the users to recommend for
n(int): the number of recommendations to generate (None for unlimited)
candidates:
the users' candidate sets. This can be a function, in which case it will
be passed each user ID; it can also be a dictionary, in which case user
IDs will be looked up in it. Pass ``None`` to use the recommender's
built-in candidate selector (usually recommended).
n_jobs(int):
The number of processes to use for parallel recommendations. Passed as
        ``n_jobs`` to :class:`joblib.Parallel`. The default, ``None``, will make
the process sequential _unless_ called inside the :func:`joblib.parallel_backend`
context manager.
.. note:: ``nprocs`` is accepted as a deprecated alias.
dask_result(bool):
Whether to return a Dask data frame instead of a Pandas one.
Returns:
A frame with at least the columns ``user``, ``rank``, and ``item``; possibly also
``score``, and any other columns returned by the recommender.
"""
if n_jobs is None and 'nprocs' in kwargs:
n_jobs = kwargs['nprocs']
warnings.warn('nprocs is deprecated, use n_jobs', DeprecationWarning)
rec_algo = Recommender.adapt(algo)
if candidates is None and rec_algo is not algo:
warnings.warn('no candidates provided and algo is not a recommender, unlikely to work')
del algo # don't need reference any more
if 'ratings' in kwargs:
warnings.warn('Providing ratings to recommend is not supported', DeprecationWarning)
candidates = __standard_cand_fun(candidates)
loop = Parallel(n_jobs=n_jobs)
path = None
try:
_logger.debug('activating recommender loop')
with loop:
backend = loop._backend.__class__.__name__
njobs = loop._effective_n_jobs()
_logger.info('parallel backend %s, effective njobs %s',
backend, njobs)
using_dask = backend == 'DaskDistributedBackend'
astr = str(rec_algo)
if using_dask:
_logger.debug('pre-scattering algorithm %s', rec_algo)
futures = loop._backend.client.scatter([rec_algo], broadcast=True, hash=False)
rec_algo = _AlgoKey('future', futures[0])
elif njobs > 1:
fd, path = tempfile.mkstemp(prefix='lkpy-predict', suffix='.pkl',
dir=util.scratch_dir(joblib=True))
path = pathlib.Path(path)
os.close(fd)
_logger.debug('pre-serializing algorithm %s to %s', rec_algo, path)
with sharing_mode():
dump(rec_algo, path)
rec_algo = _AlgoKey('file', path)
_logger.info('recommending with %s for %d users (n_jobs=%s)', astr, len(users), n_jobs)
timer = util.Stopwatch()
results = loop(delayed(_recommend_user)(rec_algo, user, n, candidates(user))
for user in users)
if using_dask or dask_result:
results = ddf.concat(results, interleave_partitions=True)
if not dask_result: # only if we're running inside dask, but don't want results
results = results.compute()
else:
results = pd.concat(results, ignore_index=True, copy=False)
_logger.info('recommended for %d users in %s', len(users), timer)
finally:
util.delete_sometime(path)
return results
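# --- Usage sketch (illustrative, not part of the original file) -------------
# Assuming `algo` is an already-fitted Recommender (or something that
# Recommender.adapt can wrap), a typical call hands candidate sets over as a
# dict keyed by user; nothing below is executed on import.
#
#     from lenskit import batch
#     recs = batch.recommend(algo, users=[1, 2, 3], n=10,
#                            candidates={1: [10, 11], 2: [12, 13], 3: [14]},
#                            n_jobs=2)
#     recs.head()   # columns: user, rank, item, and usually score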
|
print('======= DESAFIO 14 =======')
c = float(input('Enter the temperature in ºC: '))
f = ((9 * c) / 5) + 32
# can also be written without any parentheses (f = 9 * c / 5 + 32): operator precedence!
print('The temperature of {:.1f}ºC corresponds to {:.1f}ºF!'.format(c, f))
|
from . import xlsx
class XlsxImport(xlsx.XlsxImport):
def handle(self):
return self
|
from cradmin_legacy.crinstance import reverse_cradmin_url
from cradmin_legacy import crapp
from devilry.apps.core.models import Subject
from devilry.devilry_cradmin import devilry_crinstance
from devilry.devilry_admin.cradminextensions import devilry_crmenu_admin
from devilry.devilry_admin.views.subject_for_period_admin import overview_for_periodadmin, subject_redirect
from devilry.devilry_cradmin import devilry_crmenu
class Menu(devilry_crmenu_admin.Menu):
def build_menu(self):
super(Menu, self).build_menu()
subject = self.request.cradmin_role
self.add_role_menuitem_object()
self.add_subject_breadcrumb_item(subject=subject, active=True)
def add_subject_breadcrumb_item(self, subject, active=False):
return self.add_headeritem_object(devilry_crmenu.BreadcrumbMenuItem(
label=subject.short_name,
url=reverse_cradmin_url(
instanceid='devilry_admin_subject_for_periodadmin',
appname='overview',
roleid=subject.id,
viewname=crapp.INDEXVIEW_NAME
),
active=active
))
class CrAdminInstance(devilry_crinstance.BaseCrInstanceAdmin):
menuclass = Menu
roleclass = Subject
apps = [
('overview', overview_for_periodadmin.App),
('subject_redirect', subject_redirect.App)
]
id = 'devilry_admin_subject_for_periodadmin'
rolefrontpage_appname = 'overview'
def get_rolequeryset(self):
return Subject.objects.filter_user_is_admin_for_any_periods_within_subject(user=self.request.user)
def get_titletext_for_role(self, role):
"""
Get a short title briefly describing the given ``role``.
Remember that the role is a Subject.
"""
subject = role
return subject
@classmethod
def matches_urlpath(cls, urlpath):
return urlpath.startswith('/devilry_admin/subject_for_periodadmin')
def get_devilryrole_for_requestuser(self):
"""
Only period admins will be redirected back to this crinstance
"""
return 'periodadmin'
|
import logging
import os
import textwrap
import yaml
from . import constants
from .exceptions import DefinitionError
from .steps import (
AdditionalBuildSteps, BuildContextSteps, GalaxyInstallSteps, GalaxyCopySteps, AnsibleConfigSteps
)
from .utils import run_command, copy_file
logger = logging.getLogger(__name__)
# Files that need to be moved into the build context, and their naming inside the context
CONTEXT_FILES = {
'galaxy': 'requirements.yml',
'python': 'requirements.txt',
'system': 'bindep.txt',
}
ALLOWED_KEYS = [
'version',
'build_arg_defaults',
'dependencies',
'ansible_config',
'additional_build_steps',
]
class AnsibleBuilder:
def __init__(self,
command_type=None,
action=None,
filename=constants.default_file,
build_args=None,
build_context=constants.default_build_context,
tag=None,
container_runtime=constants.default_container_runtime,
output_filename=None,
no_cache=False,
verbosity=constants.default_verbosity):
self.action = action
self.definition = UserDefinition(filename=filename)
self.tags = tag or []
self.build_context = build_context
self.build_outputs_dir = os.path.join(
build_context, constants.user_content_subfolder)
self.container_runtime = container_runtime
self.build_args = build_args or {}
self.no_cache = no_cache
self.containerfile = Containerfile(
definition=self.definition,
build_context=self.build_context,
container_runtime=self.container_runtime,
output_filename=output_filename)
self.verbosity = verbosity
@property
def version(self):
return self.definition.version
@property
def ansible_config(self):
return self.definition.ansible_config
def create(self):
logger.debug('Ansible Builder is generating your execution environment build context.')
return self.write_containerfile()
def write_containerfile(self):
# File preparation
self.containerfile.create_folder_copy_files()
# First stage, galaxy
self.containerfile.prepare_galaxy_stage_steps()
self.containerfile.prepare_ansible_config_file()
self.containerfile.prepare_build_context()
self.containerfile.prepare_galaxy_install_steps()
# Second stage, builder
self.containerfile.prepare_build_stage_steps()
self.containerfile.prepare_galaxy_copy_steps()
self.containerfile.prepare_introspect_assemble_steps()
# Second stage
self.containerfile.prepare_final_stage_steps()
self.containerfile.prepare_prepended_steps()
self.containerfile.prepare_galaxy_copy_steps()
self.containerfile.prepare_system_runtime_deps_steps()
self.containerfile.prepare_appended_steps()
logger.debug('Rewriting Containerfile to capture collection requirements')
return self.containerfile.write()
@property
def build_command(self):
command = [
self.container_runtime, "build",
"-f", self.containerfile.path
]
for tag in self.tags:
command.extend(["-t", tag])
for key, value in self.build_args.items():
if value:
build_arg = f"--build-arg={key}={value}"
else:
build_arg = f"--build-arg={key}"
command.append(build_arg)
command.append(self.build_context)
if self.no_cache:
command.append('--no-cache')
return command
def build(self):
logger.debug(f'Ansible Builder is building your execution environment image. Tags: {", ".join(self.tags)}')
self.write_containerfile()
run_command(self.build_command)
return True
class BaseDefinition:
"""Subclasses should populate these properties in the __init__ method
    self.raw - the parsed definition as a dict
    self.reference_path - the folder that dependency paths are resolved relative to
"""
@property
def version(self):
version = self.raw.get('version')
if not version:
raise ValueError("Expected top-level 'version' key to be present.")
return str(version)
@property
def ansible_config(self):
        ansible_config = self.raw.get('ansible_config')
        if ansible_config:
            return str(ansible_config)
        return None
class UserDefinition(BaseDefinition):
def __init__(self, filename):
self.filename = filename
self.reference_path = os.path.dirname(filename)
try:
with open(filename, 'r') as f:
y = yaml.safe_load(f)
self.raw = y if y else {}
except FileNotFoundError:
raise DefinitionError(textwrap.dedent("""
Could not detect '{0}' file in this directory.
Use -f to specify a different location.
""").format(filename))
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
raise DefinitionError(textwrap.dedent("""
                An error occurred while parsing the definition file:
{0}
""").format(str(e)))
if not isinstance(self.raw, dict):
raise DefinitionError("Definition must be a dictionary, not {0}".format(type(self.raw).__name__))
if self.raw.get('dependencies') is not None:
if not isinstance(self.raw.get('dependencies'), dict):
raise DefinitionError(textwrap.dedent(
f"""
Error: Unknown type {type(self.raw.get('dependencies'))} found for dependencies, must be a dict.\n
Allowed options are:
{list(CONTEXT_FILES.keys())}
""")
)
# Populate build arg defaults, which are customizable in definition
self.build_arg_defaults = {}
user_build_arg_defaults = self.raw.get('build_arg_defaults', {})
if not isinstance(user_build_arg_defaults, dict):
user_build_arg_defaults = {} # so that validate method can throw error
for key, default_value in constants.build_arg_defaults.items():
self.build_arg_defaults[key] = user_build_arg_defaults.get(key, default_value)
def get_additional_commands(self):
"""Gets additional commands from the exec env file, if any are specified.
"""
commands = self.raw.get('additional_build_steps')
return commands
def get_dep_abs_path(self, entry):
"""Unique to the user EE definition, files can be referenced by either
an absolute path or a path relative to the EE definition folder
This method will return the absolute path.
"""
req_file = self.raw.get('dependencies', {}).get(entry)
if not req_file:
return None
if os.path.isabs(req_file):
return req_file
return os.path.join(self.reference_path, req_file)
def validate(self):
# Check that all specified keys in the definition file are valid.
def_file_dict = self.raw
yaml_keys = set(def_file_dict.keys())
invalid_keys = yaml_keys - set(ALLOWED_KEYS)
if invalid_keys:
raise DefinitionError(textwrap.dedent(
f"""
Error: Unknown yaml key(s), {invalid_keys}, found in the definition file.\n
Allowed options are:
{ALLOWED_KEYS}
""")
)
if self.raw.get('dependencies') is not None:
dependencies_keys = set(self.raw.get('dependencies'))
invalid_dependencies_keys = dependencies_keys - set(CONTEXT_FILES.keys())
if invalid_dependencies_keys:
raise DefinitionError(textwrap.dedent(
f"""
Error: Unknown yaml key(s), {invalid_dependencies_keys}, found in dependencies.\n
Allowed options are:
{list(CONTEXT_FILES.keys())}
""")
)
for item in CONTEXT_FILES:
requirement_path = self.get_dep_abs_path(item)
if requirement_path:
if not os.path.exists(requirement_path):
raise DefinitionError("Dependency file {0} does not exist.".format(requirement_path))
build_arg_defaults = self.raw.get('build_arg_defaults')
if build_arg_defaults:
if not isinstance(build_arg_defaults, dict):
raise DefinitionError(
f"Error: Unknown type {type(build_arg_defaults)} found for build_arg_defaults; "
f"must be a dict."
)
unexpected_keys = set(build_arg_defaults.keys()) - set(constants.build_arg_defaults)
if unexpected_keys:
raise DefinitionError(
f"Keys {unexpected_keys} are not allowed in 'build_arg_defaults'."
)
for key, value in constants.build_arg_defaults.items():
user_value = build_arg_defaults.get(key)
if user_value and not isinstance(user_value, str):
raise DefinitionError(
f"Expected build_arg_defaults.{key} to be a string; "
f"Found a {type(user_value)} instead."
)
additional_cmds = self.get_additional_commands()
if additional_cmds:
if not isinstance(additional_cmds, dict):
raise DefinitionError(textwrap.dedent("""
Expected 'additional_build_steps' in the provided definition file to be a dictionary
with keys 'prepend' and/or 'append'; found a {0} instead.
""").format(type(additional_cmds).__name__))
expected_keys = frozenset(('append', 'prepend'))
unexpected_keys = set(additional_cmds.keys()) - expected_keys
if unexpected_keys:
raise DefinitionError(
f"Keys {*unexpected_keys,} are not allowed in 'additional_build_steps'."
)
ansible_config_path = self.raw.get('ansible_config')
if ansible_config_path:
if not isinstance(ansible_config_path, str):
raise DefinitionError(textwrap.dedent("""
Expected 'ansible_config' in the provided definition file to
be a string; found a {0} instead.
""").format(type(ansible_config_path).__name__))
class Containerfile:
newline_char = '\n'
def __init__(self, definition,
build_context=None,
container_runtime=None,
output_filename=None):
self.build_context = build_context
self.build_outputs_dir = os.path.join(
build_context, constants.user_content_subfolder)
self.definition = definition
if output_filename is None:
filename = constants.runtime_files[container_runtime]
else:
filename = output_filename
self.path = os.path.join(self.build_context, filename)
self.container_runtime = container_runtime
# Build args all need to go at top of file to avoid errors
self.steps = [
"ARG EE_BASE_IMAGE={}".format(
self.definition.build_arg_defaults['EE_BASE_IMAGE']
),
"ARG EE_BUILDER_IMAGE={}".format(
self.definition.build_arg_defaults['EE_BUILDER_IMAGE']
),
]
def create_folder_copy_files(self):
"""Creates the build context file for this Containerfile
moves files from the definition into the folder
"""
# courteously validate items before starting to write files
self.definition.validate()
os.makedirs(self.build_outputs_dir, exist_ok=True)
for item, new_name in CONTEXT_FILES.items():
requirement_path = self.definition.get_dep_abs_path(item)
if requirement_path is None:
continue
dest = os.path.join(
self.build_context, constants.user_content_subfolder, new_name)
copy_file(requirement_path, dest)
if self.definition.ansible_config:
copy_file(
self.definition.ansible_config,
os.path.join(self.build_outputs_dir, 'ansible.cfg')
)
def prepare_ansible_config_file(self):
ansible_config_file_path = self.definition.ansible_config
if ansible_config_file_path:
context_file_path = os.path.join(
constants.user_content_subfolder, 'ansible.cfg')
return self.steps.extend(AnsibleConfigSteps(context_file_path))
def prepare_prepended_steps(self):
additional_prepend_steps = self.definition.get_additional_commands()
if additional_prepend_steps:
prepended_steps = additional_prepend_steps.get('prepend')
if prepended_steps:
return self.steps.extend(AdditionalBuildSteps(prepended_steps))
return False
def prepare_appended_steps(self):
additional_append_steps = self.definition.get_additional_commands()
if additional_append_steps:
appended_steps = additional_append_steps.get('append')
if appended_steps:
return self.steps.extend(AdditionalBuildSteps(appended_steps))
return False
def prepare_build_context(self):
if any(self.definition.get_dep_abs_path(thing) for thing in ('galaxy', 'system', 'python')):
self.steps.extend(BuildContextSteps())
return self.steps
def prepare_galaxy_install_steps(self):
if self.definition.get_dep_abs_path('galaxy'):
self.steps.extend(GalaxyInstallSteps(CONTEXT_FILES['galaxy']))
return self.steps
def prepare_introspect_assemble_steps(self):
# The introspect/assemble block is valid if there are any form of requirements
if any(self.definition.get_dep_abs_path(thing) for thing in ('galaxy', 'system', 'python')):
introspect_cmd = "RUN ansible-builder introspect --sanitize"
requirements_file_exists = os.path.exists(os.path.join(
self.build_outputs_dir, CONTEXT_FILES['python']
))
if requirements_file_exists:
relative_requirements_path = os.path.join(constants.user_content_subfolder, CONTEXT_FILES['python'])
self.steps.append(f"ADD {relative_requirements_path} {CONTEXT_FILES['python']}")
# WORKDIR is /build, so we use the (shorter) relative paths there
introspect_cmd += " --user-pip={0}".format(CONTEXT_FILES['python'])
bindep_exists = os.path.exists(os.path.join(self.build_outputs_dir, CONTEXT_FILES['system']))
if bindep_exists:
relative_bindep_path = os.path.join(constants.user_content_subfolder, CONTEXT_FILES['system'])
self.steps.append(f"ADD {relative_bindep_path} {CONTEXT_FILES['system']}")
introspect_cmd += " --user-bindep={0}".format(CONTEXT_FILES['system'])
introspect_cmd += " --write-bindep=/tmp/src/bindep.txt --write-pip=/tmp/src/requirements.txt"
self.steps.append(introspect_cmd)
self.steps.append("RUN assemble")
return self.steps
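    # Illustrative sketch of the steps appended above (assumes CONTEXT_FILES maps
    # 'python' -> 'requirements.txt' and 'system' -> 'bindep.txt', and that the user
    # content subfolder is named '_build'); with both files present this emits roughly:
    #   ADD _build/requirements.txt requirements.txt
    #   ADD _build/bindep.txt bindep.txt
    #   RUN ansible-builder introspect --sanitize --user-pip=requirements.txt --user-bindep=bindep.txt --write-bindep=/tmp/src/bindep.txt --write-pip=/tmp/src/requirements.txt
    #   RUN assemble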
def prepare_system_runtime_deps_steps(self):
self.steps.extend([
"COPY --from=builder /output/ /output/",
"RUN /output/install-from-bindep && rm -rf /output/wheels",
])
return self.steps
def prepare_galaxy_stage_steps(self):
self.steps.extend([
"",
"FROM $EE_BASE_IMAGE as galaxy",
"ARG ANSIBLE_GALAXY_CLI_COLLECTION_OPTS={}".format(
self.definition.build_arg_defaults['ANSIBLE_GALAXY_CLI_COLLECTION_OPTS']
),
"USER root",
""
])
return self.steps
def prepare_build_stage_steps(self):
self.steps.extend([
"",
"FROM $EE_BUILDER_IMAGE as builder"
"",
])
return self.steps
def prepare_final_stage_steps(self):
self.steps.extend([
"",
"FROM $EE_BASE_IMAGE",
"USER root"
"",
])
return self.steps
def prepare_galaxy_copy_steps(self):
if self.definition.get_dep_abs_path('galaxy'):
self.steps.extend(GalaxyCopySteps())
return self.steps
def write(self):
with open(self.path, 'w') as f:
for step in self.steps:
f.write(step + self.newline_char)
return True
|
from invoke import task
import shutil
from pathlib import Path
@task
def lint(c):
c.run("isort --check winsnap", warn=True)
c.run("black --check winsnap", warn=True)
c.run("flake8 winsnap", warn=True)
@task
def format(c):
c.run("isort winsnap", warn=True)
c.run("black winsnap", warn=True)
@task
def bootstrap(c):
import comtypes.client
    comtypes.client.GetModule(r"C:\Windows\System32\UIAutomationCore.dll")
@task(bootstrap)
def package(c):
windows = (Path(__file__) / ".." / "windows").resolve()
if windows.exists():
shutil.rmtree(windows)
c.run("briefcase create")
c.run("briefcase build")
# The first time it runs it fails but the next time it works. This is to bootstrap the uia
# automation files for comtypes
c.run("briefcase run", echo=False, warn=True)
c.run("briefcase package")
@task(package)
def install(c):
windows = (Path(__file__) / ".." / "windows").resolve()
msi = list(windows.glob("*.msi"))[0]
c.run(str(msi))
@task(bootstrap)
def exe(c):
c.run("pyinstaller winsnap.spec")
|
from time import time
import numpy as np
import cv2 as cv
import win32gui, win32ui, win32con
class Vision:
# Attributes
match_template_methods = list()
normalized_match_template_methods = list()
debug_modes = list()
copper_ore_rgb = list()
tin_ore_rgb = list()
# --------------- Constructor
def __init__(self):
self.match_template_methods = [cv.TM_CCOEFF, cv.TM_CCOEFF_NORMED,
cv.TM_CCORR, cv.TM_CCORR_NORMED,
cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]
self.normalized_match_template_methods = [cv.TM_CCOEFF_NORMED, cv.TM_CCORR_NORMED, cv.TM_SQDIFF_NORMED]
self.debug_modes = [None, 'rectangles', 'crosshairs', 'visual']
# Set colour codes
        # Assign per-instance lists (extending the class-level lists would share and grow the same list across instances)
        self.copper_ore_rgb = [(255, 228, 154), (201, 163, 111), (149, 126, 86)]
        self.tin_ore_rgb = [(92, 253, 152), (33, 147, 38), (59, 196, 119)]
# --------------- Methods
# Method 1: Finding click positions based on the cv.matchTemplate() function
def findClickPositions(self, haystack_img, needle_img, method=cv.TM_CCOEFF_NORMED, threshold=0.9,
debug_mode=None):
'''
This method performs the openCV.matchTemplate()-method, using the needle_img on the haystack_img.
Heavily inspired and mostly inherited from: https://github.com/learncodebygaming/opencv_tutorials/blob/master/003_group_rectangles/main.py
        :param haystack_img:
        :param needle_img:
        :param method:
        :param threshold:
        :param debug_mode:
:return click_points:
'''
# Local constants
group_threshold = 1
group_eps = 0.5
line_color = (0, 255, 0)
line_type = cv.LINE_4
line_thickness = 2
marker_color = line_color
marker_type = cv.MARKER_CROSS
marker_size = 40
# Local variables
needle_img_width = 0
needle_img_height = 0
rectangles = []
click_points = []
# Validate user inputs from parameters
if method not in self.match_template_methods:
method = self.match_template_methods[0]
if debug_mode not in self.debug_modes:
debug_mode = self.debug_modes[1]
# We're already passing in image data from cv.imread(), get image shape now
        # shape is (rows, cols) == (height, width)
        needle_img_height = needle_img.shape[0]
        needle_img_width = needle_img.shape[1]
# Perform the matchTemplate function
result_matrix = cv.matchTemplate(haystack_img, needle_img, method)
# Grab positions from the match result matrix that exceed the given threshold
locations = np.where(result_matrix >= threshold)
# Transform the output from np.where to an array of tuples, containing our (X, Y) coordinates
locations = list(zip(*locations[::-1]))
# print('Found locations: {}'.format(len(locations)))
        # Create a list of rectangles out of the locations we found. Append each rectangle twice so that
        # cv.groupRectangles() does not eliminate single rectangles, since we want those as well.
for location in locations:
rectangle = [int(location[0]), int(location[1]), needle_img_width, needle_img_height]
rectangles.append(rectangle)
rectangles.append(rectangle)
# Group the rectangles so we get cleaner output
rectangles, weights = cv.groupRectangles(rectangles, groupThreshold=group_threshold, eps=group_eps)
# print('Found rectangles: {}'.format(len(rectangles)))
# Get the center point from each rectangle
for (x_coordinate, y_coordinate, width, height) in rectangles:
center_x = x_coordinate + int(width / 2)
center_y = y_coordinate + int(height / 2)
coordinate_tuple = (center_x, center_y)
click_points.append(coordinate_tuple)
# Draw rectangles if we're in rectangles-debug mode
if debug_mode == self.debug_modes[1]:
top_left = (x_coordinate, y_coordinate)
bottom_right = (x_coordinate + width, y_coordinate + height)
cv.rectangle(haystack_img, top_left, bottom_right,
color=line_color, lineType=line_type,
thickness=line_thickness)
# Draw crosshairs if we're in crosshairs-debug mode
elif debug_mode == self.debug_modes[2]:
cv.drawMarker(haystack_img, (center_x, center_y),
                              color=marker_color, markerType=marker_type,
markerSize=marker_size, thickness=line_thickness)
# Show outputs
if debug_mode:
cv.imshow('Bot Vision', haystack_img)
return click_points
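    # Illustrative usage sketch (image file names are assumed):
    #   vision = Vision()
    #   haystack = cv.imread('albion_screenshot.jpg')
    #   needle = cv.imread('copper_ore_needle.jpg')
    #   points = vision.findClickPositions(haystack, needle, threshold=0.8, debug_mode='rectangles')
    #   # -> list of (x, y) centre coordinates of the matched regions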
# Method 2: Return window handle, window_width and window_height of a given window
def getWindowInfo(self, window_name='Albion Online Client'):
# Local variables
window_information = []
# Local variables
window_handle = None
window_width = None
window_height = None
# Try grabbing the albion online client window_handle and setting the width and height
try:
window_handle = win32gui.FindWindow(None, window_name)
window_rectangle = win32gui.GetWindowRect(window_handle)
window_width = window_rectangle[2] - window_rectangle[0]
window_height = window_rectangle[3] - window_rectangle[1]
except BaseException as exception:
print('[VISION] Exception capturing a window_handle using win32gui.FindWindow()')
print('[VISION] Exception info: ', exception)
# Add results to the result array
window_information.extend([window_handle, window_width, window_height])
return window_information
# Method 3: Capturing the window from Albion and returning it in an openCV-understandable format
def captureWindow(self, window_width, window_height, window_handle=None, debug_mode=False):
# Parse out window handle - if it is none, we capture the whole desktop
# IMPORTANT: To capture the Albion client, we need to set window_handle to None, since we only capture the
# initial screenshot and never update it. See: https://www.youtube.com/watch?v=7k4j-uL8WSQ&list=PL1m2M8LQlzfKtkKq2lK5xko4X-8EZzFPI&t=132s
if window_handle is None:
window_handle = win32gui.GetDesktopWindow()
# Get window device context - a structure that defines a set of graphic objects and their associated attributes
# https://docs.microsoft.com/en-us/windows/win32/gdi/device-contexts
window_device_context = win32gui.GetWindowDC(window_handle)
device_context = win32ui.CreateDCFromHandle(window_device_context)
compatible_device_context = device_context.CreateCompatibleDC()
# Creates a bitmap out of the device context and convert it into a format openCV can read
bitmap = win32ui.CreateBitmap()
bitmap.CreateCompatibleBitmap(device_context, window_width, window_height)
compatible_device_context.SelectObject(bitmap)
compatible_device_context.BitBlt((0, 0), (window_width, window_height), device_context,
(0, 0), win32con.SRCCOPY)
bitmap_bits = bitmap.GetBitmapBits(True)
img = np.frombuffer(bitmap_bits, dtype='uint8')
img.shape = (window_height, window_width, 4)
# Free resources
device_context.DeleteDC()
compatible_device_context.DeleteDC()
win32gui.ReleaseDC(window_handle, window_device_context)
win32gui.DeleteObject(bitmap.GetHandle())
# Drop alpha channel to avoid cv.matchTemplate() error
img = img[..., :3]
# Make image C_CONTIGUOUS to avoid typeErrors
img = np.ascontiguousarray(img)
if debug_mode:
cv.imshow('Bot Vision', img)
cv.waitKey()
return img
# Method 4: Showing the bot-vision
def showBotVision(self, image, show_fps=True):
cv.imshow('Bot Vision', image)
if cv.waitKey(1) == ord('y'):
cv.destroyAllWindows()
return False
|
#!/usr/bin/env python
"""
Translate DNA reads from a fasta file.
"""
import sys
import click
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC, Gapped
from flea.util import insert_gaps
def _translate(record, gapped=False):
result = record[:]
if gapped:
translated = record.seq.ungap('-').translate()
result.seq = Seq(insert_gaps(str(record.seq), str(translated), '---', '-'),
alphabet=Gapped(IUPAC.IUPACProtein))
else:
result.seq = record.seq.translate()
return result
def translate(infile, outfile, gapped=False):
alphabet=IUPAC.ambiguous_dna
if gapped:
alphabet = Gapped(alphabet)
records = SeqIO.parse(infile, "fasta", alphabet=alphabet)
result = (_translate(r, gapped) for r in records)
SeqIO.write(result, outfile, "fasta")
@click.command()
@click.option('-g', '--gapped', is_flag=True, help='allow gaps')
def main(gapped):
translate(sys.stdin, sys.stdout, gapped)
if __name__ == "__main__":
main()
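# Illustrative usage (script and file names are assumed): read DNA FASTA from stdin and
# write the translated protein FASTA to stdout, preserving alignment gaps when -g is given:
#   python translate.py --gapped < aligned_dna.fasta > aligned_protein.fasta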
|
from selenium.webdriver.support.ui import Select
class ContactHelper:
def __init__(self, app):
self.app = app
def add_new_contact(self, contact):
wd = self.app.wd
        # Click the "add new" button
wd.find_element_by_link_text("add new").click()
        # Fill in the fields
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").send_keys(contact.first_name)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").send_keys(contact.middle_name)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").send_keys(contact.last_name)
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").send_keys(contact.nickname)
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").send_keys(contact.title)
wd.find_element_by_name("theform").click()
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").send_keys(contact.company)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").send_keys(contact.address)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").send_keys(contact.tel_home)
wd.find_element_by_name("mobile").send_keys(contact.tel_mobile)
wd.find_element_by_name("work").send_keys(contact.tel_work)
wd.find_element_by_name("fax").send_keys(contact.tel_fax)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").send_keys(contact.email)
wd.find_element_by_name("email2").send_keys(contact.email2)
wd.find_element_by_name("email3").send_keys(contact.email3)
wd.find_element_by_name("homepage").click()
wd.find_element_by_name("homepage").send_keys(contact.homepage)
        # Work with the dropdown lists
wd.find_element_by_name("bday").click()
Select(wd.find_element_by_name("bday")).select_by_visible_text("1")
wd.find_element_by_xpath("//option[@value='1']").click()
wd.find_element_by_name("bmonth").click()
Select(wd.find_element_by_name("bmonth")).select_by_visible_text("December")
wd.find_element_by_xpath("//option[@value='December']").click()
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").send_keys("2000")
wd.find_element_by_name("aday").click()
Select(wd.find_element_by_name("aday")).select_by_visible_text("1")
wd.find_element_by_xpath("//div[@id='content']/form/select[3]/option[3]").click()
wd.find_element_by_name("amonth").click()
Select(wd.find_element_by_name("amonth")).select_by_visible_text("September")
wd.find_element_by_xpath("//div[@id='content']/form/select[4]/option[10]").click()
wd.find_element_by_name("byear").click()
wd.find_element_by_name("ayear").click()
wd.find_element_by_name("ayear").send_keys("2010")
        # Fill in the additional fields
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").send_keys(contact.address2)
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").send_keys(contact.phone2)
wd.find_element_by_name("notes").click()
wd.find_element_by_name("notes").send_keys(contact.notes)
        # submit contact
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.return_to_home_page()
def return_to_home_page(self):
wd = self.app.wd
wd.find_element_by_link_text("home page").click()
def delete_first_contact(self):
wd = self.app.wd
        # select first contact
wd.find_element_by_name("selected[]").click()
# submit deletion
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to.alert.accept()
def modify(self, contact):
wd = self.app.wd
        # select first contact
wd.find_element_by_name("selected[]").click()
# click edit
wd.find_element_by_xpath("//img[@title='Edit']").click()
# modify contact
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").send_keys(contact.first_name)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").send_keys(contact.middle_name)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").send_keys(contact.last_name)
# click update
wd.find_element_by_name("update").click()
self.return_to_home_page()
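    # Illustrative usage sketch (the Contact model and the `app` fixture wiring are
    # assumed; they are defined elsewhere in the test framework):
    #   contact = Contact(first_name="John", middle_name="M", last_name="Doe", ...)
    #   app.contact.add_new_contact(contact)
    #   app.contact.modify(contact)
    #   app.contact.delete_first_contact()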
|
# -*- coding: utf-8 -*-
# This example shows a simple calculation of SNOM contrast between two bulk materials: silicon and gold.
from numpy import *
from matplotlib.pyplot import *
from NearFieldOptics import Materials as Mat
from NearFieldOptics import TipModels as T
##########################################################
#--- 1. Compute SNOM contrast of silicon relative to gold:
# This uses `T.LightningRodModel` (the celebrated model
# for SNOM, cf. Phys. Rev. B 90, 085136) to solve the
# quasi-electrostatic scattering problem of a tip over
# a planar sample and compute the scattered field and
# demodulated "near-field signal".
##########################################################
#mid-IR frequencies (in units of cm^-1)
frequencies = linspace(700,1000,100)
#The call signature for `T.LightningRodModel` is explicated below,
# showing the meaning (in order) of the keyword arguments:
# T.LightningRodModel(frequency, rp, tip radius, number q pts,
# number z pts, tapping amplitude, normalization material, normalization frequency)
S_lay_Si = T.LightningRodModel(frequencies,rp=Mat.Si.reflection_p,a=30,Nqs=244,\
Nzs=40,amplitude=80,normalize_to=Mat.Au.reflection_p,normalize_at=1000)
##########################################################
#--- 2. Plot the results:
# The computed arrays are of type `ArrayWithAxes`
# and have a method `.plot(...)` which automatically
# plots the array against its intrinsic axes.
##########################################################
figure();
abs(S_lay_Si['signal_3']).plot()
#The result is implicitly normalized to the signal from gold,
# because of `normalize_to=...` in the calculation above.
ylabel('s_3(Si)/s_3(Au)')
show() |
def main():
import argparse
import re
import traceback
import requests
from dlinkscraper import DLink
parser = argparse.ArgumentParser(
'DuckDNS Updater',
description=
"""This script updates your DuckDNS IPv4 address to scraped address
from your D-Link router. Because in LTE routers, your visible public
IP doesn't always match with IP that is needed to access you,
we need to scrape it from router's admin page"""
)
parser.add_argument('--token', '-t', type=str, required=True, help='Your DuckDNS token')
parser.add_argument('--domain', '-d', type=str, required=True, help='Your DuckDNS domain')
parser.add_argument(
'--login', '-l', type=str, required=False, default='admin',
help="Login to your router. It's always 'admin', so, yeah, "
"you don't need to specify it...")
parser.add_argument(
'--password', '-p', type=str, required=True,
help="Password to your router's admin"
)
parser.add_argument(
'--router-url', '-u', type=str, required=False, default='http://192.168.1.1',
help="Base URL to you router. Usually something "
"like 'http://192.168.1.1' (that's default)")
parser.add_argument(
'--no-cache', action='store_true',
help="Don't cache and check last known IP. This is default behaviour, "
"as it won't ping DuckDNS every time - only when IP changed")
parser.add_argument(
'--cache-file', type=str, required=False, default='last_ip.txt',
help='Path to file where last known IP will be cached')
args = parser.parse_args()
dl = DLink(args.router_url)
print('Logging in to router...')
dl.login(args.login, args.password)
print('Getting router main page...')
dl.get_main_site()
print('Logging out...')
dl.logout()
# Check if it's actually valid IP
if dl.public_ip is None or not re.match(r'\d+\.\d+\.\d+\.\d+', dl.public_ip):
print('Got invalid IP from router! Exit!')
exit(-1)
print('IP from router: ' + dl.public_ip)
if not args.no_cache:
print('Checking last known IP...')
try:
with open(args.cache_file, 'r') as f:
saved_ip = f.read()
print('Last IP: ' + saved_ip)
except:
saved_ip = 'error'
print(f"Can't open cache file ({args.cache_file})")
traceback.print_exc()
if saved_ip == dl.public_ip:
print('Last IP was the same :) Exit.')
exit(0)
else:
print('IP changed!')
req = requests.get(
f'https://www.duckdns.org/update'
f'?domains={args.domain}'
f'&token={args.token}'
f'&ip={dl.public_ip}'
)
if req.ok and req.content.decode('utf-8') == 'OK':
print('Updating IP success :)')
if not args.no_cache:
print('Saving current IP for later...')
            try:
                with open(args.cache_file, 'w') as f:
                    f.write(dl.public_ip)
                print('Saving current IP success :)')
            except:
                print("Can't write cache file!")
                traceback.print_exc()
exit(0)
else:
print('Updating IP failed!')
exit(-1)
if __name__ == '__main__':
main()
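# Illustrative usage (script name, token, domain and password are placeholders):
#   python duckdns_update.py -t <duckdns-token> -d <subdomain> -p <router-admin-password> \
#       -u http://192.168.1.1 --cache-file last_ip.txt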
|
import json
import re
import numpy as np
from fastapi import FastAPI, HTTPException
from .colours import random_hex
from .identifiers import (catalogue_id_to_miro_id, valid_catalogue_ids,
miro_id_to_identifiers, index_lookup)
from .neighbours import get_neighbour_ids, palette_index
from .palette_embedder import embed_hex_palette
# initialise API
app = FastAPI(
title='Palette Similarity',
description='Find similar images based on their colour, using approximate embeddings of euclidean distance in LAB space between 5-colour palettes',
docs_url='/palette-similarity/docs',
redoc_url='/palette-similarity/redoc'
)
# create API endpoints
@app.get('/works/{catalogue_id}')
def palette_similarity_by_catalogue_id(catalogue_id: str, n: int = 10):
catalogue_id = catalogue_id or np.random.choice(valid_catalogue_ids)
if catalogue_id not in valid_catalogue_ids:
raise HTTPException(status_code=404, detail="Invalid catalogue id")
miro_id = catalogue_id_to_miro_id[catalogue_id]
query_index = index_lookup[miro_id]
query_embedding = np.array(palette_index[query_index]).reshape(1, -1)
neighbour_ids = get_neighbour_ids(
query_embedding, n, skip_first_result=True)
return {
'original': miro_id_to_identifiers(miro_id),
'neighbours': [
miro_id_to_identifiers(miro_id)
for miro_id in neighbour_ids
]
}
@app.get('/palette')
def palette_similarity_by_palette(palette: list = None, n: int = 10):
if palette:
palette = json.loads(palette)
else:
palette = [random_hex() for _ in range(5)]
if len(palette) != 5:
raise HTTPException(
status_code=422,
detail='Palette must consist of 5 colours'
)
for colour in palette:
if not re.fullmatch(r'[A-Fa-f0-9]{6}', colour):
raise HTTPException(
status_code=422,
detail=f'{colour} is not a valid hex colour'
)
query_embedding = embed_hex_palette(palette)
neighbour_ids = get_neighbour_ids(query_embedding, n)
return {
'original': {
'palette': palette,
},
'neighbours': [
miro_id_to_identifiers(miro_id)
for miro_id in neighbour_ids
]
}
@app.get('/healthcheck')
def healthcheck():
return {'status': 'healthy'}
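# Illustrative usage sketch (module path, host/port and ids are assumed):
#   uvicorn palette_similarity.api:app --port 8000
#   GET /works/<catalogue_id>?n=10
#   GET /palette?palette=["aabbcc","112233","445566","778899","ffffff"]&n=10
#   GET /healthcheck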
|
from pathlib import Path
# Define path constant at import time
PATH = Path(__file__).parent  # .parent gives the directory containing this file (the package directory)
|
# Generated by Django 3.2.9 on 2021-12-31 13:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nezbank', '0002_auto_20211228_1743'),
]
operations = [
migrations.AlterField(
model_name='account',
name='rate',
field=models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Средств на счете'),
),
migrations.AlterField(
model_name='accounttype',
name='currency',
field=models.CharField(max_length=50, verbose_name='Валюта'),
),
migrations.AlterField(
model_name='accounttype',
name='value',
field=models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Курс'),
),
]
|
from selenium import webdriver
from sys import platform as _platform
import platform
driver = ""
opts = webdriver.ChromeOptions()
opts.add_argument('headless')
opts.add_argument('remote-debugging-port=9222')
if _platform.startswith("linux"):
driver = webdriver.Chrome(executable_path="webdrivers/chromedriver-linux", options=opts)
elif _platform == "darwin":
driver = webdriver.Chrome(executable_path="webdrivers/chromedriver-mac", options=opts)
elif _platform == "win32" or _platform == "win64":
driver = webdriver.Chrome(executable_path="webdrivers/chromedriver-windows.exe", options=opts)
# driver.close() |
## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2017 Intel Corporation. All Rights Reserved.
#####################################################
## rs400 advanced mode tutorial ##
#####################################################
# First import the library
import pyrealsense2 as rs
import time
import json
DS5_product_ids = ["0AD1", "0AD2", "0AD3", "0AD4", "0AD5", "0AF6", "0AFE", "0AFF", "0B00", "0B01", "0B03", "0B07"]
def find_device_that_supports_advanced_mode() :
ctx = rs.context()
ds5_dev = rs.device()
    devices = ctx.query_devices()
for dev in devices:
if dev.supports(rs.camera_info.product_id) and str(dev.get_info(rs.camera_info.product_id)) in DS5_product_ids:
if dev.supports(rs.camera_info.name):
print("Found device that supports advanced mode:", dev.get_info(rs.camera_info.name))
return dev
raise Exception("No device that supports advanced mode was found")
try:
dev = find_device_that_supports_advanced_mode()
advnc_mode = rs.rs400_advanced_mode(dev)
print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled")
# Loop until we successfully enable advanced mode
while not advnc_mode.is_enabled():
print("Trying to enable advanced mode...")
advnc_mode.toggle_advanced_mode(True)
# At this point the device will disconnect and re-connect.
print("Sleeping for 5 seconds...")
time.sleep(5)
# The 'dev' object will become invalid and we need to initialize it again
dev = find_device_that_supports_advanced_mode()
advnc_mode = rs.rs400_advanced_mode(dev)
print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled")
# Get each control's current value
print("Depth Control: \n", advnc_mode.get_depth_control())
print("RSM: \n", advnc_mode.get_rsm())
print("RAU Support Vector Control: \n", advnc_mode.get_rau_support_vector_control())
print("Color Control: \n", advnc_mode.get_color_control())
print("RAU Thresholds Control: \n", advnc_mode.get_rau_thresholds_control())
print("SLO Color Thresholds Control: \n", advnc_mode.get_slo_color_thresholds_control())
print("SLO Penalty Control: \n", advnc_mode.get_slo_penalty_control())
print("HDAD: \n", advnc_mode.get_hdad())
print("Color Correction: \n", advnc_mode.get_color_correction())
print("Depth Table: \n", advnc_mode.get_depth_table())
print("Auto Exposure Control: \n", advnc_mode.get_ae_control())
print("Census: \n", advnc_mode.get_census())
#To get the minimum and maximum value of each control use the mode value:
query_min_values_mode = 1
query_max_values_mode = 2
current_std_depth_control_group = advnc_mode.get_depth_control()
min_std_depth_control_group = advnc_mode.get_depth_control(query_min_values_mode)
max_std_depth_control_group = advnc_mode.get_depth_control(query_max_values_mode)
print("Depth Control Min Values: \n ", min_std_depth_control_group)
print("Depth Control Max Values: \n ", max_std_depth_control_group)
# Set some control with a new (median) value
current_std_depth_control_group.scoreThreshA = int((max_std_depth_control_group.scoreThreshA - min_std_depth_control_group.scoreThreshA) / 2)
advnc_mode.set_depth_control(current_std_depth_control_group)
print("After Setting new value, Depth Control: \n", advnc_mode.get_depth_control())
# Serialize all controls to a Json string
serialized_string = advnc_mode.serialize_json()
print("Controls as JSON: \n", serialized_string)
as_json_object = json.loads(serialized_string)
# We can also load controls from a json string
# The C++ JSON parser requires double-quotes for the json object so we need
# to replace the single quote of the pythonic json to double-quotes
json_string = str(as_json_object).replace("'", '\"')
advnc_mode.load_json(json_string)
except Exception as e:
print(e)
pass
|
LINKS = {
"GB_SAC_shape_file": "https://data.jncc.gov.uk/"
+ "data/52b4e00d-798e-4fbe-a6ca-2c5735ddf049/"
+ "GB-SAC-OSGB36-20190403.zip",
# All below from:
# https://environment.data.gov.uk/ecology/explorer/downloads/
# Data documentation (must read!) is here:
# https://environment.data.gov.uk/ecology/explorer/docs/
"NFPD_FWfish_counts": "https://environment.data.gov.uk/"
+ "ecology/explorer/downloads/FW_Fish_Counts.zip",
"NFPD_FWfish_banded_measurements": "https://environment"
+ ".data.gov.uk/ecology/explorer/downloads/"
+ "FW_Fish_Banded_Measurements.zip",
"NFPD_FWfish_bulk_measurements": "https://environment"
+ ".data.gov.uk/ecology/explorer/downloads/"
+ "FW_Fish_Bulk_Measurements.zip",
"NFPD_FWfish_data_types": "https://environment.data.gov.uk/"
+ "ecology/explorer/downloads/FW_Fish_Data_Types.zip",
"Biosys_FWriver_macroinvertebrates": "https://environment"
+ ".data.gov.uk/ecology/explorer/downloads/INV_OPEN_DATA.zip",
"Biosys_FWriver_macrophytes": "https://environment.data.gov"
+ ".uk/ecology/explorer/downloads/MACP_OPEN_DATA.zip",
"Biosys_FWriver_diatoms": "https://environment.data.gov.uk/"
+ "ecology/explorer/downloads/DIAT_OPEN_DATA.zip",
"Biosys_FWriver_taxon_info": "https://environment.data.gov"
+ ".uk/ecology/explorer/downloads/OPEN_DATA_TAXON_INFO.zip",
}
UNZIPPED_FILES = {
"GB_SAC_shape_file": [
"GB_SAC_OSGB36_20191031.shp",
"GB_SAC_OSGB36_20191031.shx",
],
"NFPD_FWfish_counts": ["FW_Fish_Counts.csv"],
"NFPD_FWfish_banded_measurements": [
"FW_Fish_Banded_Measurements.csv",
],
"NFPD_FWfish_bulk_measurements": [
"FW_Fish_Bulk_Measurements.csv",
],
"NFPD_FWfish_data_types": [
"FW_Fish_Data_Types.csv",
],
"Biosys_FWriver_macroinvertebrates": [
"INV_OPEN_DATA_METRICS.csv",
"INV_OPEN_DATA_SITE.csv",
"INV_OPEN_DATA_TAXA.csv",
],
"Biosys_FWriver_macrophytes": [
"MACP_OPEN_DATA_METRICS.csv",
"MACP_OPEN_DATA_SITE.csv",
"MACP_OPEN_DATA_TAXA.csv",
],
"Biosys_FWriver_diatoms": [
"DIAT_OPEN_DATA_METRICS.csv",
"DIAT_OPEN_DATA_SITE.csv",
"DIAT_OPEN_DATA_TAXA.csv",
],
"Biosys_FWriver_taxon_info": [
"OPEN_DATA_TAXON_INFO.csv",
],
}
|
from .shelves import LSMShelf, Shelf
from .dict import LSMDict
|
# -*- coding:utf-8 -*-
# !/usr/bin/env python3
"""
"""
__all__ = ['issquare']
def issquare(n):
"""
:param n:
:return:
>>> issquare(256)
True
>>> issquare(255)
False
"""
i = 1
while n > 0:
n -= i
i += 2
return n == 0
if __name__ == '__main__':
import doctest
doctest.testmod()
|
import password_strength as pwd
from django.core.validators import BaseValidator
from django.utils.translation import gettext, ungettext_lazy
class PolicyBaseValidator(BaseValidator):
def js_requirement(self):
return {}
class PolicyMinLengthValidator(PolicyBaseValidator):
message = ungettext_lazy(
'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'min_value'
def __init__(self, *args, **kwargs):
super(PolicyMinLengthValidator, self).__init__(*args, **kwargs)
def clean(self, value):
return pwd.PasswordStats(value).length
def compare(self, value, limit_value):
return value < limit_value
def js_requirement(self):
return {'minlength': {
'minLength': self.limit_value,
'text': gettext('be at least minLength characters long'),
}}
class PolicyContainSpecialCharsValidator(PolicyBaseValidator):
message = ungettext_lazy(
'Your input should contain at least %(limit_value)d special character (it has %(show_value)d).',
'Your input should contain at least %(limit_value)d special characters (it has %(show_value)d).',
'limit_value')
code = 'special_length'
def __init__(self, *args, **kwargs):
super(PolicyContainSpecialCharsValidator, self).__init__(*args, **kwargs)
def clean(self, value):
return pwd.PasswordStats(value).special_characters
def compare(self, value, limit_value):
return value < limit_value
def js_requirement(self):
return {'containSpecialChars': {
'minLength': self.limit_value,
'text': gettext('Your input should contain at least minLength special character'),
'regex': "([^!%&@#$^*?_~])",
'regex_flags': 'g'
}}
class PolicyContainLowercaseValidator(PolicyBaseValidator):
message = ungettext_lazy(
'Your input should contain at least %(limit_value)d lower case character (it has %(show_value)d).',
'Your input should contain at least %(limit_value)d lower case characters (it has %(show_value)d).',
'limit_value')
code = 'lowercase_length'
def __init__(self, *args, **kwargs):
super(PolicyContainLowercaseValidator, self).__init__(*args, **kwargs)
def clean(self, value):
return pwd.PasswordStats(value).letters_lowercase
def compare(self, value, limit_value):
return value < limit_value
def js_requirement(self):
return {'containLowercase': {
'minLength': self.limit_value,
'regex': '[^a-z]',
'regex_flags': 'g',
'text': gettext("Your input should contain at least minLength lower case character")
}}
class PolicyContainUppercaseValidator(PolicyBaseValidator):
message = ungettext_lazy(
'Your input should contain at least %(limit_value)d upper case character (it has %(show_value)d).',
        'Your input should contain at least %(limit_value)d upper case characters (it has %(show_value)d).',
        'limit_value')
code = 'uppercase_length'
def __init__(self, *args, **kwargs):
super(PolicyContainUppercaseValidator, self).__init__(*args, **kwargs)
def clean(self, value):
return pwd.PasswordStats(value).letters_uppercase
def compare(self, value, limit_value):
return value < limit_value
def js_requirement(self):
return {'containUppercase': {
'minLength': self.limit_value,
'regex': '[^A-Z]',
'regex_flags': 'g',
'text': gettext("Your input should contain at least minLength upper case character")
}}
class PolicyContainNumbersValidator(PolicyBaseValidator):
message = ungettext_lazy(
'Your input should contain at least %(limit_value)d number (it has %(show_value)d).',
'Your input should contain at least %(limit_value)d numbers (it has %(show_value)d).',
'limit_value')
code = 'number_length'
def __init__(self, *args, **kwargs):
super(PolicyContainNumbersValidator, self).__init__(*args, **kwargs)
def clean(self, value):
return pwd.PasswordStats(value).numbers
def compare(self, value, limit_value):
return value < limit_value
def js_requirement(self):
return {'containNumbers': {
'minLength': self.limit_value,
'regex': '[^0-9]',
'regex_flags': 'g',
'text': gettext("Your input should contain at least minLength number")
}}
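# Illustrative usage sketch (the form and field names are assumed, not part of this module):
#   password = forms.CharField(widget=forms.PasswordInput, validators=[
#       PolicyMinLengthValidator(8),
#       PolicyContainUppercaseValidator(1),
#       PolicyContainNumbersValidator(1),
#   ])
# The js_requirement() dicts of the attached validators can be merged to configure a
# client-side password-strength widget with the same limits.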
|
from nltk.corpus import stopwords
from nltk.tokenize import TweetTokenizer
from nltk.stem.wordnet import WordNetLemmatizer
def remove_stopwords(words):
return [word for word in words if word not in stopwords.words('english')]
def lemmatize(words):
words = [WordNetLemmatizer().lemmatize(word, pos='n') for word in words]
words = [WordNetLemmatizer().lemmatize(word, pos='v') for word in words]
words = [WordNetLemmatizer().lemmatize(word, pos='a') for word in words]
return words
def tokenize_twitter(text):
return TweetTokenizer().tokenize(text)
def tokenize(text):
return remove_stopwords(lemmatize(tokenize_twitter(text))) |
#!/usr/bin/env python
# coding: utf-8
import csv
import random
import datetime
# Global data parameters
product_data = 'product_data.txt'
prod_availability = [[0.8, 0.85, 0.7, 0.6], [0.6, 0.75, 0.98], [0.85, 0.6], 1]
street_data = ['Athens_streets.txt', 'Thessaloniki_streets.txt', 'Volos_streets.txt']
cities = ['Athens', 'Thessaloniki', 'Volos']
city_weights = [5, 3, 2]
postal_codes = [range(11850,11860), range(54620,54625), range(38222,38225)]
aisles = 5
shelves = 3
online_store = 10
stores_by_loc = [[1,2,3,4],[5,6,7],[8,9]]
working_hours = [*[[8,21]]*5, [9,20]]
working_hours_str = ['Mon-Fri: 08-21, Sat: 09-20, Sun: CLOSED', 'Mon-Sun: OPEN 24h']
area = [[400, 500, 320, 250], [240, 350, 1000], [500, 250], 'NULL']
first_reg_date = datetime.date(2019,1,1)
last_reg_date = datetime.date(2020,5,1)
first_birth_date = datetime.date(1945,1,1)
last_birth_date = datetime.date(2000,1,1)
first_price_date = datetime.date(2019,1,2)
last_price_date = datetime.date(2020,6,1)
max_price_changes = 10
reg_customers = 200
unreg_customers = 100
# Import product data from product_data.txt
# Products belonging to different categories are separated by an empty line
# and a line '===== {Category_name} ====='
product_name = []
product_price = []
product_category = []
cat = 0
fp = open(product_data, 'r')
line = fp.readline()
while line:
if line != '\n':
tokens = line.split(',')
if (tokens[0][0] != '='):
product_category.append(cat)
product_name.append(tokens[0])
product_price.append(float(tokens[1]))
else:
cat += 1
line = fp.readline()
fp.close()
# random.sample returns unique elements
# barcodes need to be sorted so that product weights remain valid
product_barcode = sorted(random.sample(range(10**12,10**13),len(product_name)))
# Read street names from txt files and save them in a dictionary
city_dict = {}
for street_file,city,ps in zip(street_data, cities, postal_codes):
city_dict[city] = {}
with open(street_file,'r') as f:
city_dict[city]['Streets'] = []
line = f.readline()
while(line):
if line:
city_dict[city]['Streets'].append(line.replace(' \n',''))
line = f.readline()
city_dict[city]['Postal_codes'] = ps
def generate_random_datetime(start_datetime, end_datetime):
date_delta = (end_datetime - start_datetime).days
time_delta = (end_datetime - start_datetime).seconds
if not date_delta:
random_date = start_datetime + datetime.timedelta(seconds = random.randrange(time_delta))
elif not time_delta:
random_date = start_datetime + datetime.timedelta(days = random.randrange(date_delta))
else:
random_date = start_datetime + datetime.timedelta(days = random.randrange(date_delta),
seconds = random.randrange(time_delta))
return random_date
def generate_random_sorted_dates(start_date, end_date, count):
days_between_dates = (end_date - start_date).days
# Make sure that "count" random days can be returned
    if days_between_dates < count:
        return False
    # Generate random dates until the number of unique random dates equals count
while True:
random_dates = []
for i in range(count):
random_dates.append(generate_random_datetime(start_date, end_date))
sorted_unique_dates = sorted(set(random_dates))
if len(sorted_unique_dates) == count:
return sorted_unique_dates
# Generate a timestamp for a given date at which a transaction is possible
# The online store (store_id == 10) is open 24/7
# The physical stores are open Mon-Fri: 8-21 and Sat: 9-20
def generate_random_working_datetime(date, store):
if store == online_store:
        time = datetime.time(hour = random.randrange(0,24), minute = random.randrange(0,60),
                             second = random.randrange(0,60))
else:
day = date.weekday()
if day < len(working_hours):
opening = working_hours[day][0]
closing = working_hours[day][1]
time = datetime.time(hour = random.randrange(opening,closing-1), minute = random.randrange(0,60),
second = random.randrange(0,60))
else:
raise ValueError('The chosen store is closed at the given date')
return datetime.datetime.combine(date, time)
# Generate a pseudo-random price history consisting of a number of changes
def generate_price_history(start_price, changes):
price_hist_arr = [start_price]
for i in range(changes):
price_hist_arr.append(round(price_hist_arr[i]*random.uniform(0.8, 1.2),2))
if (random.random() < 0.1):
price_hist_arr[-1] = "NULL"
return price_hist_arr
# Retrieve the product price that was valid at the date the transaction occurred
def get_current_price(price_info_dict, shop_date):
for pr,sd,ed in zip(price_info_dict['Price'], price_info_dict['Start_date'], price_info_dict['End_date']):
if sd <= shop_date:
if ed == 'NULL':
return pr
elif ed > shop_date:
return pr
raise ValueError('The product had no valid price at the given date')
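# Illustrative sketch (values are assumed): for a price_info_dict like
#   {'Price': [2.50, 2.80], 'Start_date': [2019-01-01, 2019-06-15], 'End_date': [2019-06-15, 'NULL']}
# get_current_price(..., shop_date=2019-07-01) returns 2.80, while shop_date=2019-03-01 returns 2.50.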
# Choose the next store and shopping date according to the shopping profile of each customer
# freq represents how often a customer shops on average
# store_pool are the stores the customer shops from
def get_next_date_store(previous_date, freq, store_pool, store_weight):
new_date = previous_date + datetime.timedelta(days = random.randrange(max(0,freq-2),freq+2))
chosen_store = random.choices(store_pool, weights = store_weight)[0]
while True:
        # The online store is open every day, so any date is accepted
        # Physical stores are closed on Sundays (weekday 6)
if chosen_store == online_store: # Accept every new date
shop_date = new_date
return shop_date, chosen_store
else:
if new_date.weekday() < len(working_hours): # Accept all weekdays except Sunday
shop_date = new_date
return shop_date, chosen_store
new_date = previous_date + datetime.timedelta(days = random.randrange(max(0,freq-2),freq+2))
# Generate pseudo-random products for a specific transaction using the functions specified above
# average_items shows the average number of items a specific customer buys
# prod_weight shows the customer's preference for the offered products
def generate_items(barcodes, average_items, price_dict, shop_date, prod_weight):
transaction_items = random.randint(max(1,average_items-5),average_items+5)
chosen_products = random.choices(barcodes, k = transaction_items, weights = prod_weight)
chosen_prod_dict = dict((x, dict(quantity = chosen_products.count(x))) for x in set(chosen_products))
for b,info in chosen_prod_dict.items():
info['price'] = get_current_price(price_dict[b], shop_date)
# Keep only the products that have a valid price at transaction date
temp_dict = {}
for b,info in chosen_prod_dict.items():
if info['price'] != 'NULL':
temp_dict[b] = info
return temp_dict
# Generate a shopping profile for each customer
def generate_prod_weights(shop_profile, pet, all_barcodes, barcodes_in_store):
# Profile: Fit single
# Shops mainly fresh and refrigerated products, some personal care and less liquor and homeware
if shop_profile < 0.15:
prod_weight = [10]*37 + [8]*32 + [2]*26 + [4]*17 + [1]*18
# Profile: Lazy single
# Shops mainly refrigerated and personal care, some vegetables and liquor and less homeware
elif shop_profile < 0.4:
prod_weight = [4]*37 + [8]*32 + [3]*26 + [6]*17 + [1]*18
# Profile: Family guy
# Shops mainly fresh and refrigerated products and personal care, some liquor and homeware
else:
prod_weight = [10]*37 + [8]*32 + [2]*26 + [7]*17 + [2]*18
if pet == 'dog':
prod_weight += [5]*4 + [0]*8
elif pet == 'cat':
prod_weight += [0]*4 + [5]*5 + [0]*3
elif pet == 'parrot':
prod_weight += [0]*9 + [5]*3
else:
prod_weight += [0]*12
# Keep only the weights for the products sold in the chosen store
weights_in_store = []
for b,w in zip(all_barcodes,prod_weight):
if b in barcodes_in_store:
weights_in_store.append(w)
return weights_in_store
# Define the number and ID of the stores a specific customer shops from
def generate_store_preference(store_profile):
stores = 0
for sub_arr in stores_by_loc:
stores += len(sub_arr)
if store_profile < 0.55: # Customer shops from only one physical store
store_pref = [random.randint(stores_by_loc[0][0],stores_by_loc[-1][-1])]
store_prob = [1]
elif store_profile < 0.7: # Customer shops from one physical store and online
store_pref = [random.randint(stores_by_loc[0][0],stores_by_loc[-1][-1]), online_store]
rand = random.uniform(0.7,0.95)
store_prob = [rand, 1-rand]
elif store_profile < 0.8: # Customer shops from 2 physical stores
store_pref = random.sample(stores_by_loc[random.randint(0,len(stores_by_loc)-1)], k = 2)
rand = random.uniform(0.7,0.95)
store_prob = [rand, 1-rand]
elif store_profile < 0.9: # Customer shops from 2 physical stores + online
store_pref = [*random.sample(stores_by_loc[random.randint(0,len(stores_by_loc)-1)], k = 2), online_store]
rand_1 = random.uniform(0.6, 0.8)
rand_2 = random.uniform(0.05, 0.15)
store_prob = [rand_1, rand_2, 1 - rand_1 - rand_2]
else: # Customer shops only online
store_pref = [online_store]
store_prob = [1]
return store_pref, store_prob
def generate_random_address(city):
address_dict = {}
address_dict['Street'] = random.choice(city_dict[city]['Streets'])
address_dict['Number'] = random.randint(1,200)
address_dict['Postal_code'] = random.choice(city_dict[city]['Postal_codes'])
address_dict['City'] = city
return address_dict
# Save store information in a dictionary
# Add information for physical stores
store_dict = {}
for store_team, area_team, city in zip(stores_by_loc, area, cities):
for s,a in zip(store_team, area_team):
store_dict[s] = {}
store_dict[s]['Area'] = a
store_dict[s]['Opening_hours'] = working_hours_str[0]
store_dict[s]['Address'] = generate_random_address(city)
# Add information for the online store, assuming it is co-located with the first store
store_dict[online_store] = {}
store_dict[online_store]['Area'] = 'NULL'
store_dict[online_store]['Opening_hours'] = working_hours_str[1]
store_dict[online_store]['Address'] = store_dict[stores_by_loc[0][0]]['Address']
# Information for the price history of a product
price_hist_dict = {
b: {
'Price':[p],
'Start_date':[first_reg_date],
'End_date':[]
} for b,p in zip(product_barcode, product_price)
}
for key, val in price_hist_dict.items():
price_change_dates = generate_random_sorted_dates(first_price_date,last_price_date,
random.randint(0,max_price_changes))
price_hist = generate_price_history(val['Price'][0], len(price_change_dates))
price_hist_dict[key]['Price'] = price_hist
price_hist_dict[key]['Start_date'] += price_change_dates
price_hist_dict[key]['End_date'] = price_change_dates + ['NULL']
# Generate pseudo-random information for many customers and save them to a dictionary
while True:
card_id = [random.randint(10**7, 10**8) for i in range(reg_customers)]
if len(card_id) == len(list(set(card_id))):
break
customer_name = ['Customer-{}'.format(i) for i in range(reg_customers)]
customer_sex = ['M' if random.random() < 0.5 else 'F' for i in range(reg_customers)]
reg_date = [generate_random_datetime(first_reg_date, last_reg_date) for i in range(reg_customers)]
customer_dob=[generate_random_datetime(first_birth_date,last_birth_date) for i in range(reg_customers)]
pet = []
for i in range(reg_customers):
rand = random.random()
if rand < 0.2:
pet.append('dog')
elif rand < 0.3:
pet.append('cat')
elif rand < 0.35:
pet.append('parrot')
else:
pet.append('NULL')
customer_dict = {
card: {
'Name': n,
'Sex': s,
'Points': 0,
'Registration_date': d,
'Pet': p,
'DoB': dob,
} for (card,n,s,d,p,dob) in zip(card_id,
customer_name,
customer_sex,
reg_date, pet,
customer_dob
)
}
offers_dict = {}
for store_team, avail_team in zip(stores_by_loc, prod_availability):
for s,av in zip(store_team, avail_team):
offers_dict[s] = {}
for b in random.sample(product_barcode, int(len(product_barcode)*av)):
offers_dict[s][b] = {
'Aisle': random.randint(1,aisles),
'Shelf': random.randint(1,shelves)
}
offers_dict[online_store] = {}
for b in random.sample(product_barcode, int(len(product_barcode)*prod_availability[-1])):
offers_dict[online_store][b] = {
'Aisle': 'NULL',
'Shelf': 'NULL'
}
# Generate pseudo-random transactions for all customers taking into account each customers profile
# (shop_freq, average_items, pet, shop_profile, payment_profile, store_pref, store_prob)
# as well as the availability of products at the chosen store on the chosen date
# and the price at the transaction date
transaction_dict = {}
for i in range(reg_customers):
shop_freq = random.randint(2,8)
average_items = random.randint(5,20)
customer_id = card_id[i]
reg_date = customer_dict[customer_id]['Registration_date']
pet = customer_dict[customer_id]['Pet']
shop_profile = random.random()
payment_profile = random.random()
store_pref, store_prob = generate_store_preference(random.random())
points = 0 # 1 point is given for 3 euros spent (no rewards are assumed)
# Start from registration date (or the next few days) and until last_price_date
shop_date, chosen_store = get_next_date_store(reg_date, 0, store_pref, store_prob)
while shop_date < last_price_date:
transaction_id = random.randint(10**12, 10**13)
payment_method = 'cash' if random.random() < payment_profile else 'credit_card'
barcode_in_store = sorted(list(offers_dict[chosen_store].keys()))
barcode_weight = generate_prod_weights(shop_profile, pet, product_barcode, barcode_in_store)
shopped_prod = generate_items(barcode_in_store, average_items, price_hist_dict, shop_date, barcode_weight)
amount = 0
quantity = []
for key,val in shopped_prod.items():
amount += val['price'] * val['quantity']
quantity.append(val['quantity'])
points += int(amount/3)
total_pieces = sum(quantity)
# Save all information on a dictionary
transaction_dict[transaction_id] = {
'timestamp': generate_random_working_datetime(shop_date, chosen_store),
'store_id': chosen_store,
'card_id': customer_id,
'payment_method': payment_method,
'products': list(shopped_prod.keys()),
'quantity': quantity,
'total_amount': amount,
'total_pieces': total_pieces
}
shop_date, chosen_store = get_next_date_store(shop_date, shop_freq, store_pref, store_prob)
customer_dict[customer_id]['Points'] = points
# The store a customer mainly shops from shows the place they live in
# Assume weighted distribution for customers who shop only online
if store_pref[0] < 5:
customer_dict[customer_id]['Address'] = generate_random_address('Athens')
elif store_pref[0] < 8:
customer_dict[customer_id]['Address'] = generate_random_address('Thessaloniki')
elif store_pref[0] < 10:
customer_dict[customer_id]['Address'] = generate_random_address('Volos')
else:
customer_dict[customer_id]['Address'] = generate_random_address(random.choices(['Athens','Thessaloniki','Volos'],
weights = [5, 3, 2],
k = 1)[0])
# Repeat the previous procedure, but assume that the customers are not registered and have no personal card
# As a result no customer information can be kept
# Assume that they have similar habits to registered customers
for i in range(unreg_customers):
shop_freq = random.randint(2,8)
average_items = random.randint(5,20)
reg_date = first_reg_date
pet = customer_dict[card_id[i]]['Pet']
shop_profile = random.random()
store_pref, store_prob = generate_store_preference(random.random())
shop_date, chosen_store = get_next_date_store(reg_date, 0, store_pref, store_prob)
while shop_date < last_price_date:
transaction_id = random.randint(10**12, 10**13)
payment_method = 'cash' if random.random() < 0.4 else 'credit_card'
barcode_in_store = sorted(list(offers_dict[chosen_store].keys()))
barcode_weight = generate_prod_weights(shop_profile, pet, product_barcode, barcode_in_store)
shopped_prod = generate_items(barcode_in_store, average_items, price_hist_dict, shop_date, barcode_weight)
amount = 0
quantity = []
for key,val in shopped_prod.items():
amount += val['price'] * val['quantity']
quantity.append(val['quantity'])
total_pieces = sum(quantity)
transaction_dict[transaction_id] = {
'card_id': 'NULL',
'timestamp': generate_random_working_datetime(shop_date, chosen_store),
'store_id': chosen_store,
'payment_method': payment_method,
'products': list(shopped_prod.keys()),
'quantity': quantity,
'total_amount': amount,
'total_pieces': total_pieces
}
shop_date, chosen_store = get_next_date_store(shop_date, shop_freq, store_pref, store_prob)
# Save all the generated data at csv files so it can be imported to the SQL database
with open('csv_files/store.csv','w') as store_csv:
store_writer = csv.writer(store_csv, delimiter=',', quotechar='"')
store_writer.writerow(['store_id','area','street_name','street_number','postal_code','city','opening_hours'])
for key,val in store_dict.items():
store_writer.writerow([key, val['Area'], val['Address']['Street'], val['Address']['Number'],
val['Address']['Postal_code'], val['Address']['City'], val['Opening_hours']])
with open('csv_files/product.csv', 'w') as data_csv:
data_writer = csv.writer(data_csv, delimiter=',', quotechar='"')
data_writer.writerow(['barcode','name','label','current_price','category_id'])
for c,b,n in zip(product_category, product_barcode, product_name):
p = price_hist_dict[b]['Price'][-1]
data_writer.writerow([b,n,random.randint(0,1),p,c])
with open('csv_files/price.csv', 'w') as hist_csv:
hist_writer = csv.writer(hist_csv, delimiter = ',', quotechar = '"')
hist_writer.writerow(['barcode', 'start_date', 'end_date', 'amount'])
for key,val in price_hist_dict.items():
for i in range(len(val['Price'])):
hist_writer.writerow([key, val['Start_date'][i], val['End_date'][i], val['Price'][i]])
with open('csv_files/customer.csv', 'w') as customer_csv:
customer_writer = csv.writer(customer_csv, delimiter = ',', quotechar = '"')
customer_writer.writerow(['card_id','name','reg_date','points','pet','sex','date_of_birth','street',
'number','postal_code','city'])
for key,val in customer_dict.items():
customer_writer.writerow([key, val['Name'], val['Registration_date'], val['Points'], val['Pet'],
val['Sex'], val['DoB'], val['Address']['Street'], val['Address']['Number'],
val['Address']['Postal_code'], val['Address']['City']])
with open('csv_files/offers_products.csv', 'w') as offers_csv:
offers_writer = csv.writer(offers_csv, delimiter = ',', quotechar = '"')
offers_writer.writerow(['store_id','barcode','aisle','shelf'])
for store,loc_dict in offers_dict.items():
for barcode,val in loc_dict.items():
offers_writer.writerow([store, barcode, val['Aisle'], val['Shelf']])
with open('csv_files/transaction.csv', 'w') as transaction_csv:
transaction_writer = csv.writer(transaction_csv, delimiter = ',', quotechar = '"')
transaction_writer.writerow(['transaction_id','total_amount','payment_method','timestamp','total_pieces',
'store_id','card_id'])
for key,val in transaction_dict.items():
transaction_writer.writerow([key, '{:.2f}'.format(val['total_amount']), val['payment_method'],
val['timestamp'], val['total_pieces'], val['store_id'], val['card_id']])
with open('csv_files/buy_products.csv', 'w') as buy_csv:
buy_writer = csv.writer(buy_csv, delimiter = ',', quotechar = '"')
buy_writer.writerow(['transaction_id','barcode','quantity'])
for key,val in transaction_dict.items():
for b,q in zip(val['products'], val['quantity']):
buy_writer.writerow([key, b, q])
category_name = ['Fresh products','Refrigerated products','Liquor','Personal care','Homeware','Pet products']
with open('csv_files/categories.csv', 'w') as categ_csv:
categ_writer = csv.writer(categ_csv, delimiter = ',', quotechar = '"')
categ_writer.writerow(['category_id','category_name'])
for cid,cname in zip(range(1,7),category_name):
categ_writer.writerow([cid, cname]) |
#!/usr/bin/env python
from functools import wraps
import time
import collections
import threading
import enum
import json
from threading import Event
from itertools import count
import numpy as np
import logging
import queue
# A re-entrant lock is used wherever "Lock" is referenced below.
from threading import RLock as Lock
import argparse
import exceptions
#############
## Globals ##
#############
TIMEOUT_IN_SECONDS = None #600
POLL_SLEEP_MS = 100
REPORT_INTERVAL = 1
PLACEMENT_PERIOD = 2
DUMMY_EXEC_LATENCY = 0.000
DUMMY_LOAD_LATENCY = 0.000
DUMMY_UNLOAD_LATENCY = 0
KEEP_ALIVE_IN_SECONDS = 2
PLACEMENT_POLL_INTERVAL = 0.1
MODEL_INFO_FRESHNESS = 5.
#############
#############
##############
## Prefixes ##
##############
KEYSPACE_PREFIX = "__keyspace@0__:"
DB_FIELD_SEPARATOR = ":"
UPDATING_FLAG_NAME = "updating_metrics"
PLACEMENT_REQUEST_KEY = "requested-placements"
WORKER_URL_KEY = "workers"
MODEL_NAME_KEY = "models"
MODEL_PLACEMENT_PREFIX = f"model-placement{DB_FIELD_SEPARATOR}"
WORKER_PREFIX = f"worker{DB_FIELD_SEPARATOR}"
MODEL_PREFIX = f"model{DB_FIELD_SEPARATOR}"
STAT_PREFIX = f"statistic{DB_FIELD_SEPARATOR}"
WORKER_STAT_PREFIX = f"{STAT_PREFIX}worker{DB_FIELD_SEPARATOR}"
MODEL_STAT_PREFIX = f"{STAT_PREFIX}model{DB_FIELD_SEPARATOR}"
##############
##############
def getLogger(identifier=None, hide_debug=False):
#return logging.getLogger()
logFormatter = logging.Formatter(f"%(threadName)s %(asctime)s [%(levelname)-5.5s] %(message)s")
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
logger = logging.getLogger()
logger.addHandler(consoleHandler)
if hide_debug:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.DEBUG)
    logger.setLevel(logging.WARNING)  # overrides the levels set above; remove this line to honour hide_debug
return logger
def getParser(add_help=True, *args, **kwargs):
parser = argparse.ArgumentParser(add_help=add_help)
parser.add_argument('--redis_server', default="redis-server",
help='Redis Server name')
parser.add_argument('--redis_port', default=6379, type=int,
help='Redis Server port')
parser.add_argument('--workload_file', default="workload/workload.txt")
parser.add_argument('--model_description_file', default="workload/models.json")
parser.add_argument('--rng_seed', default=None, type=int,
help="RNG Seed. Default is no seed.")
## Options that may affect multiple containers
parser.add_argument('--max_concurrent_models', default=2, type=int,
help="Maximum number of models that can be loaded at once")
parser.add_argument('--num_workers_to_add', default=1, type=int,
help="Number of workers to add to the system")
parser.add_argument('--worker_memory', default=100, type=float,
help="Amount of memory on each worker. (In reality would be per-worked, but this is for the simulation.)")
parser.add_argument('--dummy_load', action="store_true",
help="Set to true to use a dummy load, with no loading or unloading of models or inferences.")
return parser
def fixWorkerURL(worker_url):
if not ":" in worker_url:
worker_url += ":50051"
return worker_url
def stripWorkerURL(worker_url):
if ":" in worker_url:
worker_url = worker_url[:worker_url.index(':')]
return worker_url
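# Illustrative sketch (not part of the original code): a minimal way to combine
# getParser() and fixWorkerURL() from above. The argument values here are made
# up for demonstration.
def _example_parse_args():
    args = getParser().parse_args(["--redis_server", "localhost", "--dummy_load"])
    return fixWorkerURL(args.redis_server)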
def timing(f):
@wraps(f)
def wrap(*args, **kw):
ts = time.time()
result = f(*args, **kw)
te = time.time()
logging.info(f"TIMING: {f.__name__}({[str(i)[:100] for i in args], kw}): {te-ts}s")
return result
return wrap
def gather_info(f):
@wraps(f)
def wrap(*args, **kw):
ts = time.time()
result = f(*args, **kw)
te = time.time()
logging.info(f"STATISTICS: {f.__name__}({[str(i)[:100] for i in args], kw}): {te-ts}s")
return result
return wrap
def getData(img=None, format="FORMAT_NCHW", dtype=np.float32, c=3, h=224, w=224, scaling="INCEPTION", model="resnet"):
"""
Pre-process an image to meet the size, type and format
requirements specified by the parameters.
"""
#np.set_printoptions(threshold='nan')
from PIL import Image
if img is None:
img = Image.open("mug.jpg")
if c == 1:
sample_img = img.convert('L')
else:
sample_img = img.convert('RGB')
resized_img = sample_img.resize((w, h), Image.BILINEAR)
resized = np.array(resized_img)
if resized.ndim == 2:
resized = resized[:,:,np.newaxis]
typed = resized.astype(dtype)
if scaling == 'INCEPTION':
scaled = (typed / 128) - 1
elif scaling == 'VGG':
if c == 1:
scaled = typed - np.asarray((128,), dtype=dtype)
else:
scaled = typed - np.asarray((123, 117, 104), dtype=dtype)
else:
scaled = typed
# Swap to CHW if necessary
#if format == model_config.ModelInput.FORMAT_NCHW:
ordered = np.transpose(scaled, (2, 0, 1))
#else:
# ordered = scaled
if model == "inception":
ordered = np.transpose(ordered, (2, 1, 0))
# Channels are in RGB order. Currently model configuration data
# doesn't provide any information as to other channel orderings
# (like BGR) so we just assume RGB.
return json.dumps([[ordered.tolist()]])
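# Illustrative sketch (not part of the original code): building a request
# payload with getData(); assumes Pillow is installed and the default
# "mug.jpg" referenced above exists next to this script.
def _example_payload():
    return getData(format="FORMAT_NCHW", dtype=np.float32, c=3, h=224, w=224,
                   scaling="INCEPTION", model="resnet")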
def getModelInfo(db=None, json_file="workload/models.json"):
with open(json_file) as json_fid:
model_stats = json.load(json_fid)
#logging.debug(f"model_stats: {model_stats}")
model_descriptions = {}
for model_dict in model_stats:
logging.debug(f"model_dict: {model_dict}")
model_name = model_dict["name"]
avg_exec_latency = model_dict["avg_exec_latency"]
avg_load_latency = model_dict["avg_load_latency"]
avg_unload_latency = model_dict["avg_unload_latency"]
loaded_size = model_dict["loaded_size"]
model_descriptions[model_name] = {
"load_latency" : avg_load_latency,
"exec_latency" : avg_exec_latency,
"unload_latency" : avg_unload_latency,
"loaded_size" : loaded_size,
}
if db is not None:
logging.debug("Setting model info")
db.set(f"{MODEL_STAT_PREFIX}{model_name}{DB_FIELD_SEPARATOR}avg_exec_latency", avg_exec_latency)
db.set(f"{MODEL_STAT_PREFIX}{model_name}{DB_FIELD_SEPARATOR}avg_load_latency", avg_load_latency)
db.set(f"{MODEL_STAT_PREFIX}{model_name}{DB_FIELD_SEPARATOR}avg_unload_latency", avg_unload_latency)
db.set(f"{MODEL_STAT_PREFIX}{model_name}{DB_FIELD_SEPARATOR}loaded_size", loaded_size)
else:
logging.debug("Not setting any model info")
return model_descriptions
class InferenceRequest(object):
_ids = count(1)
def __init__(self, model_name, data, id_num=None, allow_timeout=True):
if id_num is None:
self.id = next(self._ids)
else:
self.id = id_num
self.model_name = model_name
self.data = data
self.allow_timeout = allow_timeout
self.complete = Event()
#self.entry_time = time.time()
self.times = {
"entry_time" : time.time(),
"assignment_time" : 0.,
"execution_time" : 0.,
"completion_time" : 0.,
}
self.model_miss = False
self.response = None
#def __getattribute__(self, name):
# if object.__getattribute__(self, "allow_timeout"):
# if object.__getattribute__(self, "times")["entry_time"] + TIMEOUT_IN_SECONDS < time.time():
# raise exceptions.RequestTimeoutException("InferenceRequest invalid (timeout)")
# return object.__getattribute__(self, name)
def __repr__(self):
return f"<{self.__class__.__name__}: {self.id}, {self.model_name}, \"{self.response}\">"
def __str__(self):
return repr(self)
    def isTimedOut(self):
        # With no global timeout configured (TIMEOUT_IN_SECONDS is None), requests never time out.
        if TIMEOUT_IN_SECONDS is None:
            return False
        return time.time() >= self.times["entry_time"] + TIMEOUT_IN_SECONDS
def toJSONString(self):
attr_dict = {
"id" : self.id,
"allow_timeout": self.allow_timeout,
"model_name" : self.model_name,
"data" : self.data,
"times" : self.times,
"response" : self.response,
"model_miss" : self.model_miss
}
return json.dumps(attr_dict)
@classmethod
def fromJSONString(cls, json_str):
attr_dict = json.loads(json_str)
new_request = cls(
model_name=attr_dict["model_name"],
data=attr_dict["data"],
id_num=attr_dict["id"],
allow_timeout=attr_dict["allow_timeout"],
)
new_request.times = attr_dict["times"]
new_request.response = attr_dict["response"]
new_request.model_miss = attr_dict["model_miss"]
return new_request
def markAssignment(self):
self.times["assignment_time"] = time.time()
def markExecution(self):
self.times["execution_time"] = time.time()
def markCompletion(self):
self.times["completion_time"] = time.time()
def markModelMiss(self):
self.model_miss = True
def mergeRequests(self, other):
self.times = other.times
self.response = other.response
def getResponse(self):
response_dict = {
"model" : self.model_name,
"response" : self.response,
"placement_delay" : self.times["assignment_time"] - self.times["entry_time"],
"queue_delay" : self.times["execution_time"] - self.times["assignment_time"],
"execution_delay" : self.times["completion_time"] - self.times["execution_time"],
"overall_latency" : time.time() - self.times["entry_time"],
"model_miss" : self.model_miss,
}
return json.dumps(response_dict)
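# Illustrative sketch (not part of the original code): serialising an
# InferenceRequest to JSON and restoring it, the way toJSONString() and
# fromJSONString() above are intended to be used when passing requests
# between processes. The model name and data are made up.
def _example_request_round_trip():
    req = InferenceRequest(model_name="resnet", data="[]")
    req.markAssignment()
    restored = InferenceRequest.fromJSONString(req.toJSONString())
    assert restored.id == req.id and restored.times == req.times
    return restored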
class Metrics(object):
# TODO: make async
    def __init__(self, db, prefix, names_of_metrics=None, last_used_metrics=None, report_interval=REPORT_INTERVAL):
        self.db = db
        self.prefix = prefix
        self.report_interval = report_interval
        # Avoid mutable default arguments.
        self.names_of_metrics = names_of_metrics if names_of_metrics is not None else []
        self.metrics = {
            metric_name : collections.defaultdict(int)
            for metric_name in self.names_of_metrics
        }
        self.last_used_metrics = last_used_metrics if last_used_metrics is not None else []
self.lock = Lock()
self.metrics_thread = threading.Timer(self.report_interval, self.pushMetrics)
self.metrics_thread.start()
def incrementMetricBy(self, metric_name, field_name, delta_value=1):
with self.lock:
self.metrics[metric_name][field_name] += delta_value
def pushMetrics(self):
#logging.info(f"pushMetrics")
# Restart for next metrics
self.metrics_thread = threading.Timer(self.report_interval, self.pushMetrics)
self.metrics_thread.start()
with self.lock:
metrics_to_report = self.metrics
self.metrics = {
metric_name : collections.defaultdict(int)
for metric_name in self.names_of_metrics
}
report_time = time.time()
pipe = self.db.pipeline()
for metric_name, metrics in metrics_to_report.items():
for field_name in metrics:
pipe.incrby( f"{self.prefix}{field_name}{DB_FIELD_SEPARATOR}{metric_name}", metrics[field_name] )
for metrics in self.last_used_metrics:
for field_name in metrics_to_report[metrics]:
pipe.set( f"{self.prefix}{field_name}{DB_FIELD_SEPARATOR}last_used" , report_time )
#pipe.execute()
results = pipe.execute()
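# Illustrative sketch (not part of the original code): how a worker might
# count per-model executions with Metrics. `db` is assumed to be a redis.Redis
# connection (see RedisInterface below); counters are pushed to Redis every
# REPORT_INTERVAL seconds by pushMetrics().
def _example_metrics_usage(db):
    metrics = Metrics(db, prefix=WORKER_STAT_PREFIX,
                      names_of_metrics=["exec_count"],
                      last_used_metrics=["exec_count"])
    metrics.incrementMetricBy("exec_count", "resnet")
    return metrics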
class RedisInterface(object):
def __init__(self, redis_server="redis-server", redis_port=6379, *args, **kwargs):
import redis
self.db = redis.Redis(host=redis_server, port=redis_port)
try:
self.db.ping()
except redis.exceptions.ConnectionError as e:
print(f"Error connecting to Redis server @ {redis_server}. Is it started?")
print(e)
class ModelPlacements(object):
class Model(object):
def __init__(self, model_name, model_info=None):
self.name = model_name
if model_info is not None:
self.load_latency = model_info["load_latency"]
self.exec_latency = model_info["exec_latency"]
self.unload_latency = model_info["unload_latency"]
self.loaded_size = model_info["loaded_size"]
else:
self.load_latency = 0
self.exec_latency = 0
self.unload_latency = 0
self.loaded_size = 0
self.last_used = 0.0
def __str__(self):
return self.name
def __hash__(self):
return hash(self.name)
def __lt__(self, other):
return self.name < other.name
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.name == other.name
else:
return self.name == other
def getLoadLatency(self):
return self.load_latency
def getExecLatency(self):
return self.exec_latency
def getUnloadLatency(self):
return self.unload_latency
def getSize(self):
return self.loaded_size
def getName(self):
return self.name
def __init__(self, *args, **kwargs):
self.models = set()
self.workers = set()
self.__workers_by_model = collections.defaultdict(set)
self.__models_by_worker = collections.defaultdict(set)
self.additions = queue.Queue()
self.removals = queue.Queue()
self.lock = Lock()
self.is_synced = True
def __str__(self):
return '|'.join([f"{m}:{self.__workers_by_model[m]}" for m in self.models])
def addModel(self, model):
self.models.add(model)
def addWorker(self, worker):
self.workers.add(worker)
def getModels(self):
return self.models
def getWorkers(self):
return self.workers
def sync(self):
with self.lock:
self.__models_by_worker = collections.defaultdict(set) #{ worker : set([]) for worker in self.workers }
for (model, workers) in self.__workers_by_model.items():
for worker in workers:
self.__models_by_worker[worker].add(model)
self.is_synced = True
def getModelsFromWorker(self, worker):
logging.debug("getModelsFromWorker()")
with self.lock:
if not self.is_synced:
self.sync()
return list(self.__models_by_worker[worker])
def getWorkersFromModel(self, model):
return list(self.__workers_by_model[model])
def getModelsByWorker(self):
with self.lock:
if not self.is_synced:
self.sync()
return self.__models_by_worker
def getWorkersByModel(self):
return self.__workers_by_model
def addModelToWorker(self, worker, model):
with self.lock:
self.__workers_by_model[model].add(worker)
self.is_synced = False
self.additions.put( (worker, model) )
def removeModelFromWorker(self, worker, model):
logging.debug(f"removeModelFromWorker(self, {worker}, {model})")
with self.lock:
self.__workers_by_model[model].remove(worker)
self.is_synced = False
self.removals.put( (worker, model) )
def getEmptyWorkers(self):
logging.debug(f"getEmptyWorkers")
return [w for w in self.getWorkers() if len(self.getModelsFromWorker(w)) == 0]
def getModelsInCache(self):
return [m for m in self.getModels() if len(self.getWorkersFromModel(m)) > 0]
def get_subsets_over_size(list_of_models, size_limit):
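    """Recursively enumerate subsets of list_of_models whose combined
    getSize() exceeds size_limit; each subset stops growing as soon as it
    passes the limit."""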
list_of_subsets = []
if len(list_of_models) == 0:
return list_of_subsets
for i, base_model in enumerate(list_of_models):
if base_model.getSize() > size_limit:
list_of_subsets.append([base_model])
else:
for subset in get_subsets_over_size(list_of_models[i+1:], size_limit-base_model.getSize()):
list_of_subsets.append([base_model] + subset)
return list_of_subsets
def main():
models_info = getModelInfo(json_file="workload/models.azure.json")
models = [ModelPlacements.Model(m_name, m_info) for m_name, m_info in models_info.items()]
input_models = sorted(models[:100], key=(lambda m: m.getSize()), reverse=True)
for m_set in get_subsets_over_size(input_models, 0.6):
print([str(m) for m in m_set])
pass
if __name__ == "__main__":
# stuff only to run when not called via 'import' here
main()
|
# (C) Copyright 1996-2016 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
#importing Magics module
from Magics.macro import *
ref = 'axis6'
#Setting of the output file name
output = output(output_formats = ['png'],
output_name_first_page_number = "off",
output_name = ref)
#Setting the cartesian view
projection1 = mmap(page_y_position = 0.,
page_y_length = 4.,
subpage_y_position = 2.,
subpage_y_length = 1.,
subpage_frame = "off",
subpage_map_projection = 'cartesian',
subpage_x_axis_type = 'date',
subpage_y_axis_type = 'regular',
subpage_x_date_min = "1979-01-01",
subpage_x_date_max = "2017-05-01",
subpage_y_min = -20.,
subpage_y_max = 20.)
#Horizontal axis
horizontal = maxis(axis_orientation = "horizontal",
axis_type = "date",
axis_minor_tick = "on",
axis_grid = 'on',
axis_date_type = "climate",
axis_days_label_height = 0.4,
axis_hours_label_height = 0.4,
axis_months_label_height = 0.4,
axis_years_label_height = 0.4)
vertical = maxis(axis_orientation = "vertical",
axis_grid = 'on')
title1 = mtext(
text_lines = ["Using automatic labelling for long time serie..."],
text_justification = "left",
text_font_size = 0.5,
text_colour = "charcoal")
page1 = page()
#Setting the cartesian view
projection2 = mmap( page_id_line = 'off',
page_y_position = 13.,
page_y_length = 4.,
subpage_y_position= 2.,
subpage_y_length= 1.,
subpage_frame= "off",
subpage_map_projection = 'cartesian',
subpage_x_axis_type = 'date',
subpage_y_axis_type = 'regular',
subpage_x_date_min = "1907-06-02",
subpage_x_date_max = "2017-08-02",
subpage_y_min = -20.,
subpage_y_max = 20.)
title2 = mtext(
text_lines = [ "Using automatic labelling for 10 years time serie..."],
text_justification = "left",
text_font_size = 0.5,
text_colour = "charcoal")
page2 = page()
#Setting the cartesian view
projection3 = mmap( page_id_line = 'off',
page_y_position = 5.,
page_y_length = 4.,
subpage_y_position= 2.,
subpage_y_length= 1.,
subpage_frame= "off",
subpage_map_projection = 'cartesian',
subpage_x_axis_type = 'date',
subpage_y_axis_type = 'regular',
subpage_x_date_min = "1999-06-03",
subpage_x_date_max = "2017-08-03",
subpage_y_min = -20.,
subpage_y_max = 20.)
title3 = mtext(
text_lines = ["Using automatic labelling for One year time serie ..."],
text_justification = "left",
text_font_size = 0.5,
text_colour = "charcoal")
page3 = page()
#Setting the cartesian view
projection4 = mmap( page_id_line = 'off',
page_y_position = 9.,
page_y_length = 4.,
subpage_y_position= 2.,
subpage_y_length= 1.,
subpage_frame= "off",
subpage_map_projection = 'cartesian',
subpage_x_axis_type = 'date',
subpage_y_axis_type = 'regular',
subpage_x_date_min = "1820-06-12",
subpage_x_date_max = "2012-08-10",
subpage_y_min = -20.,
subpage_y_max = 20.)
title4 = mtext(
text_lines = ["Using automatic labelling for short time serie..."],
text_justification = "left",
text_font_size = 0.5,
text_colour = "charcoal")
page4 = page()
#Setting the cartesian view
projection5 = mmap( page_id_line = 'off',
page_y_position = 13.,
page_y_length = 4.,
subpage_y_position= 2.,
subpage_y_length= 1.,
subpage_frame= "off",
subpage_map_projection = 'cartesian',
subpage_x_axis_type = 'date',
subpage_y_axis_type = 'regular',
subpage_x_date_min = "2012-05-01 06:00",
subpage_x_date_max = "2050-05-02 12:00",
subpage_y_min = -20.,
subpage_y_max = 20.)
title5 = mtext(
text_lines = ["<font size='0.8'> Automatic Method to setup labelling of date axis: [axis_date_type = </font><font size='0.8' colour='navy'>automatic</font><font size='0.8'>]</font>",
"Using automatic labelling for very short time serie..."],
text_justification = "left",
text_font_size = 0.5,
text_colour = "charcoal")
#To the plot
plot(output, projection1, horizontal, vertical, title1,
page1, projection2, horizontal, title2,
page2, projection3, horizontal, title3,
page3, projection4, horizontal, title4,
page4, projection5, horizontal, title5,
)
|
import xml.etree.cElementTree as ET
import sqlite3
import vk_api
import utils as ut
import versions
import os
import random
from math import ceil
import events as ev
with open('token.txt', 'r') as f:
token = f.read()
vk_session = vk_api.VkApi(token=token)
vk = vk_session.get_api()
rulesofinternet = {
'1': "Do not talk about /b/",
'2': "Do NOT talk about /b/",
'3': "We are Anonymous",
'4': "Anonymous is legion",
'5': "Anonymous never forgives",
'6': "Anonymous can be a horrible, senseless, uncaring monster",
'7': "Anonymous is still able to deliver",
'8': "There are no real rules about posting",
'9': "There are no real rules about moderation either - enjoy your ban",
'10': "If you enjoy any rival sites - DON'T",
'11': "All your carefully picked arguments can easily be ignored",
'12': "Anything you say can and will be used against you",
'13': "Anything you say can be turned into something else - fixed",
'14': "Do not argue with trolls - it means that they win",
'15': "The harder you try the harder you will fail",
'16': "If you fail in epic proportions, it may just become a winning failure",
'17': "Every win fails eventually",
'18': "Everything that can be labeled can be hated",
'19': "The more you hate it the stronger it gets",
'20': "Nothing is to be taken seriously",
'21': "Original content is original only for a few seconds before getting old",
'22': "Copypasta is made to ruin every last bit of originality",
'23': "Copypasta is made to ruin every last bit of originality",
'24': "Every repost is always a repost of a repost",
'25': "Relation to the original topic decreases with every single post",
'26': "Any topic can be easily turned into something totally unrelated",
'27': "Always question a person's sexual preferences without any real reason",
'28': "Always question a person's gender - just in case it's really a man",
'29': "In the internet all girls are men and all kids are undercover FBI agents",
'30': "There are no girls on the internet",
'31': "TITS or GTFO - the choice is yours",
'32': "You must have pictures to prove your statements",
'33': "Lurk more - it's never enough",
'34': "There is porn of it, no exceptions",
'35': "If no porn is found at the moment, it will be made",
'36': "There will always be even more fucked up shit than what you just saw",
'37': "You cannot divide by zero (just because the calculator says so)",
'38': "No real limits of any kind apply here - not even the sky",
'39': "CAPSLOCK IS CRUISE CONTROL FOR COOL",
'40': "EVEN WITH CRUISE CONTROL YOU STILL HAVE TO STEER",
'41': "Desu isn't funny. Seriously guys. It's worse than Chuck Norris jokes.",
'42': "Nothing is Sacred",
'43': "The more beautiful and pure a thing is - the more satisfying it is to corrupt it",
'44': "Even one positive comment about Japanese things can make you a weeaboo",
'45': "When one sees a lion, one must get into the car",
'46': "There is always furry porn of it",
'47': "The pool is always closed",
'48': "?????",
'49': "PROFIT!",
}
class stats:
    def __init__(self, plid):
        data = sqlite3.connect(os.path.join('pl', '{}.db'.format(plid)))
        c = data.cursor()
        c.execute("SELECT x_pos FROM player")
        self.x_pos = int(c.fetchone()[0])
        c.execute("SELECT y_pos FROM player")
        self.y_pos = int(c.fetchone()[0])
        c.execute("SELECT money FROM player")
        self.money = int(c.fetchone()[0])
        data.close()
def rofi(num):
    if str(num) in rulesofinternet:
return "{}: {}".format(num, rulesofinternet[str(num)])
else:
return "Index is out of range (total rules in the database: {})".format(len(rulesofinternet))
def register(plid, fname, lname):
if ut.isExist(plid):
return "U're already registered"
data = sqlite3.connect("lop.db")
c = data.cursor()
c.execute("INSERT INTO players VALUES (?, ?, ?)", [plid, f"{fname} {lname}", versions.latestVersion])
data.commit()
data.close()
player = sqlite3.connect(os.path.join('pl', '{}.db'.format(plid)))
c = player.cursor()
c.execute("CREATE TABLE player (x_pos INTEGER, y_pos INTEGER, money INTEGER)")
c.execute("INSERT INTO player VALUES (25, 25, 127, 0)")
c.execute("CREATE TABLE friends (id INTEGER, name TEXT, status INTEGER)")
c.execute("""CREATE TABLE inventory (number INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
desc TEXT NOT NULL,
type TEXT NOT NULL,
tier TEXT NOT NULL,
actions TEXT NOT NULL,
del BOOLEAN NOT NULL,
inTrade BOOLEAN)""")
c.execute("""CREATE TABLE trades (tradeNumber INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
tradeType TEXT NOT NULL,
tradeDesc TEXT NOT NULL,
tradeStatus TEXT NOT NULL,
name TEXT NOT NULL,
desc TEXT NOT NULL,
type TEXT NOT NULL,
tier TEXT NOT NULL,
actions TEXT NOT NULL,
del BOOLEAN NOT NULL,
senderID TEXT NOT NULL,
oiNum INTEGER NOT NULL)""")
player.commit()
player.close()
return f"Welcome to the game, {fname}"
def delete(plid):
if not ut.isExist(plid):
return "Register first"
data = sqlite3.connect("lop.db")
c = data.cursor()
c.execute("DELETE FROM players WHERE id=?", [plid])
data.commit()
data.close()
try:
os.remove(os.path.join("pl", f"{plid}.db"))
except FileNotFoundError:
print(f"db not found while deleting {plid}")
return "Account deleted. Seeya next time"
def addFriend(plid, fid):
if not ut.isExist(plid):
return "Register first"
if not ut.isExist(fid):
return "ID is not registered"
if str(plid) == str(fid):
return "You have no friends?"
player = sqlite3.connect(os.path.join('pl', f'{plid}.db'))
c = player.cursor()
c.execute("SELECT * FROM friends WHERE id=?", [fid])
answer = c.fetchone()
if answer is None:
c.execute("INSERT INTO friends VALUES (?, ?, ?)", [fid, searchByID(fid), "Requested"])
player.commit()
player.close()
friend = sqlite3.connect(os.path.join("pl", f"{fid}.db"))
c = friend.cursor()
c.execute("INSERT INTO friends VALUES (?, ?, ?)", [plid, searchByID(plid), "Request"])
vk.messages.send(random_id=0, user_id=fid, message=f"""{searchByID(plid)} sent a friend request.
Enter "/addfriend {plid}" to accept it.
Enter "/denyrequest {plid}" to deny it.""")
friend.commit()
friend.close()
return "Request sent"
if answer[2] == "Requested":
return "Request already sended"
if answer[2] == "Request":
c.execute("UPDATE friends SET status='Accepted' WHERE id=?", [fid])
player.commit()
player.close()
friend = sqlite3.connect(os.path.join("pl", f"{fid}.db"))
c = friend.cursor()
c.execute("UPDATE friends SET status='Accepted' WHERE id=?", [plid])
vk.messages.send(random_id=0, user_id=fid, message=f"{searchByID(plid)} has accepted friend request")
friend.commit()
friend.close()
return "Request accepted"
def showInventory(plid):
if not ut.isExist(plid):
return "Register first"
data = sqlite3.connect(os.path.join("pl", f"{plid}.db"))
c = data.cursor()
c.execute("SELECT * FROM inventory ORDER BY number")
items = c.fetchall()
if not items:
return "You have no items in inventory"
message = ""
for i in items:
message += f"{i[0]}. {i[1]} // {i[2]}\n"
return message
def showTradeInventory(plid):
if not ut.isExist(plid):
return "Register first"
data = sqlite3.connect(os.path.join("pl", f"{plid}.db"))
c = data.cursor()
c.execute("SELECT * FROM trades ORDER BY tradeNumber")
trades = c.fetchall()
if not trades:
return "You have no trades"
msg = ""
for i in trades:
msg += f"{i[0]}. Type: {i[1]}, Status: {i[3]}\nItem: {i[4]}\nDescription: {i[2]}\n\n"
return msg
def sendMoney(plid, fid, count):
if not ut.isExist(plid):
return "Register first"
if not ut.isExist(fid):
return "User is not found"
data = sqlite3.connect(os.path.join("pl", f"{plid}.db"))
c = data.cursor()
st = stats(plid)
if st.money < int(count):
return "Not enough money to send"
c.execute("UPDATE player SET money = money - ?", [count])
data.commit()
data.close()
pl = sqlite3.connect(os.path.join("pl", f"{fid}.db"))
c = pl.cursor()
c.execute("UPDATE player SET money = money + ?", [count])
pl.commit()
pl.close()
sender = vk.users.get(user_ids=plid, name_case="gen")[0]
vk.messages.send(random_id=0, user_id=fid, message=f"You got {count} credits from {sender['first_name']} {sender['last_name']}")
return "Your money were successfully sent to the player"
def sendGift(plid, fid, itemNumber, message):
if not ut.isExist(fid):
return "User is not found"
if not ut.inFriends(plid, fid):
return "User isn't in your friend list"
if not ut.inInventory(plid, itemNumber):
return "Item is not found"
if ut.inTrade(plid, itemNumber):
return "This item is already in trade"
data = sqlite3.connect(os.path.join("pl", f"{plid}.db"))
c = data.cursor()
c.execute("SELECT * FROM inventory WHERE number=?", [itemNumber])
sItem = c.fetchone()
message = f"{' '.join(message)}"
sender = vk.users.get(user_ids=plid, name_case="gen")[0]
receiver = vk.users.get(user_ids=fid, name_case="dat")[0]
c.execute("""INSERT INTO trades (tradeType, tradeDesc, tradeStatus, name, desc, type, tier, actions, del, senderID, oiNum)
VALUES ("Gift", ?, "Sended", ?, ?, ?, ?, ?, ?, ?, ?)""", [f"Gift to {receiver['first_name']} {receiver['last_name']}: {message}",
sItem[1], sItem[2], sItem[3], sItem[4], sItem[5], sItem[6], plid, itemNumber])
c.execute("UPDATE inventory SET inTrade=1 WHERE number=?", [itemNumber])
data.commit()
data.close()
fdata = sqlite3.connect(os.path.join("pl", f"{fid}.db"))
c = fdata.cursor()
c.execute("""INSERT INTO trades (tradeType, tradeDesc, tradeStatus, name, desc, type, tier, actions, del, senderID, oiNum)
VALUES ("Gift", ?, "Awaiting", ?, ?, ?, ?, ?, ?, ?, ?)""", [f"Gift from {sender['first_name']} {sender['last_name']}: {message}",
sItem[1], sItem[2], sItem[3], sItem[4], sItem[5], sItem[6], plid, itemNumber])
c.execute("SELECT tradeNumber FROM trades ORDER BY tradeNumber DESC LIMIT 1")
tradeNum = c.fetchone()[0]
fdata.commit()
fdata.close()
vk.messages.send(random_id=0, user_id=fid, message=f"""You got a Gift from {sender['first_name']} {sender['last_name']} with message: {message}
Enter /acceptGift {tradeNum} to accept it
Or enter /rejectGift {tradeNum} to reject it""")
return "Trade request sended."
def acceptGift(plid, tradeNumber):
data = sqlite3.connect(os.path.join("pl", f"{plid}.db"))
c = data.cursor()
c.execute("SELECT * FROM trades WHERE tradeNumber=?", [tradeNumber])
item = c.fetchone()
if item is None:
return "Trade request isn't found"
c.execute("INSERT INTO inventory (name, desc, type, tier, actions, del, inTrade) VALUES (?, ?, ?, ?, ?, ?, 0)", [item[4], item[5], item[6], item[7], item[8], item[9]])
c.execute("UPDATE trades SET tradeStatus='Accepted' WHERE tradeNumber=?", [tradeNumber])
data.commit()
data.close()
fdata = sqlite3.connect(os.path.join("pl", f"{item[10]}.db"))
c = fdata.cursor()
c.execute("DELETE FROM inventory WHERE number=?", [item[11]])
c.execute("UPDATE trades SET tradeStatus='Accepted' WHERE oiNum=?", [item[11]])
c.execute("SELECT tradeNumber FROM trades WHERE oiNum=?", [item[11]])
tr = c.fetchone()[0]
fdata.commit()
fdata.close()
vk.messages.send(random_id=0, user_id=item[10], message=f"Your gift (trade№: {tr}) was accepted")
return "Gift accepted and added to your inventory"
def rejectGift(plid, tradeNumber):
data = sqlite3.connect(os.path.join("pl", f"{plid}.db"))
c = data.cursor()
c.execute("SELECT * FROM trades WHERE tradeNumber=?", [tradeNumber])
tr = c.fetchone()
if tr is None:
return "Trade request isn't found"
c.execute("UPDATE trades SET tradeStatus='Rejected' WHERE tradeNumber=?", [tradeNumber])
data.commit()
data.close()
fdata = sqlite3.connect(os.path.join("pl", f"{tr[10]}.db"))
c = fdata.cursor()
c.execute("UPDATE trades SET tradeStatus='Rejected' WHERE oiNum=?", [tr[11]])
c.execute("UPDATE inventory SET inTrade=0 WHERE number=?", [tr[11]])
c.execute("SELECT tradeNumber FROM trades WHERE oiNum=?", [tr[11]])
    trade_num = c.fetchone()[0]
    fdata.commit()
    fdata.close()
    vk.messages.send(random_id=0, user_id=tr[10], message=f"Your gift (trade№: {trade_num}) was rejected")
return "Gift rejected"
def showShopList(plid):
coords = getCoords(plid)
x = coords[0]
y = coords[1]
if not os.path.exists(os.path.join("npc", f"merchant-{x}{y}.db" )):
return "Here's no merchant on this square"
data = sqlite3.connect(os.path.join("npc", f"merchant-{x}{y}.db"))
c = data.cursor()
c.execute("SELECT * FROM inventory")
if c.fetchone() is None:
if ev.refillMerchant(plid) is False:
if random.randint(1, 100) <= 50:
return ev.removeMerchant(plid)
else:
return "Merchant doesn't have any items now. Check him later"
else:
return "Merchant just got new items! Reenter the command to check them"
c.execute("SELECT * FROM inventory")
it = c.fetchall()
msg = "Merchant shop list:\n"
for i in it:
msg += f"{i[0]}. {i[1]} // Price: {i[7]}\n"
data.close()
return msg
def itemDesc(plid, itemNumber):
if not ut.isExist(plid):
return "User is not found"
if not ut.inInventory(plid, itemNumber):
return "Item is not found"
data = sqlite3.connect(os.path.join("pl", f"{plid}.db"))
c = data.cursor()
c.execute("SELECT name, desc FROM inventory WHERE number=?", [itemNumber])
desc = c.fetchone()
data.close()
return f"{desc[0]} ({desc[1]})"
def buyItem(plid, itemNumber):
coords = getCoords(plid)
x = coords[0]
y = coords[1]
if not os.path.exists(os.path.join("npc", f"merchant-{x}{y}.db")):
return "Here's no merchant on this square"
st = stats(plid)
data = sqlite3.connect(os.path.join("npc", f"merchant-{x}{y}.db"))
c = data.cursor()
c.execute("SELECT * FROM inventory")
c.execute("SELECT price FROM inventory WHERE number=?", [itemNumber])
if c.fetchone() is None:
return "Wrong item number"
c.execute("SELECT price FROM inventory WHERE number=?", [itemNumber])
price = int(c.fetchone()[0])
if st.money < price:
return "You can't buy this item"
c.execute("SELECT * FROM inventory WHERE number=?", [itemNumber])
item = c.fetchone()
c.execute("DELETE FROM inventory WHERE number=?", [itemNumber])
pldata = sqlite3.connect(os.path.join("pl", f"{plid}.db"))
c = pldata.cursor()
c.execute("INSERT INTO inventory (name, desc, type, tier, actions, del, inTrade) VALUES (?, ?, ?, ?, ?, ?, 0)",
[item[1], item[2], item[3], item[4], item[5], item[6]])
c.execute("""INSERT INTO trades (tradeType, tradeDesc, tradeStatus, name, desc, type, tier, actions, del, senderID, oiNum)
VALUES ("Purchase", ?, "Done", ?, ?, ?, ?, ?, ?, ?, ?)""",
[f"Purchase for {price} riphs", item[1], item[2], item[3], item[4], item[5], item[6], f"merchant-{x}{y}", item[0]])
data.commit()
data.close()
pldata.commit()
pldata.close()
return "Item bought"
def sellItem(plid, itemNumber):
coords = getCoords(plid)
x = coords[0]
y = coords[1]
if not os.path.exists(os.path.join("npc", f"merchant-{x}{y}.db")):
return "Here's no merchant on this square"
data = sqlite3.connect(os.path.join("pl", f"{plid}.db"))
c = data.cursor()
if not ut.inInventory(plid, itemNumber):
return "Wrong item number"
c.execute("SELECT * FROM inventory WHERE number=?", [itemNumber])
item = c.fetchone()
if item[4] == "Common":
mp = 1
if item[4] == "Uncommon":
mp = 1.2
if item[4] == "Rare":
mp = 1.4
if item[4] == "Exclusive":
mp = 1.8
if item[4] == "Absolute":
mp = 2.2
actions = item[5].split(" | ")
mdata = sqlite3.connect(os.path.join("npc", f"merchant-{x}{y}.db"))
mc = mdata.cursor()
mc.execute("SELECT * FROM spList WHERE name=?", [item[1]])
if mc.fetchone() is None:
price = random.randint(len(actions)+8, ceil(len(actions)+10*mp))
mc.execute("INSERT INTO spList VALUES(?, ?)", [item[2], price])
mdata.commit()
else:
mc.execute("SELECT price FROM spList WHERE name=?", [item[2]])
price = int(mc.fetchone()[0])
mdata.close()
c.execute("DELETE FROM inventory WHERE itemNumber=?", [itemNumber])
c.execute("UPDATE player SET money = money + ?", [price])
c.execute(f"""INSERT INTO trades (tradeType, tradeDesc, tradeStatus, name, desc, type, tier, actions, del, senderID, oiNum)
VALUES ('Sale', ?, 'Done', ?, ?, ?, ?, ?, ?, ?, ?)""",
[f"Selling item for {price}", item[1], item[2], item[3], item[4], item[5], item[6], f"merchant-{x}{y}", itemNumber])
data.commit()
data.close()
return f"Item sold, you got {price}"
def actions(plid):
tree = ET.parse("session.tmx")
root = tree.getroot()
cds = getCoords(plid)
x = cds[0]
y = cds[1]
acts = []
acts.append("Save -- You can save the current position")
acts.append("Leave -- You can leave from the game")
acts.append(">You can move in any directions")
for objects in root.findall('objectgroup'):
if objects.attrib['name'] == "Merchants":
for m in objects:
if m.attrib['x'] == str(x) and m.attrib['y'] == str(y):
acts.append("Check -- You can check the itemlist")
acts.append("Buy -- You can buy an item")
if objects.attrib['name'] == "Players":
for p in objects:
if p.attrib['x'] == str(x) and p.attrib['y'] == str(y):
if p.attrib['name'] != str(plid):
acts.append(">You can try to interact with other players on this square")
if objects.attrib['name'] == "Chests":
for ch in objects:
if ch.attrib['x'] == str(x) and ch.attrib['y'] == str(y):
acts.append(f"Open -- You can open the {ch.attrib['type'][6:]} Chest")
if ut.inTradeZone(plid):
acts.append(">You're in the trade zone. Note that.")
acts.sort()
return "\n".join(acts)
def putUpForAuc(plid, itemNumber, price): # in dev.
if not ut.inInventory(plid, itemNumber):
return "Wrong item number"
if not ut.inTradeZone(plid):
return "You must be in trade zone to put the item up"
if ut.inTrade(plid, itemNumber):
return "This item is already in some trade"
data = sqlite3.connect(os.path.join("pl", f"{plid}.db"))
c = data.cursor()
c.execute("UPDATE inventory SET inTrade=1 WHERE number=?", [itemNumber])
def removeFriend(plid, fid):
if not ut.isExist(plid):
return "Register first"
if not ut.isExist(fid) and str(fid) != "???":
return "User is not found"
data = sqlite3.connect(os.path.join("pl", f"{plid}.db"))
c = data.cursor()
c.execute("SELECT * FROM friends WHERE id=?", [fid])
if c.fetchone() is None:
return "User is not in your friends list"
c.execute("SELECT * FROM friends WHERE id=?", [fid])
ans = c.fetchone()
if ans[2] == "Accepted":
c.execute("SELECT name FROM friends WHERE id=?", [fid])
if c.fetchone()[0] == "Cassette":
return "You can't remove this cute thing from friend list"
c.execute("DELETE FROM friends WHERE id=?", [fid])
data.commit()
data.close()
friend = sqlite3.connect(os.path.join("pl", f"{fid}.db"))
c = friend.cursor()
c.execute("DELETE FROM friends WHERE id=?", [plid])
vk.messages.send(random_id=0, user_id=fid, message=f"{searchByID(plid)} has removed you from friend list. :c\nUse \"/addfriend {plid}\" to send friend request")
friend.commit()
friend.close()
return f"User has been removed from your friend list. \nUse \"/addfriend {fid}\" to send friend request"
else:
return "Please, use \"/denyrequest\" to cancel or deny friend request."
def denyFriendRequest(plid, fid):
if not ut.isExist(plid):
return "Register first"
data = sqlite3.connect(os.path.join('pl', f"{plid}.db"))
c = data.cursor()
c.execute("SELECT * FROM friends WHERE id=?", [fid])
answer = c.fetchone()
if answer is None:
return "This user isn't sent a request"
if answer[2] == "Request":
c.execute("DELETE FROM friends WHERE id=?", [fid])
data.commit()
data.close()
friend = sqlite3.connect(os.path.join("pl", f"{fid}.db"))
c = friend.cursor()
c.execute("DELETE FROM friends WHERE id=?", [plid])
vk.messages.send(random_id=0, user_id=fid, message=f"{searchByID(plid)} has denied friend request")
friend.commit()
friend.close()
return "Request denied"
if answer[2] == "Requested":
c.execute("DELETE FROM friends WHERE id=?", [fid])
data.commit()
data.close()
friend = sqlite3.connect(os.path.join("pl", f"{fid}.db"))
c = friend.cursor()
c.execute("DELETE FROM friends WHERE id=?", [plid])
vk.messages.send(random_id=0, user_id=fid, message=f"{searchByID(plid)} has canceled friend request")
friend.commit()
friend.close()
return "Request canceled"
if answer[2] == "Accepted":
return "Request already accepted."
def friendList(plid):
if not ut.isExist(plid):
return "Register first"
data = sqlite3.connect(os.path.join('pl', f"{plid}.db"))
c = data.cursor()
c.execute("SELECT * FROM friends WHERE status='Accepted'")
message = "Mutual friends:"
if c.fetchone() is None:
message += "\nYou don't have any friends ;c"
else:
c.execute("SELECT * FROM friends WHERE status='Accepted'")
friends = c.fetchall()
for f in friends:
message += f"\n{f[1]} ({f[0]})"
c.execute("SELECT * FROM friends WHERE status='Requested'")
message += "\n\nPending acceptance/rejection:"
if c.fetchone() is None:
message += "\nNo requests sent"
else:
c.execute("SELECT * FROM friends WHERE status='Requested'")
awaiting = c.fetchall()
for a in awaiting:
message += f"\n{a[1]} ({a[0]})"
c.execute("SELECT * FROM friends WHERE status='Request'")
message += "\n\nPending for reply:"
if c.fetchone() is None:
message += "\nNo requests sent"
else:
c.execute("SELECT * FROM friends WHERE status='Request'")
repl = c.fetchall()
for r in repl:
message += f"\n{r[1]} ({r[0]})"
return message
def searchByID(id):
if not ut.isExist(id):
return "User is not found"
data = sqlite3.connect("lop.db")
c = data.cursor()
c.execute("SELECT name FROM players WHERE id=?", [id])
name = c.fetchone()
data.close()
return name[0]
def playersOnTile(plid):
tree = ET.parse("session.tmx")
root = tree.getroot()
pos = getCoords(plid)
x = int(pos[0])
y = int(pos[1])
tileplayers = []
for i in root.findall('objectgroup[@name="Players"]'):
for players in i:
if int(players.attrib['x']) == x and int(players.attrib['y']) == y:
tileplayers.append(players.attrib['name'])
if not tileplayers:
return "There's no players on tile"
else:
message = "Players on tile:"
for i in tileplayers:
message = message + f"\n{searchByID(i)}"
return message
def save(plid):
data = sqlite3.connect(os.path.join('pl', f"{plid}.db"))
c = data.cursor()
pos = getCoords(plid)
c.execute("UPDATE player SET x_pos=?, y_pos=?", [pos[0], pos[1]])
data.commit()
data.close()
return "Position saved."
def playerToMap(plid):
pos = getCoords(plid)
tree = ET.parse("session.tmx")
root = tree.getroot()
for i in root.findall('objectgroup[@name="Players"]'):
ET.SubElement(i, 'object', {'id': str(plid), 'name': str(plid), 'type': 'Player', 'x': str(pos[0]), 'y': str(pos[1]), 'width': '1', 'height': '1'})
for i in root.findall('objectgroup[@name="Players"]'):
for p in i:
if p.attrib['name'] == str(plid):
ET.SubElement(p, 'ellipse')
tree.write('session.tmx', 'UTF-8')
def mapLeave(plid):
tree = ET.parse("session.tmx")
root = tree.getroot()
data = sqlite3.connect(os.path.join('pl', '{}.db'.format(plid)))
c = data.cursor()
position = getCoords(plid)
c.execute("UPDATE player SET x_pos={}, y_pos={}".format(position[0], position[1]))
data.commit()
data.close()
for objects in root.findall('objectgroup[@name="Players"]'):
for x in objects:
if x.attrib['name'] == str(plid):
objects.remove(x)
tree.write('session.tmx', 'UTF-8')
def getCoords(plid):
"""
Returns X and Y position for player (tuple)
"""
if not ut.isExist(plid):
return "Register first"
tree = ET.parse("session.tmx")
root = tree.getroot()
for i in root.findall("objectgroup[@name=\"Players\"]"):
for pos in i:
if pos.attrib["name"] == str(plid):
x = pos.attrib['x']
y = pos.attrib['y']
return x, y
st = stats(plid)
return st.x_pos, st.y_pos
if __name__ == '__main__':
pass
|
import os
import pathlib
import tempfile
import unittest
from datetime import datetime
from django.apps import apps
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import connection
from django.db.utils import DataError, IntegrityError
from django.shortcuts import reverse
from django.test import TestCase
from faker import Faker
from freezegun import freeze_time
from model_bakery import baker
from common.constants import models as constants
from roleplay.enums import DomainTypes, RoleplaySystems, SiteTypes
Domain = apps.get_model(constants.DOMAIN_MODEL)
Place = apps.get_model(constants.PLACE_MODEL)
PlayerInSession = apps.get_model(constants.ROLEPLAY_PLAYER_IN_SESSION)
Race = apps.get_model(constants.RACE_MODEL)
RaceUser = apps.get_model(constants.USER_RACE_RELATION)
Session = apps.get_model(constants.SESSION_MODEL)
User = apps.get_model(constants.USER_MODEL)
connection_engine = connection.features.connection.settings_dict.get('ENGINE', None)
fake = Faker()
class TestDomain(TestCase):
@classmethod
def setUpTestData(cls):
cls.model = Domain
def test_str_ok(self):
domain = baker.make(self.model)
domain_type = DomainTypes(domain.domain_type)
self.assertEqual(str(domain), f'{domain.name} [{domain_type.label.title()}]')
def test_ok(self):
entries = fake.pyint(min_value=1, max_value=100)
baker.make(self.model, entries)
self.assertEqual(entries, self.model.objects.count())
@freeze_time('2020-01-01')
def test_image_upload_ok(self):
tmpfile = tempfile.NamedTemporaryFile(mode='w', suffix='.jpg', dir='./tests/', delete=False)
image_file = tmpfile.name
with open(tmpfile.name, 'rb') as image_data:
image = SimpleUploadedFile(name=image_file, content=image_data.read(), content_type='image/jpeg')
place = baker.make(self.model)
place.image = image
place.save()
expected_path = '/media/roleplay/domain/2020/01/01/{}/{}'.format(place.pk, image.name)
expected_path = pathlib.Path(expected_path)
self.assertIn(str(expected_path), place.image.path)
tmpfile.close()
os.unlink(tmpfile.name)
os.unlink(place.image.path)
@unittest.skipIf('sqlite3' in connection_engine, 'SQLite takes Varchar as Text')
def test_max_name_length_ko(self):
name = fake.password(length=26)
with self.assertRaises(DataError) as ex:
self.model.objects.create(name=name)
self.assertRegex(str(ex.exception), r'.*value too long.*')
def test_name_none_ko(self):
with self.assertRaises(IntegrityError) as ex:
self.model.objects.create(name=None)
self.assertRegex(str(ex.exception), r'.*(null|NULL).*(constraint|CONSTRAINT).*')
def test_is_domain_ok(self):
instance = baker.make(self.model, domain_type=DomainTypes.DOMAIN)
self.assertTrue(instance.is_domain)
def test_is_domain_ko(self):
instance = baker.make(self.model, domain_type=DomainTypes.SUBDOMAIN)
self.assertFalse(instance.is_domain)
def test_is_subdomain_ok(self):
instance = baker.make(self.model, domain_type=DomainTypes.SUBDOMAIN)
self.assertTrue(instance.is_subdomain)
def test_is_subdomain_ko(self):
instance = baker.make(self.model, domain_type=DomainTypes.DOMAIN)
self.assertFalse(instance.is_subdomain)
class TestPlace(TestCase):
@classmethod
def setUpTestData(cls):
cls.model = Place
cls.enum = SiteTypes
def test_str_ok(self):
place = baker.make(self.model)
self.assertEqual(str(place), place.name)
def test_ok(self):
entries = fake.pyint(min_value=1, max_value=100)
baker.make(self.model, entries)
self.assertEqual(entries, self.model.objects.count())
@freeze_time('2020-01-01')
def test_image_upload_ok(self):
tmpfile = tempfile.NamedTemporaryFile(mode='w', suffix='.jpg', dir='./tests/', delete=False)
image_file = tmpfile.name
with open(tmpfile.name, 'rb') as image_data:
image = SimpleUploadedFile(name=image_file, content=image_data.read(), content_type='image/jpeg')
place = baker.make(self.model)
place.image = image
place.save()
expected_path = '/media/roleplay/place/2020/01/01/{}/{}'.format(place.pk, image.name)
expected_path = pathlib.Path(expected_path)
self.assertIn(str(expected_path), place.image.path)
tmpfile.close()
os.unlink(tmpfile.name)
os.unlink(place.image.path)
def test_images_ok(self):
images = []
for _ in range(0, 3):
tmpfile = tempfile.NamedTemporaryFile(mode='w', suffix='.jpg', dir='./tests/', delete=False)
image_file = tmpfile.name
with open(tmpfile.name, 'rb') as image_data:
image = SimpleUploadedFile(name=image_file, content=image_data.read(), content_type='image/jpeg')
images.append(image)
tmpfile.close()
os.unlink(tmpfile.name)
parent = None
for image in images:
place = self.model.objects.create(name=fake.country(), parent_site=parent)
place.image = image
place.save()
parent = place
obj_images = self.model.objects.first().images()
self.assertEqual(len(images), len(obj_images))
for place in self.model.objects.all():
os.unlink(place.image.path)
# TODO: Refactor this test so is not that complex
def test_nested_world_ok(self): # noqa
universe = self.model.objects.create(name='Universe', site_type=self.enum.WORLD)
world = self.model.objects.create(name='World', site_type=self.enum.WORLD, parent_site=universe)
self.assertIn(world, universe.get_worlds())
continents = []
for _ in range(0, 3):
continents.append(
self.model.objects.create(name=fake.country(), site_type=self.enum.CONTINENT, parent_site=world)
)
countries = []
seas = []
rivers = []
unusuals = []
for continent in continents:
self.assertIn(continent, world.get_continents())
countries.append(
self.model.objects.create(
name=fake.country(), site_type=self.enum.COUNTRY, parent_site=continent
)
)
seas.append(
self.model.objects.create(name=fake.name(), site_type=self.enum.SEA, parent_site=continent)
)
rivers.append(
self.model.objects.create(name=fake.name(), site_type=self.enum.RIVER, parent_site=continent)
)
unusuals.append(
self.model.objects.create(name=fake.name(), site_type=self.enum.UNUSUAL, parent_site=continent)
)
for sea in seas:
self.assertIn(sea, world.get_seas())
for river in rivers:
self.assertIn(river, world.get_rivers())
for unusual in unusuals:
self.assertIn(unusual, world.get_unusuals())
islands = []
cities = []
mountains = []
mines = []
deserts = []
tundras = []
hills = []
metropolis = []
for country in countries:
self.assertIn(country, world.get_countries())
islands.append(
self.model.objects.create(name=fake.country(), site_type=self.enum.ISLAND, parent_site=country)
)
cities.append(
self.model.objects.create(name=fake.city(), site_type=self.enum.CITY, parent_site=country)
)
mountains.append(
self.model.objects.create(
name=fake.country(), site_type=self.enum.MOUNTAINS, parent_site=country
)
)
deserts.append(
self.model.objects.create(name=fake.name(), site_type=self.enum.DESERT, parent_site=country)
)
hills.append(
self.model.objects.create(name=fake.name(), site_type=self.enum.HILLS, parent_site=country)
)
tundras.append(
self.model.objects.create(name=fake.name(), site_type=self.enum.TUNDRA, parent_site=country)
)
mines.append(
self.model.objects.create(name=fake.name(), site_type=self.enum.MINES, parent_site=country)
)
metropolis.append(
self.model.objects.create(name=fake.name(), site_type=self.enum.METROPOLIS, parent_site=country)
)
forests = []
for island in islands:
self.assertIn(island, world.get_islands())
forests.append(
self.model.objects.create(name=fake.name(), site_type=self.enum.FOREST, parent_site=island)
)
for m in metropolis:
self.assertIn(m, world.get_metropolis())
villages = []
towns = []
for city in cities:
self.assertIn(city, world.get_cities())
villages.append(
self.model.objects.create(name=fake.city(), site_type=self.enum.VILLAGE, parent_site=city)
)
towns.append(
self.model.objects.create(name=fake.city(), site_type=self.enum.TOWN, parent_site=city)
)
houses = []
for village in villages:
self.assertIn(village, world.get_villages())
houses.append(
self.model.objects.create(name=fake.city(), site_type=self.enum.HOUSE, parent_site=village)
)
for town in towns:
self.assertIn(town, world.get_towns())
for house in houses:
self.assertIn(house, world.get_houses())
for mountain in mountains:
self.assertIn(mountain, world.get_mountains())
for mine in mines:
self.assertIn(mine, world.get_mines())
for desert in deserts:
self.assertIn(desert, world.get_deserts())
for hill in hills:
self.assertIn(hill, world.get_hills())
for forest in forests:
self.assertIn(forest, world.get_forests())
for tundra in tundras:
self.assertIn(tundra, world.get_tundras())
@unittest.skipIf('sqlite3' in connection_engine, 'SQLite takes Varchar as Text')
def test_max_name_length_ko(self):
name = fake.password(length=101)
with self.assertRaises(DataError) as ex:
self.model.objects.create(name=name)
self.assertRegex(str(ex.exception), r'.*value too long.*')
def test_name_none_ko(self):
with self.assertRaises(IntegrityError) as ex:
self.model.objects.create(name=None)
self.assertRegex(str(ex.exception), r'.*(null|NULL).*(constraint|CONSTRAINT).*')
def test_is_house_ok(self):
place = baker.make(self.model, site_type=self.enum.HOUSE)
self.assertTrue(place.is_house)
def test_is_town_ok(self):
place = baker.make(self.model, site_type=self.enum.TOWN)
self.assertTrue(place.is_town)
def test_is_village_ok(self):
place = baker.make(self.model, site_type=self.enum.VILLAGE)
self.assertTrue(place.is_village)
def test_is_city_ok(self):
place = baker.make(self.model, site_type=self.enum.CITY)
self.assertTrue(place.is_city)
def test_is_metropolis_ok(self):
place = baker.make(self.model, site_type=self.enum.METROPOLIS)
self.assertTrue(place.is_metropolis)
def test_is_forest_ok(self):
place = baker.make(self.model, site_type=self.enum.FOREST)
self.assertTrue(place.is_forest)
def test_is_hills_ok(self):
place = baker.make(self.model, site_type=self.enum.HILLS)
self.assertTrue(place.is_hills)
def test_is_mountains_ok(self):
place = baker.make(self.model, site_type=self.enum.MOUNTAINS)
self.assertTrue(place.is_mountains)
def test_is_mines_ok(self):
place = baker.make(self.model, site_type=self.enum.MINES)
self.assertTrue(place.is_mines)
def test_is_river_ok(self):
place = baker.make(self.model, site_type=self.enum.RIVER)
self.assertTrue(place.is_river)
def test_is_sea_ok(self):
place = baker.make(self.model, site_type=self.enum.SEA)
self.assertTrue(place.is_sea)
def test_is_desert_ok(self):
place = baker.make(self.model, site_type=self.enum.DESERT)
self.assertTrue(place.is_desert)
def test_is_tundra_ok(self):
place = baker.make(self.model, site_type=self.enum.TUNDRA)
self.assertTrue(place.is_tundra)
def test_is_unusual_ok(self):
place = baker.make(self.model, site_type=self.enum.UNUSUAL)
self.assertTrue(place.is_unusual)
def test_is_island_ok(self):
place = baker.make(self.model, site_type=self.enum.ISLAND)
self.assertTrue(place.is_island)
def test_is_country_ok(self):
place = baker.make(self.model, site_type=self.enum.COUNTRY)
self.assertTrue(place.is_country)
def test_is_continent_ok(self):
place = baker.make(self.model, site_type=self.enum.CONTINENT)
self.assertTrue(place.is_continent)
def test_is_world_ok(self):
place = baker.make(self.model, site_type=self.enum.WORLD)
self.assertTrue(place.is_world)
def test_resolve_icon(self):
for site_type in self.model.ICON_RESOLVERS.keys():
obj = self.model.objects.create(name=fake.country(), site_type=site_type)
expected_url = '<span class="{}"></span>'.format(self.model.ICON_RESOLVERS.get(site_type, ''))
self.assertEqual(expected_url, obj.resolve_icon())
def test_user_but_no_owner_save_ko(self):
user = baker.make(User)
with self.assertRaises(IntegrityError) as ex:
self.model.objects.create(
name=fake.city(),
user=user
)
self.assertEqual(str(ex.exception), 'a private world must have owner.')
def test_user_but_no_owner_clean_ko(self):
user = baker.make(User)
world = self.model.objects.create(
name=fake.city(),
user=user,
owner=user
)
world.owner = None
with self.assertRaises(ValidationError) as ex:
world.clean()
ex = ex.exception
self.assertIn('user', ex.error_dict)
message = ex.error_dict['user'][0].message
self.assertEqual(message, 'a private world must have owner.')
class TestRace(TestCase):
@classmethod
def setUpTestData(cls):
cls.model = Race
cls.m2m_model = RaceUser
def test_create_ok(self):
instance = self.model.objects.create(name=fake.word(), description=fake.paragraph())
self.model.objects.get(pk=instance.pk)
def test_create_with_owner_ok(self):
instance = self.model.objects.create(name=fake.word(), description=fake.paragraph())
users = baker.make(constants.USER_MODEL, 3)
instance.add_owners(*users)
owners = instance.owners
result = all(user in owners for user in users)
self.assertTrue(result)
def test_str_ok(self):
instance = self.model.objects.create(name=fake.word(), description=fake.paragraph())
expected = f'{instance.name} [{instance.pk}]'
self.assertEqual(expected, str(instance))
class TestRaceUser(TestCase):
@classmethod
def setUpTestData(cls):
cls.model = RaceUser
def setUp(self):
self.user = baker.make(constants.USER_MODEL)
self.race = baker.make(constants.RACE_MODEL)
def test_str_ok(self):
instance = self.model.objects.create(user=self.user, race=self.race)
expected = f'{instance.user.username} <-> {instance.race.name}'
self.assertEqual(expected, str(instance))
class TestSession(TestCase):
@classmethod
def setUpTestData(cls):
cls.model = Session
cls.user = baker.make(constants.USER_MODEL)
cls.chat = baker.make(constants.CHAT_MODEL)
cls.world = baker.make(constants.PLACE_MODEL, site_type=SiteTypes.WORLD)
def test_str_ok(self):
name = fake.word()
instance = self.model.objects.create(
name=name,
chat=self.chat,
system=RoleplaySystems.PATHFINDER,
world=self.world,
)
system = RoleplaySystems(instance.system)
expected = f'{name} [{system.label.title()}]'
self.assertEqual(expected, str(instance))
def test_save_without_chat_ok(self):
instance = self.model.objects.create(
name=fake.word(),
system=RoleplaySystems.PATHFINDER,
world=self.world,
)
instance.save()
self.assertIsNotNone(instance.chat)
self.assertIn(instance.name, instance.chat.name)
def test_clean_non_world_ko(self):
place = baker.make(constants.PLACE_MODEL, site_type=SiteTypes.CITY)
session = self.model(
name=fake.word(),
description=fake.paragraph(),
next_game=datetime.now(),
system=RoleplaySystems.PATHFINDER,
world=place,
)
with self.assertRaisesRegex(ValidationError, 'world must be a world'):
session.clean()
def test_clean_no_world_given_ko(self):
session = self.model(
name=fake.word(),
description=fake.paragraph(),
next_game=datetime.now(),
system=RoleplaySystems.PATHFINDER,
)
with self.assertRaisesRegex(ValidationError, 'session hasn\'t any world'):
session.clean()
def test_add_game_masters_ok(self):
iterations = fake.pyint(min_value=1, max_value=10)
users = baker.make(_model=User, _quantity=iterations)
        instance = baker.make(self.model, world=self.world)
        queries = (
            'Bulk Create',
        )
        with self.assertNumQueries(len(queries)):
            instance.add_game_masters(*users)
expected = iterations
result = PlayerInSession.objects.filter(
player__in=users,
is_game_master=True
).count()
self.assertEqual(expected, result)
def test_property_game_masters_ok(self):
iterations = fake.pyint(min_value=1, max_value=10)
users = baker.make(_model=User, _quantity=iterations)
        instance = baker.make(self.model, world=self.world)
        instance.add_game_masters(*users)
        expected = iterations
        result = instance.game_masters.count()
self.assertEqual(expected, result)
def test_get_absolute_url(self):
instance = baker.make(self.model, world=self.world)
expected_url = reverse('roleplay:session:detail', kwargs={'pk': instance.pk})
url = instance.get_absolute_url()
self.assertEqual(expected_url, url)
class TestPlayerInSession(TestCase):
@classmethod
def setUpTestData(cls):
cls.model = PlayerInSession
cls.world = baker.make(Place, site_type=SiteTypes.WORLD)
cls.session = baker.make(Session, world=cls.world)
def setUp(self):
self.instance = baker.make(self.model, session=self.session)
def test_str_ok(self):
expected_str = '%(player)s in %(session)s (Game Master: %(is_game_master)s)' % {
'player': self.instance.player,
'session': self.instance.session,
'is_game_master': self.instance.is_game_master,
}
result_str = str(self.instance)
self.assertEqual(expected_str, result_str)
|
import sublime, sublime_plugin
import os.path
import sys
import struct
def GetShortPath(path):
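    """Parse a Windows .lnk (Shell Link) file and return the local target path it points to.
    Reads LinkFlags, skips the LinkTargetIDList if present, then extracts LocalBasePath
    from the LinkInfo structure; returns an empty string if the file cannot be parsed."""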
target = ''
try:
with open(path, 'rb') as stream:
content = stream.read()
# skip first 20 bytes (HeaderSize and LinkCLSID)
# read the LinkFlags structure (4 bytes)
lflags = struct.unpack('I', content[0x14:0x18])[0]
position = 0x18
# if the HasLinkTargetIDList bit is set then skip the stored IDList
# structure and header
if (lflags & 0x01) == 1:
position = struct.unpack('H', content[0x4C:0x4E])[0] + 0x4E
last_pos = position
position += 0x04
# get how long the file information is (LinkInfoSize)
length = struct.unpack('I', content[last_pos:position])[0]
# skip 12 bytes (LinkInfoHeaderSize, LinkInfoFlags, and VolumeIDOffset)
position += 0x0C
# go to the LocalBasePath position
lbpos = struct.unpack('I', content[position:position+0x04])[0]
position = last_pos + lbpos
# read the string at the given position of the determined length
            size = (length + last_pos) - position - 0x02
temp = struct.unpack('c' * size, content[position:position+size])
target = b''.join(temp).decode(sys.getfilesystemencoding())
    except Exception:
# could not read the file
pass
return target
class OpenLnkFile(sublime_plugin.EventListener):
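    """Intercept .lnk files opened in Sublime Text: close the raw view and open the real
    target instead (directories are added to the project, files are opened normally)."""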
def on_load(self, view):
window = view.window()
path = view.file_name()
ext = os.path.splitext(path)[1]
if ext==".lnk":
window.focus_view(view)
window.run_command('close_file')
real_path = GetShortPath(path)
if os.path.isdir(real_path):
data = window.project_data()
if not data:
data = {}
if "folders" not in data:
data["folders"] = []
if {'path': real_path} not in data["folders"]:
data["folders"].append({'path': real_path})
window.set_project_data(data)
else:
                window.open_file(real_path)
|
#!/usr/bin/env python3
# CGI header
print("Content-Type: text/html; charset=utf-8")
print("")
# Message to display
print("<h1>Hello!!!</h1>") |
#!/usr/bin/env python3
from PIL import Image
import os
import re
src = "./supplier-data/images/"
dst = "./supplier-data/images/"
def main():
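    """Convert every .tiff image found under `src` to a 600x400 RGB .jpeg saved to `dst`."""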
# read all images
fileslist = []
for root, dirs, files in os.walk(src):
for name in files:
if str(name).endswith(".tiff"):
fileslist.append(name)
# print(fileslist)
for image in fileslist:
im = Image.open(src + image)
final_name = re.sub(r".tiff$", ".jpeg", image)
final_name = dst + final_name
print(src + image + " => " + final_name)
        im.resize((600, 400)).convert("RGB").save(final_name, 'jpeg')
if __name__ == "__main__":
main()
|
import json
import os
import subprocess
from ..models import XcTarget, XcProject, XcGroup, XcFile
from ..parsers import XcProjectParser, SwiftCodeParser
# Absolute path of this project root folder.
root_path = __file__
for i in range(0, 4):
root_path = os.path.dirname(root_path)
# Models
class XcModelsFixture():
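    """Factory helpers that build minimal XcTarget/XcProject/XcGroup/XcFile instances for tests."""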
    def any_target(self,
                   name='MyXcTarget',
                   target_type=XcTarget.Type.APPLICATION,
                   product_name='MyXcProduct',
                   resource_files=None):
        # Default to a fresh set per call instead of a shared mutable default argument.
        resource_files = resource_files if resource_files is not None else set()
        return XcTarget(name=name, target_type=target_type, product_name=product_name, resource_files=resource_files, build_configurations=list())
def any_project(self):
targets = set([self.any_target()])
return XcProject('/', 'MyXcProject', build_configurations=list(), targets=targets, groups=list(), files=set())
    def any_group(self, group_path='/MyGroup', filepath='/MyGroup', groups=None, files=None):
        # Fresh containers per call instead of shared mutable default arguments.
        return XcGroup(group_path, filepath, groups=groups if groups is not None else list(), files=files if files is not None else set())
def any_file(self):
return XcFile('/MyFile')
# Xcode sample project
class SampleXcodeProjectFixture():
@property
def project_folder_path(self):
""" Absolute path of the folder containing `.xcodeproj` of the Xcode project sample contained in this project. """
return os.path.join(root_path, 'SampleiOSApp')
# Parsers
class XcProjectParserFixture():
@property
def sample_xc_project_parser(self):
path = SampleXcodeProjectFixture().project_folder_path
project_parser = XcProjectParser(path, verbose=False)
project_parser.load()
return project_parser
class SwiftCodeParserFixture():
def any_swift_code_parser(self, swift_code, base_discriminant='', type_counter=0):
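        """Run `sourcekitten structure` over the given Swift source and return a SwiftCodeParser
        fed with the resulting root substructures, already parsed."""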
command = ['sourcekitten', 'structure', '--text', swift_code]
result = subprocess.run(command, capture_output=True)
swift_structure = json.loads(result.stdout)
root_substructures = swift_structure.get('key.substructure', []).copy()
parser = SwiftCodeParser(substructures=root_substructures, base_discriminant=base_discriminant, type_counter=type_counter)
parser.parse()
return parser
# Generators
class XcProjectGraphGeneratorFixture():
@property
def test_build_folder(self):
return os.path.join(root_path, 'build', 'test')
def any_graph_filepath(self, filename):
return os.path.join(self.test_build_folder, filename)
|
from defines import *
class Deposit:
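    """Deposit record built from a raw data row; index 0 of `data` (presumably a row id) is unused here."""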
def __init__(self, data):
self.amount = data[1]
self.user_name = data[2]
self.old_credit = data[3]
self.new_credit = data[4]
self.consumption = data[5]
self.Type = data[6]
self.date = data[7]
    def get_type_string(self):
        # Map the stored deposit type constant to its display label.
        if self.Type == PayPal:
            return paypal_label
        elif self.Type == Bar:
            return cash_label
        elif self.Type == Material:
            return material_label
        else:
            return self.Type
|
# Generated by Django 3.1.7 on 2021-03-02 21:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('autodo', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='car',
name='image',
),
migrations.AlterField(
model_name='car',
name='color',
field=models.CharField(max_length=7),
),
]
|
from gym_iOTA.envs.iOTA_env import IotaEnv
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, codecs
def main():
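    """Join corresponding lines of the given files with the delimiter, paste(1)-style,
    stopping as soon as any input file runs out of lines."""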
delimiter = sys.argv[1]
fileNames = sys.argv[2:]
files = []
for fileName in fileNames:
files.append(codecs.open(fileName, "r", "utf-8"))
ongoing = True
while (ongoing):
lines = []
for file in files:
line = file.readline()
if (line == ""):
ongoing = False
break
lines.append(line.rstrip())
if (ongoing):
            print(delimiter.join(lines))
if __name__ == "__main__":
main()
|
from django.contrib import admin
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator
from django.core.validators import MinValueValidator
from django.db import models
from django.forms import CheckboxSelectMultiple
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
def upload_path(instance, filename):
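    """Build the storage path for a product image: products/<category>/<slug>.<original extension>."""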
extension = filename.split('.')[-1]
return f'products/{instance.category}/{instance.slug}.{extension}'
class ProductUsageUnits(models.Model):
unit = models.ForeignKey(verbose_name=_('Unit'), to='food.Unit', limit_choices_to={'type': 'usage'}, default=None)
value = models.PositiveSmallIntegerField(verbose_name=_('Value'), default=None)
def __str__(self):
return f'{self.value} {self.unit}'
class ProductShoppingUnits(models.Model):
unit = models.ForeignKey(verbose_name=_('Unit'), to='food.Unit', limit_choices_to={'type': 'shopping'}, default=None)
value = models.PositiveSmallIntegerField(verbose_name=_('Value'), default=None)
def __str__(self):
return f'{self.value} {self.unit}'
class Product(models.Model):
TYPE_CHOICES = [
('custom', _('Custom Made')),
('brand', _('Brand Product')),
('gourmet', _('Gourmet Food')),
('restaurant', _('Restaurant'))]
FORM_CHOICES = [
('solid', _('Solid')),
('liquid', _('Liquid'))]
CATEGORY_CHOICES = [
('other', _('Other')),
('fruits', _('Fruits')),
('vegetables', _('Vegetables')),
('meat', _('Meat'))]
name = models.CharField(verbose_name=_('Name'), max_length=255, db_index=True, default=None)
slug = models.SlugField(verbose_name=_('Slug'), editable=False, db_index=True, default=None)
image = models.ImageField(verbose_name=_('Image'), upload_to=upload_path, null=True, blank=True, default=None)
type = models.CharField(verbose_name=_('Type'), choices=TYPE_CHOICES, max_length=30, db_index=True, default=None)
category = models.CharField(verbose_name=_('Category'), choices=CATEGORY_CHOICES, max_length=30, db_index=True, default=None)
tags = models.ManyToManyField(verbose_name=_('Tags'), to='food.Tag', limit_choices_to={'type': 'product'}, blank=True, default=None)
modification_date = models.DateTimeField(verbose_name=_('Modification Date'), auto_now=True)
modification_author = models.ForeignKey(verbose_name=_('Modification Author'), to='auth.User', default=None)
    measurements_physical_form = models.CharField(verbose_name=_('Physical Form'), choices=FORM_CHOICES, max_length=20, db_index=True, blank=True, null=True, default=None)
measurements_usage_unit = models.ForeignKey(verbose_name=_('Usage Unit'), to='food.Unit', related_name='usage_unit', blank=True, null=True, default=None)
measurements_shopping_unit = models.ForeignKey(verbose_name=_('Shopping Unit'), to='food.Unit', related_name='shopping_unit', blank=True, null=True, default=None)
measurements_volume = models.DecimalField(verbose_name=_('Volume'), help_text=_('ml'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
measurements_weight = models.DecimalField(verbose_name=_('Net Weight'), help_text=_('g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
calories = models.PositiveSmallIntegerField(verbose_name=_('Calories'), help_text=_('kcal'), blank=True, null=True, default=None)
roughage = models.DecimalField(verbose_name=_('Roughage'), help_text=_('g/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
cooking_waste = models.DecimalField(verbose_name=_('Cooking Waste'), decimal_places=2, max_digits=3, validators=[MinValueValidator(0.0), MaxValueValidator(1.0)], blank=True, null=True, default=None)
cooking_factor = models.IntegerField(verbose_name=_('Cooking Factor'), blank=True, null=True, default=None)
cooking_product = models.ForeignKey(verbose_name=_('From Product'), to='food.Product', blank=True, null=True, default=None)
proteins = models.DecimalField(verbose_name=_('Proteins'), help_text=_('g/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
proteins_animal = models.DecimalField(verbose_name=_('Animal Proteins'), help_text=_('g/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
proteins_plant = models.DecimalField(verbose_name=_('Plant Proteins'), help_text=_('g/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
fats = models.DecimalField(verbose_name=_('Fats'), help_text=_('g/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
fats_saturated = models.DecimalField(verbose_name=_('Saturated Fats'), help_text=_('g/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
fats_monounsaturated = models.DecimalField(verbose_name=_('Monounsaturated Fats'), help_text=_('g/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
fats_polyunsaturated = models.DecimalField(verbose_name=_('Polyunsaturated Fats'), help_text=_('g/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
cholesterol = models.DecimalField(verbose_name=_('Cholesterol'), help_text=_('g/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
carbohydrates = models.DecimalField(verbose_name=_('Carbohydrates'), help_text=_('g/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
carbohydrates_sugars = models.DecimalField(verbose_name=_('Sugars'), help_text=_('g/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
vitamins_folic_acid = models.DecimalField(verbose_name=_('Folic Acid'), help_text=_('µg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
vitamins_a = models.DecimalField(verbose_name=_('Vitamin A'), help_text=_('µg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
vitamins_b1 = models.DecimalField(verbose_name=_('Vitamin B1'), help_text=_('mg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
vitamins_b2 = models.DecimalField(verbose_name=_('Vitamin B2'), help_text=_('mg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
vitamins_b6 = models.DecimalField(verbose_name=_('Vitamin B6'), help_text=_('mg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
vitamins_b12 = models.DecimalField(verbose_name=_('Vitamin B12'), help_text=_('µg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
vitamins_c = models.DecimalField(verbose_name=_('Vitamin C'), help_text=_('mg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
vitamins_d = models.DecimalField(verbose_name=_('Vitamin D'), help_text=_('µg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
vitamins_e = models.DecimalField(verbose_name=_('Vitamin E'), help_text=_('mg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
vitamins_pp = models.DecimalField(verbose_name=_('Vitamin PP'), help_text=_('mg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
minerals_zinc = models.DecimalField(verbose_name=_('Zinc'), help_text=_('mg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
minerals_phosphorus = models.DecimalField(verbose_name=_('Phosphorus'), help_text=_('mg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
minerals_iodine = models.DecimalField(verbose_name=_('Iodine'), help_text=_('µg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
minerals_magnesium = models.DecimalField(verbose_name=_('Magnesium'), help_text=_('mg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
minerals_copper = models.DecimalField(verbose_name=_('Copper'), help_text=_('mg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
    minerals_potasium = models.DecimalField(verbose_name=_('Potassium'), help_text=_('mg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
minerals_selenium = models.DecimalField(verbose_name=_('Selenium'), help_text=_('µg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
minerals_sodium = models.DecimalField(verbose_name=_('Sodium'), help_text=_('mg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
minerals_calcium = models.DecimalField(verbose_name=_('Calcium'), help_text=_('mg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
minerals_iron = models.DecimalField(verbose_name=_('Iron'), help_text=_('mg/100g'), decimal_places=2, max_digits=5, blank=True, null=True, default=None)
def __str__(self):
return f'{self.name}'
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args, **kwargs)
class Meta:
ordering = ['-name']
verbose_name = _('Product')
verbose_name_plural = _('Products')
class Admin(admin.ModelAdmin):
change_list_template = 'admin/change_list_filter_sidebar.html'
formfield_overrides = {models.ManyToManyField: {'widget': CheckboxSelectMultiple}}
ordering = ['-name']
list_display = ['name', 'type', 'category', 'modification_author', 'modification_date', 'display_tags']
list_filter = ['type', 'category', 'tags', 'modification_author', 'modification_date']
search_fields = ['name']
fieldsets = [
(_('General'), {'fields': ['name', 'type', 'category', 'tags', 'image']}),
(_('Measurements'), {'fields': ['measurements_physical_form', 'measurements_usage_unit', 'measurements_shopping_unit', 'measurements_volume', 'measurements_weight']}),
(_('Cooking'), {'fields': ['cooking_waste', 'cooking_factor', 'cooking_product']}),
(_('Nutrition'), {'fields': ['calories', 'roughage']}),
(_('Proteins'), {'fields': ['proteins', 'proteins_animal', 'proteins_plant']}),
(_('Fats'), {'fields': ['fats', 'fats_saturated', 'fats_monounsaturated', 'fats_polyunsaturated', 'cholesterol']}),
(_('Carbohydrates'), {'fields': ['carbohydrates', 'carbohydrates_sugars']}),
(_('Vitamins'), {'fields': ['vitamins_folic_acid', 'vitamins_a', 'vitamins_b1', 'vitamins_b2', 'vitamins_b6', 'vitamins_b12', 'vitamins_c', 'vitamins_d', 'vitamins_e', 'vitamins_pp']}),
(_('Minerals'), {'fields': ['minerals_zinc', 'minerals_phosphorus', 'minerals_iodine', 'minerals_magnesium', 'minerals_copper', 'minerals_potasium', 'minerals_selenium', 'minerals_calcium', 'minerals_iron']}),
]
def display_tags(self, obj):
return ", ".join([tag.name for tag in obj.tags.all()])
display_tags.short_description = _('Tags')
def save_model(self, request, obj, form, change):
obj.modification_author = User.objects.get(id=request.user.id)
super().save_model(request, obj, form, change)
|