package scheduler

import "go-learn/spider/zhenaiwang/v3/engine"

type SimpleScheduler struct {
	workerChan chan engine.Request
}

func (s *SimpleScheduler) Submit(request engine.Request) {
	// Send the request down to the worker channel in its own goroutine,
	// so Submit does not block when no worker is currently receiving.
	go func() {
		s.workerChan <- request
	}()
}

// ConfigMasterWorkerChan sets the channel used to send the initial requests to the Scheduler.
func (s *SimpleScheduler) ConfigMasterWorkerChan(in chan engine.Request) {
	s.workerChan = in
}
|
// src/main/java/com/github/xfslove/smssp/message/sgip12/UnBindRespMessage.java
package com.github.xfslove.smssp.message.sgip12;

import com.github.xfslove.smssp.message.Sequence;
import io.netty.buffer.ByteBuf;

/**
 * SGIP 1.2 UnbindResp message. The PDU consists of the header only,
 * so the body length is zero and read/write are no-ops.
 *
 * @author hanwen
 * created at 2018/8/28
 */
public class UnBindRespMessage implements SgipMessage {

  private final SgipHead head;

  public UnBindRespMessage(SequenceNumber sequenceNumber) {
    this.head = new SgipHead(SgipConstants.COMMAND_ID_UNBIND_RESP, sequenceNumber);
  }

  public UnBindRespMessage(Sequence<SequenceNumber> sequence) {
    this.head = new SgipHead(SgipConstants.COMMAND_ID_UNBIND_RESP, sequence.next());
  }

  @Override
  public SgipHead getHead() {
    return head;
  }

  @Override
  public int getLength() {
    // header-only message: empty body
    return 0;
  }

  @Override
  public void write(ByteBuf out) {
    // nothing to write: the message has no body
  }

  @Override
  public void read(ByteBuf in) {
    // nothing to read: the message has no body
  }

  @Override
  public String toString() {
    return "UnBindRespMessage{" +
        "head=" + head +
        '}';
  }
}
|
The former captain of the Etowah County, Alabama, Rescue Squad has been charged with criminally negligent homicide in the death of fellow rescuer Vicky Ryan during an April 25 water rescue attempt. Michael Bettis, 49, faces the Class A misdemeanor charge and was booked into jail Tuesday. Bond was set at $2,500.
More: Captain Heard Screams While in Middle of TV Interview About Ongoing Operation (via Statter911.com)
Rescuers were searching for a missing kayaker who had been swept into the water. The crews deployed rescue boats, one of which got too close to a dam and was swept over, followed soon thereafter by a second rescue boat. In all, nine rescuers ended up in Big Wills Creek, including Ryan, 46, Al.com reported.
Ryan died at the hospital soon after being pulled from the water. Three other rescuers were hurt. Al.com reported that Ryan’s husband was also a member of the squad. She also leaves behind a son.
The body of the kayaker the crews had originally been deployed to find was recovered the next day.
An investigation by the state's law enforcement agency indicated the death resulted from a rescue boat being handled by an inexperienced operator, WBRC.com reported.
More Coverage:
Video:
|
// public virtual [base kpTool]
void kpToolColorPicker::releasedAllButtons ()
{
    setUserMessage (haventBegunDrawUserMessage ());
}
|
# python 2/3 compatibility
from __future__ import division, print_function
import sys
import os.path
import numpy
import pandas
import copy
import difflib
import scipy
import collections
import json
# package imports
import rba
from .rba import RbaModel, ConstraintMatrix, Solver
from .rba_SimulationData import RBA_SimulationData
from .rba_SimulationParameters import RBA_SimulationParameters
from .rba_ModelStructure import RBA_ModelStructure
from .rba_Problem import RBA_Problem
from .rba_Matrix import RBA_Matrix
from .rba_LP import RBA_LP
from .rba_FBA import RBA_FBA
from .rba_LogBook import RBA_LogBook
# NOTE: helper functions used below (buildExchangeMap, ProteomeRecording,
# ProtoProteomeRecording, mapIsoReactions, MediumDependentCoefficients_A,
# buildUsedProteinConstraint, buildCompositionofUnusedProtein) are not imported
# in this excerpt; they are expected to come from a sibling module of the package.
class RBA_Session(object):
"""
Top level of the RBA API.
    Attributes
    ----------
    xml_dir : str
        Path to the directory where the rba-model files are located.
    model : rba.RbaModel
        The underlying RBA model.
    matrices : rba.ConstraintMatrix
        Constraint-matrix representation of the model.
    solver : rba.Solver
        Solver operating on the constraint matrices.
    Problem : rbatools.RBA_Problem
        LP-problem representation of the model.
    Medium : dict
        Medium composition (metabolite-ID --> concentration).
    ModelStructure : rbatools.RBA_ModelStructure
        Structural information on the model.
    Results : dict
        Recorded simulation results, as pandas.DataFrames.
    Parameters : dict
        Recorded simulation parameters, as pandas.DataFrames.
    SimulationData : rbatools.RBA_SimulationData
        Recorded simulation data, prepared for export.
    SimulationParameters : rbatools.RBA_SimulationParameters
        Recorded simulation parameters, prepared for export.
Methods
----------
__init__(xml_dir)
Creates RBA_Session object from files
Parameters
----------
xml_dir : str
Path to the directory where rba-model files are located.
rebuild_from_model()
Rebuilds computational model-representation (matrix) from own attribute "model" (rba.RbaModel-object).
reloadModel()
Reloads model from xml-files and then rebuild computational model-representation (matrix).
    recordResults(runName)
        Records simulation output for further use
        and stores it in the 'Results' attribute as pandas.DataFrames in a dictionary,
        with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
    recordParameters(runName)
        Records simulation parameters (LP-coefficients etc.) for further use
        and stores them in the 'Parameters' attribute as pandas.DataFrames in a dictionary,
        with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
    clearResults()
        Removes all previously recorded results and deletes the 'Results' attribute.
    clearParameters()
        Removes all previously recorded parameters and deletes the 'Parameters' attribute.
    writeResults(session_name='', digits=5)
        Creates SimulationData and SimulationParameters objects from recordings ('Results' and 'Parameters').
Stores them as rbatools.RBA_SimulationData
and rbatools.RBA_SimulationParameters objects as attributes.
Access via attributes .SimulationData and SimulationParameters respectively.
Parameters
----------
        digits : int
            Number of decimal places in the numeric results.
            Default: 5
session_name : str
Name of Simulation session.
Default: ''
    returnExchangeFluxes()
        Returns a dictionary with the exchange-rates of boundary-metabolites.
        Returns
        -------
        Dictionary with exchange-reactions as keys and their respective rates as values.
ConstraintSaturation(constraints=None)
Determines the saturation of model constraints at current solution.
Parameters
----------
constraints : str or list of str
Specifies constraints(s) for which the saturation is to be determined.
Default-value = None:
All model-constraints are taken
Returns
-------
Pandas DataFrame with constraint-names as indices and the columns 'LHS', 'RHS', and 'Saturation'.
        'LHS': The sum over the respective constraint-row multiplied elementwise with the solution vector.
        'RHS': The value of the problem's righthand side, corresponding to the respective constraint.
'Saturation': The saturation of the respective constraint ('LHS'/'RHS').
(Equality constraints are always saturated)
setMedium(changes)
Sets the concentration of specified growth-substrate(s) in medium.
Parameters
----------
changes : dict
Keys : ID of metabolite(s) in medium.
            Values : New concentration(s)
setMu(Mu)
Sets growth-rate to specified value.
Parameters
----------
Mu : float
Growth rate
doSolve(runName='DontSave')
Solves problem to find solution.
        Equivalent to rbatools.RBA_Problem.solveLP(),
        with an additional option for automatic results-recording.
Parameters
----------
runName : str
Name of observation.
Serves as ID for all data, originating from this run.
Special values :
'DontSave' : Results are not recorded
'Auto' : Results are automatically recorded
and appended to existing ones.
Named with number.
Any other string: Results are recorded under this name.
Default: 'DontSave'
findMaxGrowthRate(precision=0.0005, max=4, start_value=None, recording=False)
Applies dichotomy-search to find the maximal feasible growth-rate.
Parameters
----------
precision : float
            Numeric precision with which the maximum is approximated.
            Default : 0.0005
max : float
Defines the highest growth rate to be screened for.
Default=4
start_value : float
Defines a starting-value of the search for the maximum growth-rate.
            A close starting-value reduces the number of iterations required for the algorithm to converge.
If not provided search starts at growth-rate 0.
Default = None
recording : bool
Records intermediate feasible solutions
while approaching the maximum growth-rate.
Default : False
Returns
-------
maximum feasible growth rate as float.
knockOut(gene)
Simulates a gene knock out.
        Constrains all variables in the LP-problem (enzymes, other machineries) that require this gene to zero.
Parameters
----------
gene : str or list of strings
ID(s) of model-proteins to be knocked out.
            Can either be the gene identifier, represented as ID or ProtoID of proteins in the rbatools.protein_block.ProteinBlock.Elements class (depending on whether protein-isoforms are considered).
FeasibleRange(variables=None)
Determines the feasible range of model variables.
Parameters
----------
variables : str or list of str
Specifies variable(s) for which the feasible range is to be determined.
Default-value = None:
All model-variables are taken
Returns
-------
Dictionary with variable-names as keys and other dictionaries as values.
The 'inner' dictionaries hold keys 'Min' and 'Max'
with values representing lower and upper bound of feasible range respectively.
E.g. : {'variableA':{'Min':42 , 'Max':9000},
'variableB':{'Min':-9000 , 'Max':-42}}
ParetoFront(variable_X, variable_Y, N=10, sign_VY='max')
Determine Pareto front of two model variables.
Parameters
----------
variable_X : str
ID of variable, representing the X-coordinate of the Pareto-front
variable_Y : str
ID of variable, representing the Y-coordinate of the Pareto-front
N : int
Number of intervals within the feasible range of variable_X.
Default-value=10.
sign_VY : str
'max': variable_Y is maximised
'min': variable_Y is minimised
Returns
-------
        Pandas DataFrame with columns named after the two input variables
        and N+1 rows, one per sampled point along the feasible range of variable_X.
        Entries on each row are the X and Y coordinates of the respective point
        on the Pareto front, i.e. the values of the two variables.
"""
def __init__(self, xml_dir):
"""
Creates RBA_Session object from files
Parameters
----------
xml_dir : str
Path to the directory where rba-model files are located.
"""
self.xml_dir = xml_dir
self.LogBook = RBA_LogBook('Controler')
if not hasattr(self, 'ModelStructure'):
if os.path.isfile(str(self.xml_dir+'/ModelStructure.json')):
self.ModelStructure = RBA_ModelStructure()
with open(str(self.xml_dir+'/ModelStructure.json'), 'r') as myfile:
data = myfile.read()
self.ModelStructure.fromJSON(inputString=data)
else:
self.build_ModelStructure()
self.model = RbaModel.from_xml(input_dir=xml_dir)
self.matrices = ConstraintMatrix(model=self.model)
self.solver = Solver(matrix=self.matrices)
self.LogBook.addEntry('Model loaded from {}.'.format(self.xml_dir))
self.Problem = RBA_Problem(solver=self.solver)
medium = pandas.read_csv(xml_dir+'/medium.tsv', sep='\t')
self.Medium = dict(zip(list(medium.iloc[:, 0]), [float(i)
for i in list(medium.iloc[:, 1])]))
self.Mu = self.Problem.Mu
self.ExchangeMap = buildExchangeMap(self)
def build_ModelStructure(self):
self.ModelStructure = RBA_ModelStructure()
self.ModelStructure.fromFiles(xml_dir=self.xml_dir)
self.ModelStructure.exportJSON(path=self.xml_dir)
def addExchangeReactions(self):
"""
Adds explicit exchange-reactions of boundary-metabolites to RBA-problem, named R_EX_ followed by metabolite name (without M_ prefix).
"""
Mets_external = [m.id for m in self.model.metabolism.species if m.boundary_condition]
Mets_internal = [m.id for m in self.model.metabolism.species if not m.boundary_condition]
Reactions = [r.id for r in self.model.metabolism.reactions]
full_S = rba.core.metabolism.build_S(
Mets_external+Mets_internal, self.model.metabolism.reactions)
S_M_ext = full_S[:len(Mets_external), ].toarray()
col_indices_toremove = []
for i in range(S_M_ext.shape[1]):
s_col_uniques = list(set(list(S_M_ext[:, i])))
if len(s_col_uniques) == 1:
if s_col_uniques[0] == 0:
col_indices_toremove.append(i)
RemainingReactions = [i for i in Reactions if Reactions.index(
i) not in col_indices_toremove]
S_ext = numpy.delete(S_M_ext, col_indices_toremove, axis=1)
A = numpy.concatenate((S_ext, numpy.eye(len(Mets_external))), axis=1, out=None)
ColNames = RemainingReactions+[str('R_EX_'+i.split('M_')[-1]) for i in Mets_external]
# print(str('R_EX_'+i.split('M_')[-1]))
LBs = list([self.Problem.LP.LB[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[-10000]*len(Mets_external))
UBs = list([self.Problem.LP.UB[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[10000]*len(Mets_external))
b = [0]*len(Mets_external)
f = list([self.Problem.LP.f[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[0]*len(Mets_external))
ExchangeMatrix = RBA_Matrix()
ExchangeMatrix.A = scipy.sparse.coo_matrix(A)
ExchangeMatrix.b = numpy.array([0]*len(Mets_external))
ExchangeMatrix.f = numpy.array(f)
ExchangeMatrix.LB = numpy.array(LBs)
ExchangeMatrix.UB = numpy.array(UBs)
ExchangeMatrix.row_signs = ['E']*len(Mets_external)
ExchangeMatrix.row_names = Mets_external
ExchangeMatrix.col_names = ColNames
ExchangeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=ExchangeMatrix)
self.ExchangeReactionMap = dict(
zip(Mets_external, [str('R_EX_'+i.split('M_')[-1]) for i in Mets_external]))
def rebuild_from_model(self):
"""
Rebuilds computational model-representation (matrix) from own attribute "model" (rba.RbaModel-object).
"""
self.LogBook.addEntry('Model rebuilt.')
self.matrices = ConstraintMatrix(model=self.model)
self.solver = Solver(matrix=self.matrices)
self.Problem = RBA_Problem(solver=self.solver)
self.setMedium(changes=self.Medium)
def reloadModel(self):
"""
Reloads model from xml-files and then rebuild computational model-representation (matrix).
"""
self.LogBook.addEntry('Model reloaded from {}.'.format(self.xml_dir))
self.model = RbaModel.from_xml(input_dir=self.xml_dir)
self.rebuild_from_model()
def recordResults(self, runName):
"""
        Records simulation output for further use
        and stores it in the 'Results' attribute as pandas.DataFrames in a dictionary,
        with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
"""
self.LogBook.addEntry('Solution recorded under {}.'.format(runName))
if not hasattr(self, 'Results'):
self.Results = {'Reactions': pandas.DataFrame(index=list(self.ModelStructure.ReactionInfo.Elements.keys())),
'Enzymes': pandas.DataFrame(index=list(self.ModelStructure.EnzymeInfo.Elements.keys())),
'Processes': pandas.DataFrame(index=[self.ModelStructure.ProcessInfo.Elements[i]['ID']+'_machinery' for i in self.ModelStructure.ProcessInfo.Elements.keys()]),
'Proteins': pandas.DataFrame(index=list(self.ModelStructure.ProteinMatrix['Proteins'])),
'ProtoProteins': pandas.DataFrame(index=list(self.ModelStructure.ProteinGeneMatrix['ProtoProteins'])),
'Constraints': pandas.DataFrame(index=self.Problem.LP.row_names),
'SolutionType': pandas.DataFrame(index=['SolutionType']),
'Mu': pandas.DataFrame(index=['Mu']),
'ObjectiveFunction': pandas.DataFrame(index=self.Problem.LP.col_names),
'ObjectiveValue': pandas.DataFrame(index=['ObjectiveValue']),
'ExchangeFluxes': pandas.DataFrame(index=list(self.ExchangeMap.keys()))}
Exchanges = self.returnExchangeFluxes()
for i in Exchanges.keys():
self.Results['ExchangeFluxes'].loc[i, runName] = Exchanges[i]
self.Results['Reactions'][runName] = self.Results['Reactions'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Reactions'].index)})
self.Results['Enzymes'][runName] = self.Results['Enzymes'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Enzymes'].index)})
self.Results['Processes'][runName] = self.Results['Processes'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Processes'].index)})
self.Results['Constraints'][runName] = self.Results['Constraints'].index.map(
{i: self.Problem.DualValues[i] for i in self.Problem.LP.row_names})
self.Results['Proteins'][runName] = self.Results['Proteins'].index.map(
ProteomeRecording(self, runName))
self.Results['ProtoProteins'][runName] = self.Results['ProtoProteins'].index.map(
ProtoProteomeRecording(self, runName, self.Results['Proteins']))
self.Results['SolutionType'][runName] = self.Problem.SolutionType
self.Results['Mu'][runName] = self.Problem.Mu
self.Results['ObjectiveFunction'][runName] = list(self.Problem.getObjective().values())
self.Results['ObjectiveValue'][runName] = self.Problem.ObjectiveValue
def recordParameters(self, runName):
"""
        Records simulation parameters (LP-coefficients etc.) for further use
        and stores them in the 'Parameters' attribute as pandas.DataFrames in a dictionary,
        with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
"""
self.LogBook.addEntry('Coefficients recorded under {}.'.format(runName))
EnzymeCapacities = self.get_parameter_values(
parameter_type='enzyme_efficiencies', species=None, output_format='dict')
ProcessCapacities = self.get_parameter_values(
parameter_type='machine_efficiencies', species=None, output_format='dict')
CompartmentCapacities = self.get_parameter_values(
parameter_type='maximal_densities', species=None, output_format='dict')
TargetValues = self.get_parameter_values(
parameter_type='target_values', species=None, output_format='dict')
if not hasattr(self, 'Parameters'):
self.Parameters = {'EnzymeEfficiencies_FW': pandas.DataFrame(index=list(EnzymeCapacities.keys())),
'EnzymeEfficiencies_BW': pandas.DataFrame(index=list(EnzymeCapacities.keys())),
'ProcessEfficiencies': pandas.DataFrame(index=list(ProcessCapacities.keys())),
'CompartmentCapacities': pandas.DataFrame(index=list(CompartmentCapacities.keys())),
'Medium': pandas.DataFrame(index=self.Medium.keys()),
'TargetValues': pandas.DataFrame(index=[TargetValues[i]['Target_id'] for i in list(TargetValues.keys())])}
self.Parameters['EnzymeEfficiencies_FW'][runName] = self.Parameters['EnzymeEfficiencies_FW'].index.map({i: list(
EnzymeCapacities[i]['Forward'].values())[0] for i in list(EnzymeCapacities.keys()) if len(list(EnzymeCapacities[i]['Forward'].values())) > 0})
self.Parameters['EnzymeEfficiencies_BW'][runName] = self.Parameters['EnzymeEfficiencies_BW'].index.map({i: list(
EnzymeCapacities[i]['Backward'].values())[0] for i in list(EnzymeCapacities.keys()) if len(list(EnzymeCapacities[i]['Forward'].values())) > 0})
self.Parameters['ProcessEfficiencies'][runName] = self.Parameters['ProcessEfficiencies'].index.map(
{i: list(ProcessCapacities[i].values())[0] for i in list(ProcessCapacities.keys()) if len(list(ProcessCapacities[i].values())) > 0})
self.Parameters['CompartmentCapacities'][runName] = self.Parameters['CompartmentCapacities'].index.map(
{i: list(CompartmentCapacities[i].values())[0] for i in list(CompartmentCapacities.keys()) if len(list(CompartmentCapacities[i].values())) > 0})
self.Parameters['Medium'][runName] = self.Parameters['Medium'].index.map(self.Medium)
self.Parameters['TargetValues'][runName] = self.Parameters['TargetValues'].index.map(
{TargetValues[i]['Target_id']: list(TargetValues[i]['Target_value'].values())[0] for i in list(TargetValues.keys()) if len(list(TargetValues[i]['Target_value'].values())) > 0})
def clearResults(self):
"""
        Removes all previously recorded results and deletes the 'Results' attribute.
"""
self.LogBook.addEntry('Results cleared.')
delattr(self, 'Results')
def clearParameters(self):
"""
        Removes all previously recorded parameters and deletes the 'Parameters' attribute.
"""
self.LogBook.addEntry('Parameters cleared.')
delattr(self, 'Parameters')
def writeResults(self, session_name='', digits=5, loggingIntermediateSteps=False):
"""
        Creates SimulationData and SimulationParameters objects from recordings ('Results' and 'Parameters').
Stores them as rbatools.RBA_SimulationData
and rbatools.RBA_SimulationParameters objects as attributes.
Access via attributes .SimulationData and SimulationParameters respectively.
Parameters
----------
        digits : int
            Number of decimal places in the numeric results.
            Default: 5
session_name : str
Name of Simulation session.
Default: ''
"""
self.LogBook.addEntry('Data written under {}.'.format(session_name))
if hasattr(self, 'Results'):
self.Results['uniqueReactions'] = mapIsoReactions(Controller=self)
self.Results['SolutionType'] = self.Results['SolutionType']
self.Results['Mu'] = self.Results['Mu'].round(digits)
self.Results['ObjectiveFunction'] = self.Results['ObjectiveFunction'].loc[(
self.Results['ObjectiveFunction'] != 0).any(axis=1)].round(digits)
self.Results['ObjectiveValue'] = self.Results['ObjectiveValue'].round(digits)
self.Results['Proteins'] = self.Results['Proteins'].round(digits)
self.Results['uniqueReactions'] = self.Results['uniqueReactions'].round(digits)
self.Results['Reactions'] = self.Results['Reactions'].round(digits)
self.Results['Enzymes'] = self.Results['Enzymes'].round(digits)
self.Results['Processes'] = self.Results['Processes'].round(digits)
self.Results['Constraints'] = self.Results['Constraints'].round(digits)
self.Results['ExchangeFluxes'] = self.Results['ExchangeFluxes'].round(digits)
self.SimulationData = RBA_SimulationData(StaticData=self.ModelStructure)
self.SimulationData.fromSimulationResults(Controller=self, session_name=session_name)
if hasattr(self, 'Parameters'):
self.Parameters['EnzymeEfficiencies_FW'] = self.Parameters['EnzymeEfficiencies_FW'].round(
digits)
self.Parameters['EnzymeEfficiencies_BW'] = self.Parameters['EnzymeEfficiencies_BW'].round(
digits)
self.Parameters['ProcessEfficiencies'] = self.Parameters['ProcessEfficiencies'].round(
digits)
self.Parameters['CompartmentCapacities'] = self.Parameters['CompartmentCapacities'].round(
digits)
self.Parameters['TargetValues'] = self.Parameters['TargetValues'].round(digits)
self.Parameters['Medium'] = self.Parameters['Medium'].loc[(
self.Parameters['Medium'] != 0).any(axis=1)].round(digits)
self.SimulationParameters = RBA_SimulationParameters(StaticData=self.ModelStructure)
self.SimulationParameters.fromSimulationResults(Controller=self)
def returnExchangeFluxes(self):
"""
        Returns a dictionary with the exchange-rates of boundary-metabolites.
        Returns
        -------
        Dictionary with exchange-reactions as keys and their respective rates as values.
"""
out = {}
for j in self.ExchangeMap.keys():
netflux = 0
for k in self.ExchangeMap[j].keys():
netflux += self.ExchangeMap[j][k]*self.Problem.SolutionValues[k]
if netflux != 0:
out[j] = netflux
return(out)
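    # Sketch of the returned mapping (hypothetical metabolite IDs and values;
    # only non-zero net fluxes are included, as implemented above):
    #   {'M_glc__D': -2.3, 'M_o2': -5.1, 'M_co2': 4.8}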
def ConstraintSaturation(self, constraints=None):
"""
Determines the saturation of model constraints at current solution.
Parameters
----------
constraints : str or list of str
Specifies constraints(s) for which the saturation is to be determined.
Default-value = None:
All model-constraints are taken
Returns
-------
Pandas DataFrame with constraint-names as indices and the columns 'LHS', 'RHS', and 'Saturation'.
        'LHS': The sum over the respective constraint-row multiplied elementwise with the solution vector.
        'RHS': The value of the problem's righthand side, corresponding to the respective constraint.
'Saturation': The saturation of the respective constraint ('LHS'/'RHS').
(Equality constraints are always saturated)
"""
if constraints is None:
ConstraintsInQuestion = self.Problem.LP.row_names
else:
if isinstance(constraints, list):
ConstraintsInQuestion = constraints
elif isinstance(constraints, str):
ConstraintsInQuestion = [constraints]
if len(list(constraints)) > 0:
if isinstance(constraints[0], list):
ConstraintsInQuestion = constraints[0]
if isinstance(constraints[0], str):
ConstraintsInQuestion = [constraints[0]]
if len(list(constraints)) == 0:
ConstraintsInQuestion = self.Problem.LP.row_names
        rhs = self.Problem.getRighthandSideValue(ConstraintsInQuestion)
        lhs = self.Problem.calculateLefthandSideValue(ConstraintsInQuestion)
        Out = pandas.DataFrame(columns=['LHS', 'RHS', 'Saturation'], index=ConstraintsInQuestion)
        for i in ConstraintsInQuestion:
            # Look values up by constraint name; positional indexing via the global
            # rowIndicesMap breaks when only a subset of constraints is queried.
            lhval = lhs[i]
            rhval = rhs[i]
sat = numpy.nan
if rhval != 0:
sat = lhval/rhval
Out.loc[i, 'LHS'] = lhval
Out.loc[i, 'RHS'] = rhval
Out.loc[i, 'Saturation'] = sat
self.LogBook.addEntry(
'Saturation of constraint {} determined to be {}.'.format(i, sat))
return(Out)
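    # Example call (hypothetical constraint ID); returns LHS, RHS and their ratio
    # per constraint, with NaN saturation wherever the RHS is zero:
    #   sat = session.ConstraintSaturation(constraints='enzyme_X_forward_capacity')
    #   sat.loc['enzyme_X_forward_capacity', 'Saturation']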
def setMedium(self, changes, loggingIntermediateSteps=False):
"""
Sets the concentration of specified growth-substrate(s) in medium.
Parameters
----------
changes : dict
Keys : ID of metabolite(s) in medium.
            Values : New concentration(s)
"""
for species in (changes.keys()):
self.Medium[species] = float(changes[species])
self.Problem.ClassicRBAmatrix.set_medium(self.Medium)
self.Problem.ClassicRBAmatrix.build_matrices(self.Mu)
inputMatrix = RBA_Matrix()
inputMatrix.loadMatrix(matrix=self.Problem.ClassicRBAmatrix)
self.Problem.LP.updateMatrix(matrix=inputMatrix, Ainds=MediumDependentCoefficients_A(
self), Binds=[], CTinds=[], LBinds=None, UBinds=None)
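    # Example (hypothetical metabolite ID); values are coerced to float and the
    # medium-dependent LP coefficients are rebuilt:
    #   session.setMedium(changes={'M_glc__D': 10.0})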
def setMu(self, Mu, loggingIntermediateSteps=False):
"""
Sets growth-rate to specified value.
Parameters
----------
Mu : float
Growth rate
"""
self.LogBook.addEntry('Growth-rate changed:{} --> {}'.format(self.Mu, float(Mu)))
self.Problem.setMu(Mu=float(Mu), ModelStructure=self.ModelStructure,
logging=loggingIntermediateSteps)
self.Mu = float(Mu)
def doSolve(self, runName='DontSave', loggingIntermediateSteps=False):
"""
Solves problem to find solution.
        Equivalent to rbatools.RBA_Problem.solveLP(),
        with an additional option for automatic results-recording.
Parameters
----------
runName : str
Name of observation.
Serves as ID for all data, originating from this run.
Special values :
'DontSave' : Results are not recorded
'Auto' : Results are automatically recorded
and appended to existing ones.
Named with number.
Any other string: Results are recorded under this name.
Default: 'DontSave'
"""
self.Problem.solveLP(logging=loggingIntermediateSteps)
        if self.Problem.Solved:
            # Use string equality, not identity ('is'), to compare runName.
            if runName != 'DontSave':
                if runName == 'Auto':
                    if hasattr(self, 'Results'):
                        name = str(self.Results['Reactions'].shape[1]+1)
                    else:
                        name = '1'
                else:
                    name = runName
                self.recordResults(runName=name)
def findMaxGrowthRate(self, precision=0.0005, max=4, start_value=None, recording=False, loggingIntermediateSteps=False):
"""
Applies dichotomy-search to find the maximal feasible growth-rate.
Parameters
----------
precision : float
            Numeric precision with which the maximum is approximated.
            Default : 0.0005
max : float
Defines the highest growth rate to be screened for.
Default=4
start_value : float
Defines a starting-value of the search for the maximum growth-rate.
            A close starting-value reduces the number of iterations required for the algorithm to converge.
If not provided search starts at growth-rate 0.
Default = None
recording : bool
Records intermediate feasible solutions
while approaching the maximum growth-rate.
Default : False
Returns
-------
maximum feasible growth rate as float.
"""
minMu = 0
maxMu = max
if start_value is None:
testMu = minMu
else:
testMu = start_value
iteration = 0
while (maxMu - minMu) > precision:
self.setMu(Mu=testMu)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
iteration += 1
if recording:
self.recordResults('DichotomyMu_iteration_'+str(iteration))
minMu = testMu
else:
maxMu = testMu
testMu = numpy.mean([maxMu, minMu])
self.LogBook.addEntry('Maximal growth-rate found to be: {}.'.format(minMu))
if minMu == max:
print('Warning: Maximum growth rate might exceed specified range. Try rerunning this method with larger max-argument.')
self.setMu(Mu=minMu)
self.Problem.solveLP(logging=False)
self.Problem.SolutionType = 'GrowthRate_maximization'
return(minMu)
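    # The dichotomy keeps a feasible lower bound (minMu) and an infeasible upper
    # bound (maxMu) and bisects until their gap falls below 'precision', e.g.:
    #   mu_max = session.findMaxGrowthRate(precision=0.001, max=2, recording=True)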
def knockOut(self, gene, loggingIntermediateSteps=False):
"""
Simulates a gene knock out.
        Constrains all variables in the LP-problem (enzymes, other machineries) that require this gene to zero.
Parameters
----------
gene : str or list of strings
ID(s) of model-proteins to be knocked out.
            Can either be the gene identifier, represented as ID or ProtoID of proteins in the rbatools.protein_block.ProteinBlock.Elements class (depending on whether protein-isoforms are considered).
"""
if type(gene) is str:
genes = [gene]
if type(gene) is list:
genes = gene
isoform_genes = [g for g in genes if g in list(self.ModelStructure.ProteinInfo.Elements.keys(
))]+[i for g in genes for i in self.ModelStructure.ProteinInfo.Elements.keys() if self.ModelStructure.ProteinInfo.Elements[i]['ProtoID'] == g]
for g in isoform_genes:
self.LogBook.addEntry('Gene {} knocked out.'.format(g))
ConsumersEnzymes = self.ModelStructure.ProteinInfo.Elements[g]['associatedEnzymes']
for i in ConsumersEnzymes:
LikeliestVarName = difflib.get_close_matches(i, self.Problem.LP.col_names, 1)[0]
self.Problem.setLB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
ConsumersProcess = self.ModelStructure.ProteinInfo.Elements[g]['SupportsProcess']
for i in ConsumersProcess:
LikeliestVarName = difflib.get_close_matches(
str(self.ModelStructure.ProcessInfo.Elements[i]['ID']+'_machinery'), self.Problem.LP.col_names, 1)[0]
self.Problem.setLB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
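    # Example (hypothetical gene/ProtoID); all enzymes and process machineries
    # that require the gene's protein are fixed to zero:
    #   session.knockOut(gene='geneA')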
def FeasibleRange(self, variables=None, loggingIntermediateSteps=False):
"""
Determines the feasible range of model variables.
Parameters
----------
variables : str or list of str
Specifies variable(s) for which the feasible range is to be determined.
Default-value = None:
All model-variables are taken
Returns
-------
Dictionary with variable-names as keys and other dictionaries as values.
The 'inner' dictionaries hold keys 'Min' and 'Max'
with values representing lower and upper bound of feasible range respectively.
E.g. : {'variableA':{'Min':42 , 'Max':9000},
'variableB':{'Min':-9000 , 'Max':-42}}
"""
if variables is None:
VariablesInQuestion = self.Problem.LP.col_names
else:
if isinstance(variables, list):
VariablesInQuestion = variables
elif isinstance(variables, str):
VariablesInQuestion = [variables]
out = {}
for i in VariablesInQuestion:
min = numpy.nan
max = numpy.nan
self.Problem.clearObjective(logging=loggingIntermediateSteps)
self.Problem.setObjectiveCoefficients(
inputDict={i: 1.0}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
min = self.Problem.SolutionValues[i]
self.Problem.setObjectiveCoefficients(
inputDict={i: -1.0}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
max = self.Problem.SolutionValues[i]
out.update({i: {'Min': min, 'Max': max}})
self.LogBook.addEntry(
'Feasible-range of {} determined to be between {} and {}.'.format(i, min, max))
return(out)
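    # Example (hypothetical variable ID); each variable is minimised and then
    # maximised as the LP objective to obtain its bounds:
    #   fr = session.FeasibleRange(variables='R_v1')
    #   fr['R_v1']['Min'], fr['R_v1']['Max']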
def ParetoFront(self, variable_X, variable_Y, N=10, sign_VY='max', loggingIntermediateSteps=False):
"""
Determine Pareto front of two model variables.
Parameters
----------
variable_X : str
ID of variable, representing the X-coordinate of the Pareto-front
variable_Y : str
ID of variable, representing the Y-coordinate of the Pareto-front
N : int
Number of intervals within the feasible range of variable_X.
Default-value=10.
sign_VY : str
'max': variable_Y is maximised
'min': variable_Y is minimised
Returns
-------
        Pandas DataFrame with columns named after the two input variables
        and N+1 rows, one per sampled point along the feasible range of variable_X.
        Entries on each row are the X and Y coordinates of the respective point
        on the Pareto front, i.e. the values of the two variables.
"""
if variable_X not in self.Problem.LP.col_names:
print('Chosen Element not among problem variables')
return
if variable_Y not in self.Problem.LP.col_names:
print('Chosen Element not among problem variables')
return
FR = self.FeasibleRange(variable_X)
cMin = FR[variable_X]['Min']
cMax = FR[variable_X]['Max']
concentrations = [float(cMin+(cMax-cMin)*i/N) for i in range(N+1)]
Out = pandas.DataFrame(columns=[variable_X, variable_Y])
oldLB = self.Problem.getLB(variable_X)
oldUB = self.Problem.getUB(variable_X)
iteration = -1
for conc in concentrations:
iteration += 1
self.Problem.setLB(inputDict={variable_X: conc}, logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={variable_X: conc}, logging=loggingIntermediateSteps)
self.Problem.clearObjective(logging=loggingIntermediateSteps)
if sign_VY == 'max':
self.Problem.setObjectiveCoefficients(
inputDict={variable_Y: -1}, logging=loggingIntermediateSteps)
if sign_VY == 'min':
self.Problem.setObjectiveCoefficients(
inputDict={variable_Y: 1}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
max = abs(self.Problem.ObjectiveValue)
else:
max = numpy.nan
self.Problem.setLB(inputDict=oldLB, logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict=oldUB, logging=loggingIntermediateSteps)
Out.loc[iteration, variable_X] = conc
Out.loc[iteration, variable_Y] = max
self.LogBook.addEntry(
'Pareto-front between {} and {} determined.'.format(variable_X, variable_Y))
return(Out)
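    # Example (hypothetical variable IDs); variable_X is fixed at N+1 points of
    # its feasible range while variable_Y is optimised at each point:
    #   front = session.ParetoFront('R_v1', 'R_v2', N=20, sign_VY='max')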
    def buildFBA(self, type='classic', objective='classic', maintenanceToBM=False):
        """
        Derives and constructs FBA-problem from RBA-problem and stores it under attribute 'FBA'.
        Parameters
        ----------
        type : str
            'classic': keeps only reactions and metabolite mass-balance rows
            (enzyme variables and coupling rows are removed).
            'parsi': additionally retains enzyme variables and their coupling rows
            for a parsimonious variant.
        objective : str
            'classic': the maintenance-ATP reaction is removed from the problem.
            'targets': a biomass reaction 'R_BIOMASS_targetsRBA' is added, assembled
            from the RBA target-vector.
        maintenanceToBM : bool
            If True (with objective 'targets'), the maintenance-ATP demand is folded
            into the added biomass reaction.
        """
RBAproblem = self.Problem.LP
A = RBAproblem.A.toarray()
if type == 'classic':
Cols2remove = list(set([RBAproblem.col_names.index(i) for i in RBAproblem.col_names if not i.startswith('R_') and not i.startswith('M_') and not i.endswith('_synthesis')]
+ [RBAproblem.col_names.index(i) for i in RBAproblem.col_names if '_duplicate_' in i]
+ [RBAproblem.col_names.index(i) for i in RBAproblem.col_names if 'enzyme' in i]))
Rows2remove = [RBAproblem.row_names.index(
i) for i in RBAproblem.row_names if not i.startswith('M_')]
elif type == 'parsi':
Cols2remove = list(set([RBAproblem.col_names.index(i) for i in RBAproblem.col_names if not i.startswith(
'R_') and not i.startswith('M_') and not i.endswith('_synthesis')]+[RBAproblem.col_names.index(i) for i in RBAproblem.col_names if '_duplicate_' in i]))
Rows2remove = [RBAproblem.row_names.index(
i) for i in RBAproblem.row_names if not i.startswith('R_') and not i.startswith('M_')]
if objective == 'classic':
if 'R_maintenance_atp' in RBAproblem.col_names:
Cols2remove.append(RBAproblem.col_names.index('R_maintenance_atp'))
Anew = numpy.delete(A, Cols2remove, axis=1)
col_namesNew = list(numpy.delete(RBAproblem.col_names, Cols2remove))
LBnew = numpy.delete(RBAproblem.LB, Cols2remove)
UBnew = numpy.delete(RBAproblem.UB, Cols2remove)
fNew = numpy.delete(RBAproblem.f, Cols2remove)
Anew2 = numpy.delete(Anew, Rows2remove, axis=0)
row_namesNew = list(numpy.delete(RBAproblem.row_names, Rows2remove))
row_signsNew = list(numpy.delete(RBAproblem.row_signs, Rows2remove))
bNew = numpy.delete(RBAproblem.b, Rows2remove)
trnaInds = [i for i in range(len(row_namesNew)) if row_namesNew[i].startswith(
'M_') and 'trna' in row_namesNew[i]]
# bNew[trnaInds] = 0
if objective == 'targets':
col_namesNew.append('R_BIOMASS_targetsRBA')
LBnew = numpy.append(LBnew, 0)
UBnew = numpy.append(UBnew, 10000)
fNew = numpy.append(fNew, 0)
BMrxnCol = numpy.ones((len(row_namesNew), 1))
BMrxnCol[:, 0] = bNew
if maintenanceToBM:
MaintenanceTarget = LBnew[col_namesNew.index('R_maintenance_atp')]
BMrxnCol[row_namesNew.index('M_atp_c')] += MaintenanceTarget
BMrxnCol[row_namesNew.index('M_h2o_c')] += MaintenanceTarget
BMrxnCol[row_namesNew.index('M_adp_c')] -= MaintenanceTarget
BMrxnCol[row_namesNew.index('M_pi_c')] -= MaintenanceTarget
BMrxnCol[row_namesNew.index('M_h_c')] -= MaintenanceTarget
LBnew[col_namesNew.index('R_maintenance_atp')] = 0
Anew2 = numpy.append(Anew2, -BMrxnCol, axis=1)
bNew = numpy.array([0]*Anew2.shape[0])
Matrix1 = RBA_Matrix()
Matrix1.A = scipy.sparse.coo_matrix(Anew2)
Matrix1.b = bNew
Matrix1.LB = LBnew
Matrix1.UB = UBnew
Matrix1.row_signs = row_signsNew
Matrix1.row_names = row_namesNew
Matrix1.col_names = col_namesNew
Matrix1.f = fNew
if type == 'classic':
Matrix1.b = numpy.array([0]*len(row_signsNew))
LP1 = RBA_LP()
LP1.loadMatrix(Matrix1)
elif type == 'parsi':
MetaboliteRows = {i: Matrix1.row_names.index(
i) for i in Matrix1.row_names if i.startswith('M_')}
EnzymeCols = {i: Matrix1.col_names.index(
i) for i in Matrix1.col_names if i.startswith('R_') and '_enzyme' in i}
Matrix2 = RBA_Matrix()
Matrix2.A = scipy.sparse.coo_matrix(numpy.zeros((len(MetaboliteRows), len(EnzymeCols))))
Matrix2.b = numpy.array(Matrix1.b[list(MetaboliteRows.values())])
Matrix2.LB = numpy.array(Matrix1.LB[list(EnzymeCols.values())])
Matrix2.UB = numpy.array(Matrix1.UB[list(EnzymeCols.values())])
Matrix2.f = numpy.array(Matrix1.f[list(EnzymeCols.values())])
Matrix2.row_signs = [Matrix1.row_signs[i] for i in list(MetaboliteRows.values())]
Matrix2.row_names = list(MetaboliteRows.keys())
Matrix2.col_names = list(EnzymeCols.keys())
Matrix2.mapIndices()
Matrix1.b = numpy.array([0]*len(bNew))
LP1 = RBA_LP()
LP1.loadMatrix(Matrix1)
LP1.updateMatrix(Matrix2)
self.FBA = RBA_FBA(LP1)
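    # Example: derive the purely metabolic FBA-variant and access it afterwards:
    #   session.buildFBA(type='classic', objective='classic')
    #   session.FBA  # rbatools.RBA_FBA object built from the reduced LP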
def findMinMediumConcentration(self, metabolite, precision=0.00001, max=100, recording=False, loggingIntermediateSteps=False):
"""
Applies dichotomy-search to find the minimal feasible concentration of
growth-substrate in medium, at a previously set growth-rate.
Parameters
----------
metabolite : str
ID of metabolite in medium.
precision : float
            Numeric precision with which the minimum is approximated.
Default : 0.00001
max : float
            Defines the highest concentration to be screened for.
Default=100
recording : bool
Records intermediate feasible solutions
while approaching the minimum concentration.
Default : False
Returns
-------
minimum feasible growth-substrate concentration as float.
"""
minConc = 0.0
maxConc = max
testConc = minConc
iteration = 0
oldConc = self.Medium[metabolite]
while (maxConc - minConc) > precision:
self.setMedium(changes={metabolite: testConc})
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
iteration += 1
if recording:
run_name = 'Dichotomy_'+metabolite+'_' + \
str(testConc)+'_iteration_'+str(iteration)
self.recordResults(run_name)
maxConc = testConc
else:
minConc = testConc
testConc = numpy.mean([maxConc, minConc])
self.LogBook.addEntry(
'Minimal required {} concentration found to be: {}.'.format(metabolite, maxConc))
self.setMedium(changes={metabolite: oldConc})
return(maxConc)
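    # Example (hypothetical metabolite ID); the growth-rate set beforehand via
    # setMu is kept fixed during the search:
    #   c_min = session.findMinMediumConcentration('M_glc__D', precision=0.001)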
def addProtein(self, input):
"""
Adds representation of individual proteins to problem.
Parameters
----------
input : dict or str
If input is str it has to be the ID of a protein in the model.
            Then this protein is added to the problem and creates:
One constraint named Protein_'ID' (equality).
One variable named TotalLevel_'ID' representing the total amount.
One variable named Free_'ID'_'respectiveCompartment', this
represents the fraction of the protein not assuming any function.
It however consumes resources for synthesis (precursors and processes),
which are the same as defined in the model files.
            And takes up space in the compartment as specified in the model-files
for the protein.
If input is dict it has to have two keys; 'ID' and 'UnusedProteinFraction'.
            By specifying this input one can define that the unused fraction of the protein
can also reside in other compartments and which processes it requires.
The value to 'ID' is the ID of a protein in the model.
The value to 'UnusedProteinFraction' is another dictionary.
This can have several keys which must be model-compartments.
For each of the keys the value is a dict holding IDs of model-processes as Keys
and process requirements as Values (numerical).
This specifies which processes each of the compartment-species of the protein
requires.
This generates the same constraint and TotalLevel-variable as with the simple input,
however a variable representing each of the compartment-species for the unused fraction
is added and incorporates the specific process requirements.
            E.g: input = {'ID': 'proteinA',
                          'UnusedProteinFraction': {'Cytoplasm': {'Translation': 100, 'Folding': 10},
                                                    'Membrane': {'Translation': 100, 'Folding': 20,
                                                                 'Secretion': 100}
                                                    }
                          }
This adds 'proteinA' to the model, where the unused fraction can reside either in
the Cytoplasm or the Membrane. However while the cytosolic-species only requires the
processes 'Translation' and 'Folding'; the membrane-bound species also requires 'Secretion'
and occupies more folding capacity.
Then the constraint 'Protein_proteinA' is added and the 3 variables
'TotalLevel_proteinA', 'Free_proteinA_Cytoplasm' and 'Free_proteinA_Membrane'.
"""
if type(input) is str:
input = {'ID': input}
if 'ID' not in list(input.keys()):
print('Error, no protein ID provided')
return
if input['ID'] not in list(self.ModelStructure.ProteinInfo.Elements.keys()):
print('Error, protein not in model')
return
if 'UnusedProteinFraction' not in list(input.keys()):
input.update({'UnusedProteinFraction':
{self.ModelStructure.ProteinInfo.Elements[input['ID']]['Compartment']:
self.ModelStructure.ProteinInfo.Elements[input['ID']]['ProcessRequirements']}})
self.LogBook.addEntry('Protein {} added with specifications {}.'.format(
input['ID'], str(json.dumps(input))))
Muindexlist = []
## Building RBA_Matrix-object for new constraint-row, representing protein ##
UsedProtein = RBA_Matrix()
UsedProtein.A = scipy.sparse.coo_matrix(
buildUsedProteinConstraint(Controler=self, protein=input['ID']))
UsedProtein.b = numpy.array([float(0)])
UsedProtein.f = numpy.array(self.Problem.LP.f)
UsedProtein.LB = numpy.array(self.Problem.LP.LB)
UsedProtein.UB = numpy.array(self.Problem.LP.UB)
UsedProtein.row_signs = ['E']
UsedProtein.row_names = ['Protein_'+input['ID']]
UsedProtein.col_names = self.Problem.LP.col_names
## Add used protein row to problem ##
self.Problem.LP.addMatrix(matrix=UsedProtein)
## Add used protein row to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=UsedProtein)
## Building RBA_Matrix-object for new variable-col, representing total level of protein ##
TotProtein = RBA_Matrix()
TotProtein.A = scipy.sparse.coo_matrix(numpy.array(numpy.matrix(
numpy.array([float(0)]*self.Problem.LP.A.shape[0]+[float(-1)])).transpose()))
TotProtein.f = numpy.array([float(0)])
TotProtein.LB = numpy.array([float(0)])
TotProtein.UB = numpy.array([float(100000.0)])
TotProtein.b = numpy.array(list(self.Problem.LP.b)+list(UsedProtein.b))
TotProtein.row_signs = self.Problem.LP.row_signs+UsedProtein.row_signs
TotProtein.row_names = self.Problem.LP.row_names+UsedProtein.row_names
TotProtein.col_names = ['TotalLevel_'+input['ID']]
## Add total protein col to problem ##
self.Problem.LP.addMatrix(matrix=TotProtein)
## Add total protein col to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=TotProtein)
## Building RBA_Matrix-object for new variable-col,##
## representing each compartment-species of the protein ##
for comp_species in list(input['UnusedProteinFraction'].keys()):
## Initiate RBA_Matrix object##
UnusedProtein = RBA_Matrix()
UnusedProtein.col_names = ['Free_'+input['ID']+'_'+comp_species]
## Extract required processes for protein and the respective demand ##
ProcIDs = list(input['UnusedProteinFraction'][comp_species].keys())
Preq = list(input['UnusedProteinFraction'][comp_species].values())
ProcessCost = dict(
zip([self.ModelStructure.ProcessInfo.Elements[k]['ID'] for k in ProcIDs], Preq))
## Get required charged trna buildingblocks and their stoichiometry in protein ##
composition = self.ModelStructure.ProteinInfo.Elements[input['ID']]['AAcomposition']
## Extract the composition of charged trnas in terms of metabolic species ##
species = self.ModelStructure.ProcessInfo.Elements['Translation']['Components']
## Determine required metabolites and their stoichiometry in protein ##
MetaboliteCost = buildCompositionofUnusedProtein(
species=species, composition=composition)
## Assemble process and metabolite requirements into stoichiometric coloumn vector ##
## And add to RBA_Matrix object ##
colToAdd = numpy.array(numpy.matrix(numpy.array(list(MetaboliteCost.values())+list(ProcessCost.values()) +
[float(1)]+[self.ModelStructure.ProteinInfo.Elements[input['ID']]['AAnumber']])).transpose())
UnusedProtein.A = scipy.sparse.coo_matrix(colToAdd)
## Add other information to RBA_Matrix object ##
UnusedProtein.row_names = list(MetaboliteCost.keys())+[str(pc+'_capacity') for pc in list(
ProcessCost.keys())]+['Protein_'+input['ID']]+[str(comp_species + '_density')]
UnusedProtein.b = numpy.zeros(len(UnusedProtein.row_names))
UnusedProtein.row_signs = ['E']*len(UnusedProtein.row_names)
UnusedProtein.LB = numpy.array([float(0)])
UnusedProtein.UB = numpy.array([float(100000.0)])
UnusedProtein.f = numpy.array([float(0)])
self.ProteinDilutionIndices = list(
zip(list(MetaboliteCost.keys()), UnusedProtein.col_names*len(list(MetaboliteCost.keys()))))
## Add free protein col to problem ##
self.Problem.LP.addMatrix(matrix=UnusedProtein)
## Add free protein col to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=UnusedProtein)
## Find coefficients of unused protein column, subject to dilution (Metabolite and Process cost) ##
## And add them to MuDepIndices_A ##
nonZeroEntries = numpy.where(UnusedProtein.A != 0)[0]
self.Problem.MuDepIndices_A += [(UnusedProtein.row_names[i], UnusedProtein.col_names[0]) for i in nonZeroEntries if UnusedProtein.row_names[i]
!= 'Protein_'+input['ID'] and UnusedProtein.row_names[i] not in self.Problem.CompartmentDensities]
self.setMu(self.Problem.Mu)
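    # Example, mirroring the docstring above (hypothetical protein ID):
    #   session.addProtein({'ID': 'proteinA',
    #                       'UnusedProteinFraction': {'Cytoplasm': {'Translation': 100,
    #                                                               'Folding': 10}}})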
## !!! ##
def eukaryoticDensities(self, totalAA=3.1, CompartmentRelationships=True, CompartmentComponents=False):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
Signs = ['L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L']
        # Note: the totalAA argument is overridden by a hard-coded value here.
        totalAA = 3.1*0.71
m_mIM = 0.66
m_mIMS = 2
m_mOM = 8
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
CompartmentMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(1)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*len(Compartments)+['E']
# CompartmentMatrix.row_signs = ['E']*(len(Compartments)+1)
CompartmentMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalCapacity']
CompartmentMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
CompartmentMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
CompartmentMatrix.row_signs += ['E', 'E', 'E']
CompartmentMatrix.b = numpy.array(list(CompartmentMatrix.b)+[float(0)]*3)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_mOM')] = -m_mOM
CompartmentMatrix.A = scipy.sparse.coo_matrix(Anew)
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
if CompartmentComponents:
AlipidsA = numpy.zeros((7, len(Compartments)))
Alipids = RBA_Matrix()
Alipids.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs += ['E', 'E', 'E', 'E', 'E', 'E', 'E']
Alipids.b = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
Alipids.LB = numpy.array([float(0)]*len(Compartments))
Alipids.UB = numpy.array([float(1)]*len(Compartments))
Alipids.f = numpy.array([float(0)]*len(Compartments))
AlipidsA[Alipids.row_names.index('M_pc_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.0000883*totalAA
AlipidsA[Alipids.row_names.index('M_pe_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.00005852*totalAA
AlipidsA[Alipids.row_names.index('M_ptd1ino_SC_c'),
Alipids.col_names.index('F_mIM')] = -0.00003377*totalAA
AlipidsA[Alipids.row_names.index('M_ps_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.00000873*totalAA
AlipidsA[Alipids.row_names.index('M_clpn_SC_m'),
Alipids.col_names.index('F_mIM')] = -0.00002*totalAA
AlipidsA[Alipids.row_names.index('M_pa_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.0000039*totalAA
AlipidsA[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mIM')] = -0.008547*totalAA
self.Problem.MuDepIndices_A += [('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'),
('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), ('M_pa_SC_c', 'F_mIM'), ('M_ergst_c', 'F_mIM')]
AlipidsA[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mOM')] = -0.000636*totalAA
AlipidsA[Alipids.row_names.index('M_pe_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0004822*totalAA
AlipidsA[Alipids.row_names.index('M_ptd1ino_SC_c'),
Alipids.col_names.index('F_mOM')] = -0.0001289*totalAA
AlipidsA[Alipids.row_names.index('M_ps_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0000167*totalAA
AlipidsA[Alipids.row_names.index('M_clpn_SC_m'), Alipids.col_names.index(
'F_mOM')] = -0.00004467*totalAA
AlipidsA[Alipids.row_names.index('M_pa_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0000696*totalAA
self.Problem.MuDepIndices_A += [('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c',
'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')]
Alipids.A = scipy.sparse.coo_matrix(AlipidsA)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=[('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'), ('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), ('M_pa_SC_c', 'F_mIM'), (
'M_ergst_c', 'F_mIM'), ('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c', 'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')])
self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=[('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'), ('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), (
'M_pa_SC_c', 'F_mIM'), ('M_ergst_c', 'F_mIM'), ('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c', 'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')])
## !!! ##
def eukaryoticDensities2(self, totalAA=3.1, CompartmentRelationships=True, CompartmentComponents=False):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
        # Note: the totalAA argument is overridden by a hard-coded value here.
        totalAA = 3.1*0.69
m_mIM = 1.11
m_mIMS = 0.7
m_mOM = 7.2
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
CompartmentMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(1)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*(len(Compartments)+1)
# CompartmentMatrix.row_signs = ['E']*(len(Compartments)+1)
CompartmentMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalCapacity']
CompartmentMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
CompartmentMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
CompartmentMatrix.row_signs += ['E', 'E', 'E']
CompartmentMatrix.b = numpy.array(list(CompartmentMatrix.b)+[float(0)]*3)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_mOM')] = -m_mOM
CompartmentMatrix.A = scipy.sparse.coo_matrix(Anew)
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
if CompartmentComponents:
PC_mIM = 0.0000883
PE_mIM = 0.00005852
PI_mIM = 0.00003377
PS_mIM = 0.00000873
CL_mIM = 0.00002
PA_mIM = 0.0000039
ES_mIM = 0.008547
PC_mOM = 0.000636
PE_mOM = 0.0004822
PI_mOM = 0.0001289
PS_mOM = 0.0000167
CL_mOM = 0.00004467
PA_mOM = 0.0000696
ES_mOM = 0.0
ConstraintMatrix = numpy.zeros((7, 0))
Alipids = RBA_Matrix()
Alipids.col_names = []
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs = [
self.Problem.LP.row_signs[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names]
Alipids.b = numpy.array(
[self.Problem.LP.b[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names])
Alipids.LB = numpy.array([])
Alipids.UB = numpy.array([])
Alipids.f = numpy.array([])
MudepIndices = []
for pc in self.ModelStructure.ProcessInfo.Elements.keys():
if self.ModelStructure.ProcessInfo.Elements[pc]['ID'] not in self.Problem.LP.col_names:
continue
ConstraintMatrixNew = numpy.zeros(
(ConstraintMatrix.shape[0], ConstraintMatrix.shape[1]+1))
ConstraintMatrixNew[:, 0:ConstraintMatrix.shape[1]] = ConstraintMatrix
Alipids.col_names.append(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])
# Alipids.LB = numpy.array(list(Alipids.LB).append(list(self.Problem.LP.LB)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
# Alipids.UB = numpy.array(list(Alipids.UB).append(list(self.Problem.LP.UB)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
# Alipids.f = numpy.array(list(Alipids.f).append(list(self.Problem.LP.f)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
                # Wrap each scalar bound in a one-element list so numpy.concatenate
                # receives 1-d arrays (a 0-d array would raise a ValueError).
                idx = self.Problem.LP.col_names.index(
                    self.ModelStructure.ProcessInfo.Elements[pc]['ID'])
                Alipids.LB = numpy.concatenate([Alipids.LB, numpy.array([self.Problem.LP.LB[idx]])])
                Alipids.UB = numpy.concatenate([Alipids.UB, numpy.array([self.Problem.LP.UB[idx]])])
                Alipids.f = numpy.concatenate([Alipids.f, numpy.array([self.Problem.LP.f[idx]])])
for p in self.ModelStructure.ProcessInfo.Elements[pc]['Composition'].keys():
lE = sum(list(self.ModelStructure.ProteinInfo.Elements[p]['AAcomposition'].values(
)))*self.ModelStructure.ProcessInfo.Elements[pc]['Composition'][p]
if self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mOM':
# lipid species are constraint rows, so index them via row_names (not col_names)
ConstraintMatrixNew[Alipids.row_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mOM*lE/totalAA
# append (row, column) index tuples; '+=' with a bare tuple would splice
# the two strings into the list individually
MudepIndices += [('M_pc_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
MudepIndices += [('M_pe_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
MudepIndices += [('M_ptd1ino_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
MudepIndices += [('M_ps_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
MudepIndices += [('M_clpn_SC_m', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
MudepIndices += [('M_pa_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
elif self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mIM':
ConstraintMatrixNew[Alipids.row_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ergst_c'), ConstraintMatrix.shape[1]] -= ES_mIM*lE/totalAA
MudepIndices += [('M_pc_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
MudepIndices += [('M_pe_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
MudepIndices += [('M_ptd1ino_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
MudepIndices += [('M_ps_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
MudepIndices += [('M_clpn_SC_m', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
MudepIndices += [('M_pa_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
MudepIndices += [('M_ergst_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]
ConstraintMatrix = ConstraintMatrixNew
for e in self.ModelStructure.EnzymeInfo.Elements.keys():
if e not in self.Problem.LP.col_names:
continue
ConstraintMatrixNew = numpy.zeros(
(ConstraintMatrix.shape[0], ConstraintMatrix.shape[1]+1))
ConstraintMatrixNew[:, 0:ConstraintMatrix.shape[1]] = ConstraintMatrix
Alipids.col_names.append(e)
# wrap the scalar bound in a list so numpy.array yields a 1-d array
Alipids.LB = numpy.concatenate([Alipids.LB, numpy.array(
[list(self.Problem.LP.LB)[self.Problem.LP.col_names.index(e)]])])
Alipids.UB = numpy.concatenate([Alipids.UB, numpy.array(
[list(self.Problem.LP.UB)[self.Problem.LP.col_names.index(e)]])])
Alipids.f = numpy.concatenate([Alipids.f, numpy.array(
[list(self.Problem.LP.f)[self.Problem.LP.col_names.index(e)]])])
for p in self.ModelStructure.EnzymeInfo.Elements[e]['Subunits'].keys():
lE = sum(
list(self.ModelStructure.ProteinInfo.Elements[p]['AAcomposition'].values()))
lE *= self.ModelStructure.EnzymeInfo.Elements[e]['Subunits'][p]['StochFac']
if self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mOM':
ConstraintMatrixNew[Alipids.row_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mOM*lE/totalAA
MudepIndices += [('M_pc_SC_c', e)]
MudepIndices += [('M_pe_SC_c', e)]
MudepIndices += [('M_ptd1ino_SC_c', e)]
MudepIndices += [('M_ps_SC_c', e)]
MudepIndices += [('M_clpn_SC_m', e)]
MudepIndices += [('M_pa_SC_c', e)]
elif self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mIM':
ConstraintMatrixNew[Alipids.row_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.row_names.index(
'M_ergst_c'), ConstraintMatrix.shape[1]] -= ES_mIM*lE/totalAA
MudepIndices += [('M_pc_SC_c', e)]
MudepIndices += [('M_pe_SC_c', e)]
MudepIndices += [('M_ptd1ino_SC_c', e)]
MudepIndices += [('M_ps_SC_c', e)]
MudepIndices += [('M_clpn_SC_m', e)]
MudepIndices += [('M_pa_SC_c', e)]
MudepIndices += [('M_ergst_c', e)]
ConstraintMatrix = ConstraintMatrixNew
self.Problem.MuDepIndices_A += MudepIndices
Alipids.A = scipy.sparse.coo_matrix(ConstraintMatrix)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=MudepIndices)
self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=MudepIndices)
## !!! ##
def eukaryoticDensities3(self, totalAA=3.1, VolumeFraction=False, CompartmentRelationships=True, CompartmentComponents=False):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA = 3.1*0.91  # note: overrides the totalAA argument passed to this method
m_mIM = 0.66
m_mIMS = 2
m_mOM = 8
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
# A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
# self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
# A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
# self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
OccupationMatrix = RBA_Matrix()
# A = numpy.ones((len(Compartments)+1, len(Compartments)))
A = -numpy.eye(len(Compartments))
# Eye = -numpy.eye(len(Compartments))
# A[0:len(Compartments), :] = Eye
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
# OccupationMatrix.b = numpy.array([-0.209*totalAA]+[float(0)]*(len(Compartments)-1)+[float(totalAA)])
OccupationMatrix.b = numpy.array([-0.209*totalAA]+[float(0)]*(len(Compartments)-1))
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
# OccupationMatrix.row_signs = ['E']*(len(Compartments))+['L']
OccupationMatrix.row_signs = ['E']*(len(Compartments))
# OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
# 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalProtein']
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
CompartmentMatrix = RBA_Matrix()
if VolumeFraction:
A = numpy.eye(len(Compartments))*5/float(totalAA)
else:
A = numpy.eye(len(Compartments))/float(totalAA)
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*(len(Compartments))
# CompartmentMatrix.row_signs = ['E']*(len(Compartments))
CompartmentMatrix.row_names = ['n_volume', 'mIM_volume', 'vM_volume', 'mIMS_volume',
'm_volume', 'erM_volume', 'mOM_volume', 'x_volume', 'cM_volume', 'gM_volume', 'c_volume']
CompartmentMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
CompartmentMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
VolumeMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
# A[len(Compartments), [1, 5, 6, 8, 9]] = 0
# A[len(Compartments), 8] = 0
VolumeMatrix.A = scipy.sparse.coo_matrix(A)
VolumeMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
VolumeMatrix.f = numpy.array([float(0)]*len(Compartments))
VolumeMatrix.LB = numpy.array([float(0)]*len(Compartments))
VolumeMatrix.UB = numpy.array([float(1)]*len(Compartments))
VolumeMatrix.row_signs = ['L']*(len(Compartments))+['E']
# VolumeMatrix.row_signs = ['E']*(len(Compartments))+['E']
VolumeMatrix.row_names = ['n_volume', 'mIM_volume', 'vM_volume', 'mIMS_volume', 'm_volume',
'erM_volume', 'mOM_volume', 'x_volume', 'cM_volume', 'gM_volume', 'c_volume', 'TotalVolume']
VolumeMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
if not CompartmentRelationships:
VolumeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=VolumeMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=VolumeMatrix)
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
VolumeMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
VolumeMatrix.row_signs += ['E', 'E', 'E']
VolumeMatrix.b = numpy.array(list(VolumeMatrix.b)+[float(0)]*3)
Anew[VolumeMatrix.row_names.index(
'm_mIM'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mIMS'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mOM'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mIM'), VolumeMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[VolumeMatrix.row_names.index(
'm_mIMS'), VolumeMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[VolumeMatrix.row_names.index(
'm_mOM'), VolumeMatrix.col_names.index('F_mOM')] = -m_mOM
VolumeMatrix.A = scipy.sparse.coo_matrix(Anew)
VolumeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=VolumeMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=VolumeMatrix)
if CompartmentComponents:
PC_mIM = 0.0000883
PE_mIM = 0.00005852
PI_mIM = 0.00003377
PS_mIM = 0.00000873
CL_mIM = 0.00002
PA_mIM = 0.0000039
ES_mIM = 0.008547
PC_mOM = 0.000636
PE_mOM = 0.0004822
PI_mOM = 0.0001289
PS_mOM = 0.0000167
CL_mOM = 0.00004467
PA_mOM = 0.0000696
ES_mOM = 0.0
PC_vM = 0.0003635
PE_vM = 0.4156
PI_vM = 0.0001297
PS_vM = 0.00003435
CL_vM = 0.0000068
PA_vM = 0.0000186
ES_vM = 0.0142
PC_n = 0.000055
PE_n = 0.000035
PI_n = 0.000017
PS_n = 0.0000072
CL_n = 0.0
PA_n = 0.0000031
ES_n = 0.0086
PC_gM = 0.00043
PE_gM = 0.00044
PI_gM = 0.00041
PS_gM = 0.0
CL_gM = 0.00022
PA_gM = 0.0
ES_gM = 0.0
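# Note: the nonzero lipid coefficients above are overridden with zeros
# below, i.e. the compartment-specific lipid demands are currently disabled.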
PC_n = 0.0
PE_n = 0.0
PI_n = 0.0
PS_n = 0.0
CL_n = 0.0
PA_n = 0.0
ES_n = 0.0
PC_gM = 0.0
PE_gM = 0.0
PI_gM = 0.0
PS_gM = 0.0
CL_gM = 0.0
PA_gM = 0.0
ES_gM = 0.0
PC_vM = 0.0
PE_vM = 0.0
PI_vM = 0.0
PS_vM = 0.0
CL_vM = 0.0
PA_vM = 0.0
ES_vM = 0.0
PC_mIM = 0.0
PE_mIM = 0.0
PI_mIM = 0.0
PS_mIM = 0.0
CL_mIM = 0.0
PA_mIM = 0.0
ES_mIM = 0.0
PC_mOM = 0.0
PE_mOM = 0.0
PI_mOM = 0.0
PS_mOM = 0.0
CL_mOM = 0.0
PA_mOM = 0.0
ES_mOM = 0.0
Alipids = RBA_Matrix()
Alipids.col_names = ['F_mIM', 'F_mOM', 'F_vM', 'F_n', 'F_gM']
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs = [
self.Problem.LP.row_signs[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names]
Alipids.b = numpy.array(
[self.Problem.LP.b[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names])
Alipids.LB = numpy.array([0, 0, 0, 0, 0])
Alipids.UB = numpy.array([1, 1, 1, 1, 1])
Alipids.f = numpy.array([0, 0, 0, 0, 0])
LipidMatrix = numpy.zeros((7, 5))
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mIM')] = PC_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_mIM')] = PE_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_mIM')] = PI_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_mIM')] = PS_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_mIM')] = CL_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_mIM')] = PA_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mIM')] = ES_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mOM')] = PC_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_mOM')] = PE_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_mOM')] = PI_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_mOM')] = PS_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_mOM')] = CL_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_mOM')] = PA_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mOM')] = ES_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_vM')] = PC_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_vM')] = PE_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_vM')] = PI_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_vM')] = PS_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_vM')] = CL_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_vM')] = PA_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_vM')] = ES_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_n')] = PC_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_n')] = PE_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_n')] = PI_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_n')] = PS_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_n')] = CL_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_n')] = PA_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_n')] = ES_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_gM')] = PC_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_gM')] = PE_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_gM')] = PI_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_gM')] = PS_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_gM')] = CL_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_gM')] = PA_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_gM')] = ES_gM/totalAA
MudepIndices = [('M_pc_SC_c', i) for i in Alipids.col_names]+[('M_pe_SC_c', i) for i in Alipids.col_names]+[('M_ptd1ino_SC_c', i) for i in Alipids.col_names]+[('M_ps_SC_c', i)
for i in Alipids.col_names]+[('M_clpn_SC_m', i) for i in Alipids.col_names]+[('M_pa_SC_c', i) for i in Alipids.col_names]+[('M_ergst_c', i) for i in Alipids.col_names]
self.Problem.MuDepIndices_A += MudepIndices
Alipids.A = scipy.sparse.coo_matrix(LipidMatrix)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=MudepIndices)
self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=MudepIndices)
## !!! ##
def eukaryoticDensities4(self, CompartmentRelationships=True):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA = 3.1*0.91
OccupationMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
OccupationMatrix.b = numpy.array(list([float(0)]*len(Compartments))+[totalAA])
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
OccupationMatrix.row_signs = ['E']*(len(Compartments)+1)
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'O_total']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
if CompartmentRelationships:
m_mIM = 0.5
m_mIMS = 1
m_mOM = 5
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
OccupationMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
OccupationMatrix.row_signs += ['E', 'E', 'E']
OccupationMatrix.b = numpy.array(list(OccupationMatrix.b)+[float(0)]*3)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_mIM')] = -m_mIM
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_mIMS')] = -m_mIMS
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_mOM')] = -m_mOM
OccupationMatrix.A = scipy.sparse.coo_matrix(Anew)
else:
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
# {'Index':{'Param1':'+','Param2':'+','Param2':'-'}}
#Type: 'Sum'#
# {'Index':'Param1'}
self.Problem.MuDependencies['FromParameters']['b'].update(
{'n_density': 'AAres_PG_nucleus_DNA'})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'O_total': {'Equation': 'amino_acid_concentration_total - AAres_PG_secreted_Euk', 'Variables': ['amino_acid_concentration_total', 'AAres_PG_secreted_Euk']}})
self.Problem.MuDependencies['FromMatrix']['b'].remove('n_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mIM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('vM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mIMS_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('m_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('erM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mOM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('x_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('cM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('gM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('c_density')
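# The three coupling rows appended above fix the proportions between the
# mitochondrial compartments. With the default coefficients (m_mIM=0.5,
# m_mIMS=1, m_mOM=5) the equality rows read:
# m_mIM:  O_m - 0.5*O_mIM  == 0  ->  O_mIM  = 2*O_m
# m_mIMS: O_m - 1.0*O_mIMS == 0  ->  O_mIMS = O_m
# m_mOM:  O_m - 5.0*O_mOM  == 0  ->  O_mOM  = O_m/5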
## !!! ##
def eukaryoticDensities_calibration(self, CompartmentRelationships=False, mitoProportions={}, amino_acid_concentration_total='amino_acid_concentration_total'):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA_parameter = amino_acid_concentration_total
totalAA = 3.1
OccupationMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
OccupationMatrix.b = numpy.array(list([float(0)]*len(Compartments))+[totalAA])
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
OccupationMatrix.row_signs = ['E']*(len(Compartments)+1)
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'O_total']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
if CompartmentRelationships:
if len(list(mitoProportions.keys())) == 3:
m_mIM = mitoProportions['m_mIM']
m_mIMS = mitoProportions['m_mIMS']
m_mOM = mitoProportions['m_mOM']
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
OccupationMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
OccupationMatrix.row_signs += ['E', 'E', 'E']
OccupationMatrix.b = numpy.array(list(OccupationMatrix.b)+[float(0)]*3)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_mIM')] = -m_mIM
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_mIMS')] = -m_mIMS
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_mOM')] = -m_mOM
OccupationMatrix.A = scipy.sparse.coo_matrix(Anew)
else:
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
# {'Index':{'Param1':'+','Param2':'+','Param2':'-'}}
#Type: 'Sum'#
# {'Index':'Param1'}
self.Problem.MuDependencies['FromParameters']['b'].update(
{'n_density': {'Equation': '-nonenzymatic_proteins_n/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_n', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mIM_density': {
'Equation': '-nonenzymatic_proteins_mIM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mIM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'vM_density': {
'Equation': '-nonenzymatic_proteins_vM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_vM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mIMS_density': {
'Equation': '-nonenzymatic_proteins_mIMS/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mIMS', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'m_density': {'Equation': '-nonenzymatic_proteins_m/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_m', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'erM_density': {
'Equation': '-nonenzymatic_proteins_erM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_erM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mOM_density': {
'Equation': '-nonenzymatic_proteins_mOM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mOM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'x_density': {'Equation': '-nonenzymatic_proteins_x/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_x', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'cM_density': {
'Equation': '-nonenzymatic_proteins_cM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_cM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'gM_density': {
'Equation': '-nonenzymatic_proteins_gM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_gM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'c_density': {'Equation': '-nonenzymatic_proteins_c/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_c', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'O_total': {'Equation': '{} - nonenzymatic_proteins_Secreted/inverse_average_protein_length'.format(totalAA_parameter), 'Variables': [
totalAA_parameter, 'nonenzymatic_proteins_Secreted', 'inverse_average_protein_length']}})
# !!! deal with hardcoded parameter_names... !!!
def estimate_specific_Kapps(self, proteomicsData, flux_bounds, mu, biomass_function=None, target_biomass_function=True, parsimonious_fba=True):
"""
Parameters
----------
proteomicsData : pandas.DataFrame (in mmol/gDW)
flux_bounds : pandas.DataFrame (in mmol/(gDW*h))
mu : float (in 1/h)
biomass_function : str
target_biomass_function : bool
atp_maintenance_to_biomassfunction : bool
eukaryotic : bool
"""
from scipy.stats.mstats import gmean
old_model = copy.deepcopy(self.model)
# iterate over a copy, since elements may be removed from the list below
for i in list(self.model.targets.target_groups._elements_by_id['translation_targets'].concentrations._elements):
if i.species == 'average_protein_c':
new_agg = rba.xml.parameters.Aggregate(id_='total_protein', type_='multiplication')
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='amino_acid_concentration_total'))
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='inverse_average_protein_length'))
self.model.parameters.aggregates._elements.append(new_agg)
i.value = 'total_protein'
else:
self.model.targets.target_groups._elements_by_id['translation_targets'].concentrations._elements.remove(
i)
for i in list(self.model.targets.target_groups._elements_by_id['transcription_targets'].concentrations._elements):
if i.species == 'mrna':
new_agg = rba.xml.parameters.Aggregate(id_='total_rna', type_='multiplication')
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='RNA_massfraction_CarbonLimitation'))
new_agg.function_references.append(
rba.xml.parameters.FunctionReference(function='RNA_inversemillimolarweight'))
self.model.parameters.aggregates._elements.append(new_agg)
i.value = 'total_rna'
else:
self.model.targets.target_groups._elements_by_id['transcription_targets'].concentrations._elements.remove(
i)
self.rebuild_from_model()
self.setMedium(self.Medium)
self.addExchangeReactions()
self.setMu(mu)
if target_biomass_function:
self.buildFBA(objective='targets', maintenanceToBM=True)
BMfunction = 'R_BIOMASS_targetsRBA'
else:
self.buildFBA(objective='classic', maintenanceToBM=False)
BMfunction = biomass_function
for j in [i for i in self.Medium.keys() if self.Medium[i] == 0]:
Exrxn = 'R_EX_'+j.split('M_')[-1]+'_e'
self.FBA.setUB({Exrxn: 0})
rxn_LBs = {}
rxn_UBs = {}
for rx in flux_bounds['Reaction_ID']:
lb = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'LB'].values[0]
ub = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'UB'].values[0]
if not pandas.isna(lb):
rxn_LBs.update({rx: lb})
if not pandas.isna(ub):
rxn_UBs.update({rx: ub})
self.FBA.setLB(rxn_LBs)
self.FBA.setUB(rxn_UBs)
self.FBA.clearObjective()
self.FBA.setObjectiveCoefficients({BMfunction: -1})
self.FBA.solveLP()
BMfluxOld = self.FBA.SolutionValues[BMfunction]
if parsimonious_fba:
self.FBA.parsimonise()
self.FBA.setLB(rxn_LBs)
self.FBA.setUB(rxn_UBs)
self.FBA.setLB({BMfunction: BMfluxOld})
self.FBA.setUB({BMfunction: BMfluxOld})
self.FBA.solveLP()
FluxDistribution = pandas.DataFrame(index=list(
self.FBA.SolutionValues.keys()), columns=['FluxValues'])
FluxDistribution['FluxValues'] = list(self.FBA.SolutionValues.values())
BMfluxNew = self.FBA.SolutionValues[BMfunction]
ProtoIDmap = {}
for i in self.ModelStructure.ProteinInfo.Elements.keys():
ProtoID = self.ModelStructure.ProteinInfo.Elements[i]['ProtoID']
if ProtoID in list(proteomicsData['ID']):
if not pandas.isna(proteomicsData.loc[proteomicsData['ID'] == ProtoID, 'copy_number'].values[0]):
if proteomicsData.loc[proteomicsData['ID'] == ProtoID, 'copy_number'].values[0] != 0:
if ProtoID in ProtoIDmap.keys():
ProtoIDmap[ProtoID]['ModelProteins'].append(i)
else:
ProtoIDmap.update(
{ProtoID: {'ModelProteins': [i], 'CopyNumber': proteomicsData.loc[proteomicsData['ID'] == ProtoID, 'copy_number'].values[0]}})
ReactionMap = {}
for i in self.ModelStructure.ReactionInfo.Elements.keys():
if '_duplicate_' in i:
continue
else:
if i in list(FluxDistribution.index):
if FluxDistribution.loc[i, 'FluxValues'] != 0:
ReactionMap.update({i: {'ModelReactions': list(
[i]+self.ModelStructure.ReactionInfo.Elements[i]['Twins']), 'Flux': FluxDistribution.loc[i, 'FluxValues']}})
IsoReaction2ProtoReaction = {}
for i in ReactionMap.keys():
for j in ReactionMap[i]['ModelReactions']:
IsoReaction2ProtoReaction[j] = i
EnzymeMap = {}
for i in self.ModelStructure.EnzymeInfo.Elements.keys():
if self.ModelStructure.EnzymeInfo.Elements[i]['Reaction'] in IsoReaction2ProtoReaction:
CompositionDict = {self.ModelStructure.ProteinInfo.Elements[j]['ProtoID']: self.ModelStructure.EnzymeInfo.Elements[
i]['Subunits'][j] for j in self.ModelStructure.EnzymeInfo.Elements[i]['Subunits'].keys()}
ProtoReaction = IsoReaction2ProtoReaction[self.ModelStructure.EnzymeInfo.Elements[i]['Reaction']]
CopyNumbers = []
Stoichiometries = []
EnzymeNumbers = []
for j in CompositionDict.keys():
if j in ProtoIDmap.keys():
CopyNumbers.append(ProtoIDmap[j]['CopyNumber'])
Stoichiometries.append(CompositionDict[j])
EnzymeNumbers.append(ProtoIDmap[j]['CopyNumber']/CompositionDict[j])
GM_enzymenumber = 0
if len(EnzymeNumbers) > 0:
GM_enzymenumber = gmean(numpy.array(EnzymeNumbers))
EnzymeMap.update(
{i: {'ProtoReaction': ProtoReaction, 'EnzymeNumber': GM_enzymenumber}})
EnzymeMap2 = {}
for i in ReactionMap.keys():
totalIsoEnzymeNumber = 0
for j in ReactionMap[i]['ModelReactions']:
respectiveEnzyme = self.ModelStructure.ReactionInfo.Elements[j]['Enzyme']
if respectiveEnzyme in EnzymeMap.keys():
totalIsoEnzymeNumber += EnzymeMap[respectiveEnzyme]['EnzymeNumber']
for j in ReactionMap[i]['ModelReactions']:
respectiveEnzyme = self.ModelStructure.ReactionInfo.Elements[j]['Enzyme']
if respectiveEnzyme in EnzymeMap.keys():
concentration = EnzymeMap[respectiveEnzyme]['EnzymeNumber']
if concentration != 0:
if numpy.isfinite(concentration):
specificFlux = ReactionMap[i]['Flux'] * \
EnzymeMap[respectiveEnzyme]['EnzymeNumber']/totalIsoEnzymeNumber
EnzymeMap2.update({respectiveEnzyme: {'CopyNumber': EnzymeMap[respectiveEnzyme]['EnzymeNumber'],
'Concentration': concentration, 'Flux': specificFlux, 'Kapp': abs(specificFlux/concentration)}})
self.model = old_model
self.rebuild_from_model()
self.setMedium(self.Medium)
out = pandas.DataFrame()
for i in EnzymeMap2.keys():
# if EnzymeMap2[i]['CopyNumber'] == 0:
# continue
out.loc[i, 'Enzyme_ID'] = i
out.loc[i, 'CopyNumber'] = EnzymeMap2[i]['CopyNumber']
out.loc[i, 'Concentration'] = EnzymeMap2[i]['Concentration']
out.loc[i, 'Flux'] = EnzymeMap2[i]['Flux']
out.loc[i, 'Kapp'] = EnzymeMap2[i]['Kapp']
return(out)
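# Minimal usage sketch (hypothetical names and file layout, not part of the
# original API): estimate enzyme-specific kapps from proteomics and flux data
# measured at a growth rate of 0.25/h.
# proteomics = pandas.read_csv('proteomics.csv')  # columns: 'ID', 'copy_number'
# bounds = pandas.read_csv('flux_bounds.csv')  # columns: 'Reaction_ID', 'LB', 'UB'
# specific_kapps = Simulation.estimate_specific_Kapps(
#     proteomicsData=proteomics, flux_bounds=bounds, mu=0.25)
# print(specific_kapps[['Enzyme_ID', 'Kapp']].head())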
def estimate_default_Kapps(self, target_mu, compartment_densities_and_PGs=None, flux_bounds=None, plateau_limit=4, mu_approximation_precision=0.005, transporter_to_lumen_coefficient=10, default_kapp_LB=0, default_kapp_UB=1000000, start_val=200000, densities_to_fix=None, eukaryotic=False):
"""
Parameters
----------
target_mu : float
compartment_densities_and_PGs : pandas.DataFrame
flux_bounds : pandas.DataFrame
"""
orig_enz = self.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value
out = pandas.DataFrame()
for comp in list(compartment_densities_and_PGs['Compartment_ID']):
self.model.parameters.functions._elements_by_id[str(
'fraction_protein_'+comp)].parameters._elements_by_id['CONSTANT'].value = compartment_densities_and_PGs.loc[compartment_densities_and_PGs['Compartment_ID'] == comp, 'Density']
self.model.parameters.functions._elements_by_id[str(
'fraction_non_enzymatic_protein_'+comp)].parameters._elements_by_id['CONSTANT'].value = compartment_densities_and_PGs.loc[compartment_densities_and_PGs['Compartment_ID'] == comp, 'PG_fraction']
self.rebuild_from_model()
self.addExchangeReactions()
self.setMedium(self.Medium)
if densities_to_fix is None:
comp_density_rows = list(self.Problem.CompartmentDensities)
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
else:
if len(densities_to_fix) != 0:
comp_density_rows = densities_to_fix
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
rxn_LBs = {}
rxn_UBs = {}
for rx in flux_bounds['Reaction_ID']:
lb = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'LB'].values[0]
ub = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'UB'].values[0]
if not pandas.isna(lb):
rxn_LBs.update({rx: lb})
if not pandas.isna(ub):
rxn_UBs.update({rx: ub})
self.Problem.setLB(rxn_LBs)
self.Problem.setUB(rxn_UBs)
kapp_LB = default_kapp_LB
if default_kapp_UB is not None:
kapp_UB = default_kapp_UB
else:
kapp_UB = orig_enz*1000
# new_kapp = (kapp_UB+kapp_LB)/2
if start_val is not None:
new_kapp = start_val
else:
new_kapp = orig_enz
Mu_pred = self.findMaxGrowthRate(precision=0.005, max=1)
Mus = []
Mus_Error = []
Kapps = []
last_Mu = numpy.nan
plateau_count = 0
if abs(target_mu - Mu_pred) > mu_approximation_precision:
while abs(target_mu - Mu_pred) > mu_approximation_precision:
if plateau_count >= plateau_limit:
break
self.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value = new_kapp
self.model.parameters.functions._elements_by_id['default_transporter_efficiency'].parameters._elements_by_id[
'CONSTANT'].value = transporter_to_lumen_coefficient*new_kapp
self.rebuild_from_model()
self.addExchangeReactions()
self.setMedium(self.Medium)
self.Problem.setLB(rxn_LBs)
self.Problem.setUB(rxn_UBs)
if densities_to_fix is None:
comp_density_rows = list(self.Problem.CompartmentDensities)
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
else:
if len(densities_to_fix) != 0:
comp_density_rows = densities_to_fix
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
Mu_pred = self.findMaxGrowthRate(precision=0.005, max=1)
Mus_Error.append(abs(target_mu - Mu_pred))
Mus.append(Mu_pred)
Kapps.append(new_kapp)
if Mu_pred > target_mu:
new_kapp_prelim = kapp_LB+(0.5*abs(kapp_LB-new_kapp))
kapp_UB = new_kapp
elif Mu_pred < target_mu:
new_kapp_prelim = kapp_UB-(0.5*abs(new_kapp-kapp_UB))
kapp_LB = new_kapp
new_kapp = new_kapp_prelim
if len(Mus) > 2:
if Mus[-2] == Mu_pred:
plateau_count += 1
else:
plateau_count = 0
else:
Mus.append(Mu_pred)
Mus_Error.append(abs(target_mu - Mu_pred))
Kapps.append(
self.model.parameters.functions._elements_by_id['default_efficiency'].parameters._elements_by_id['CONSTANT'].value)
self.rebuild_from_model()
self.setMedium(self.Medium)
out = pandas.DataFrame()
out['Mu'] = Mus
out['delta_Mu'] = Mus_Error
out['default_efficiency'] = Kapps
out['default_transporter_efficiency'] = [transporter_to_lumen_coefficient*i for i in Kapps]
return(out)
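# Usage sketch ('Simulation' is a hypothetical RBA_Session instance; input
# frames as documented above): bisect the default efficiency until the
# predicted growth rate matches the measured one.
# search = Simulation.estimate_default_Kapps(target_mu=0.4,
#     compartment_densities_and_PGs=density_df, flux_bounds=bounds)
# default_kapp = search['default_efficiency'].iloc[-1]  # value of last iteration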
def inject_default_kapps(self, default_kapp, default_transporter_kapp):
if numpy.isfinite(default_kapp):
self.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value = default_kapp
if numpy.isfinite(default_transporter_kapp):
self.model.parameters.functions._elements_by_id[
'default_transporter_efficiency'].parameters._elements_by_id['CONSTANT'].value = default_transporter_kapp
self.rebuild_from_model()
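# Follow-up sketch: write a calibrated default kapp back into the model; the
# factor 10 mirrors the transporter_to_lumen_coefficient default used above.
# Simulation.inject_default_kapps(default_kapp=default_kapp,
#     default_transporter_kapp=10*default_kapp)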
def inject_process_capacities(self, process_efficiencies):
"""
Parameters
----------
process_efficiencies : pandas.DataFrame(columns=['Process','Parameter','Value'])
"""
for i in process_efficiencies.index:
if numpy.isfinite(process_efficiencies.loc[i, 'Value']):
if process_efficiencies.loc[i, 'Process'] in self.model.processes.processes._elements_by_id.keys():
if not pandas.isna(process_efficiencies.loc[i, 'Value']):
self.model.processes.processes._elements_by_id[process_efficiencies.loc[i,
'Process']].machinery.capacity.value = process_efficiencies.loc[i, 'Parameter']
const = rba.xml.parameters.Function(process_efficiencies.loc[i, 'Parameter'], 'constant', parameters={
'CONSTANT': process_efficiencies.loc[i, 'Value']}, variable=None)
if process_efficiencies.loc[i, 'Parameter'] not in self.model.parameters.functions._elements_by_id.keys():
self.model.parameters.functions.append(const)
else:
self.model.parameters.functions._elements_by_id[const.id].parameters._elements_by_id[
'CONSTANT'].value = process_efficiencies.loc[i, 'Value']
self.rebuild_from_model()
def inject_specific_kapps(self, specific_kapps, round_to_digits=0):
"""
Parameters
----------
specific_kapps : pandas.DataFrame
"""
parameterized = []
if 'Enzyme_ID' in list(specific_kapps.columns):
for enz in list(specific_kapps['Enzyme_ID']):
if not pandas.isna(specific_kapps.loc[specific_kapps['Enzyme_ID'] == enz, 'Kapp'].values[0]):
if numpy.isfinite(specific_kapps.loc[specific_kapps['Enzyme_ID'] == enz, 'Kapp'].values[0]):
if enz not in parameterized:
# copy the isozyme list so the ModelStructure entry is not mutated in place
all_enzs = list(self.ModelStructure.EnzymeInfo.Elements[enz]['Isozymes'])
all_enzs.append(enz)
parameterized += all_enzs
if len(all_enzs) == 1:
proto_enz = all_enzs[0]
else:
proto_enz = [i for i in all_enzs if not '_duplicate_' in i][0]
val = round(specific_kapps.loc[specific_kapps['Enzyme_ID']
== enz, 'Kapp'].values[0], round_to_digits)
const = rba.xml.parameters.Function(
str(proto_enz + '_kapp__constant'), 'constant', parameters={'CONSTANT': val}, variable=None)
if str(proto_enz + '_kapp__constant') not in self.model.parameters.functions._elements_by_id.keys():
self.model.parameters.functions.append(const)
else:
# self.model.parameters.functions._elements_by_id[const.id] = const
self.model.parameters.functions._elements_by_id[
const.id].parameters._elements_by_id['CONSTANT'].value = val
count = 0
for e in self.model.enzymes.enzymes:
if e.id in all_enzs:
count += 1
e.forward_efficiency = str(proto_enz + '_kapp__constant')
e.backward_efficiency = str(proto_enz + '_kapp__constant')
if count == len(all_enzs):
break
self.rebuild_from_model()
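# Usage sketch: inject the enzyme-specific kapps estimated by
# estimate_specific_Kapps (NaN or infinite 'Kapp' entries are skipped by the
# method itself).
# Simulation.inject_specific_kapps(specific_kapps=specific_kapps, round_to_digits=2)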
def get_parameter_definition(self, parameter):
if parameter in self.model.parameters.functions._elements_by_id.keys():
function = self.model.parameters.functions._elements_by_id[parameter]
expression = parse_function(function)
elif parameter in self.model.parameters.aggregates._elements_by_id.keys():
function_id_list = get_function_list_of_aggregate(
aggregate=self.model.parameters.aggregates._elements_by_id[parameter])
expression = parse_aggregate(aggregate=self.model.parameters.aggregates._elements_by_id[parameter], function_list=[
self.model.parameters.functions._elements_by_id[f_id] for f_id in function_id_list])
else:
return({})
return(expression)
def get_parameter_value(self, parameter):
if parameter in self.model.parameters.functions._elements_by_id.keys():
function = self.model.parameters.functions._elements_by_id[parameter]
expression = parse_function_with_parameter_values(function)
elif parameter in self.model.parameters.aggregates._elements_by_id.keys():
function_id_list = get_function_list_of_aggregate(
aggregate=self.model.parameters.aggregates._elements_by_id[parameter])
expression = parse_aggregate_with_parameter_values(aggregate=self.model.parameters.aggregates._elements_by_id[parameter], function_list=[
self.model.parameters.functions._elements_by_id[f_id] for f_id in function_id_list])
else:
return({parameter: numpy.nan})
variable_values = {}
for v in expression[parameter]['Variables']:
if v == 'growth_rate':
variable_values[v] = self.Mu
elif v in self.Medium.keys():
variable_values[v] = self.Medium[v]
elif v.endswith('_e'):
if v[:-2] in self.Medium.keys():
variable_values[v] = self.Medium[v[:-2]]
else:
variable_values = {}
return({parameter: numpy.nan})
result = evaluate_expression(expression_dictionary=expression,
variable_values=variable_values)
return(result)
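# Usage sketch: evaluate a model parameter at the current growth rate and
# medium; 'default_efficiency' is only an example ID, any function or
# aggregate defined in the model works.
# value = Simulation.get_parameter_value(parameter='default_efficiency')
# definition = Simulation.get_parameter_definition(parameter='default_efficiency')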
def get_parameter_values(self, parameter_type, species=None, output_format='dict'):
if parameter_type == 'medium_composition':
if species is None:
results = self.Medium
elif type(species) is str:
results = {species: self.Medium[species]}
elif type(species) is list:
results = {sp: self.Medium[sp] for sp in species}
elif parameter_type == 'machine_efficiencies':
if species is None:
parameter_names = {process_name: self.model.processes.processes._elements_by_id[self.ModelStructure.ProcessInfo.Elements[
process_name]['ID']].machinery.capacity.value for process_name in self.ModelStructure.ProcessInfo.Elements.keys()}
elif type(species) is str:
parameter_names = {
species: self.model.processes.processes._elements_by_id[self.ModelStructure.ProcessInfo.Elements[species]['ID']].machinery.capacity.value}
elif type(species) is list:
parameter_names = {
sp: self.model.processes.processes._elements_by_id[self.ModelStructure.ProcessInfo.Elements[sp]['ID']].machinery.capacity.value for sp in species}
results = {pn: self.get_parameter_value(
parameter=parameter_names[pn]) for pn in parameter_names}
elif parameter_type == 'enzyme_efficiencies' or parameter_type == 'enzyme_efficiencies_forward' or parameter_type == 'enzyme_efficiencies_backward':
if species is None:
parameter_names = {enzyme_name: {'Forward': self.model.enzymes.enzymes._elements_by_id[enzyme_name].forward_efficiency, 'Backward': self.model.enzymes.enzymes._elements_by_id[
enzyme_name].backward_efficiency} for enzyme_name in self.ModelStructure.EnzymeInfo.Elements.keys()}
elif type(species) is str:
parameter_names = {species: {'Forward': self.model.enzymes.enzymes._elements_by_id[
species].forward_efficiency, 'Backward': self.model.enzymes.enzymes._elements_by_id[species].backward_efficiency}}
elif type(species) is list:
parameter_names = {enzyme_name: {'Forward': self.model.enzymes.enzymes._elements_by_id[enzyme_name].forward_efficiency,
'Backward': self.model.enzymes.enzymes._elements_by_id[enzyme_name].backward_efficiency} for enzyme_name in species}
if parameter_type == 'enzyme_efficiencies':
results = {pn: {'Forward': self.get_parameter_value(parameter=parameter_names[pn]['Forward']), 'Backward': self.get_parameter_value(
parameter=parameter_names[pn]['Backward'])} for pn in parameter_names.keys()}
elif parameter_type == 'enzyme_efficiencies_forward':
results = {pn: self.get_parameter_value(
parameter=parameter_names[pn]['Forward']) for pn in parameter_names.keys()}
elif parameter_type == 'enzyme_efficiencies_backward':
results = {pn: self.get_parameter_value(
parameter=parameter_names[pn]['Backward']) for pn in parameter_names.keys()}
elif parameter_type == 'maximal_densities':
density_dict = {i.compartment: self.get_parameter_value(
parameter=i.upper_bound) for i in self.model.density.target_densities}
if species is None:
results = density_dict
elif type(species) is str:
results = {sp: density_dict[sp]
for sp in [species] if sp in density_dict.keys()}
elif type(species) is list:
results = {sp: density_dict[sp] for sp in species if sp in density_dict.keys()}
elif parameter_type == 'target_values':
target_dict = {self.ModelStructure.TargetInfo.Elements[target_ID]['TargetEntity']: {'Target_id': target_ID, 'Target_value': self.get_parameter_value(
parameter=self.ModelStructure.TargetInfo.Elements[target_ID]['TargetValue'])} for target_ID in self.ModelStructure.TargetInfo.Elements.keys()}
if species is None:
results = target_dict
elif type(species) is str:
results = {sp: target_dict[sp]
for sp in [species] if sp in target_dict.keys()}
elif type(species) is list:
results = {sp: target_dict[sp] for sp in species if sp in target_dict.keys()}
if output_format == 'dict':
return(results)
if output_format == 'json':
return(json.dumps(results))
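# Usage sketch: query grouped parameter values; output_format='json' returns
# the same mapping serialised via json.dumps.
# medium = Simulation.get_parameter_values(parameter_type='medium_composition')
# kapps_fw = Simulation.get_parameter_values(
#     parameter_type='enzyme_efficiencies_forward', output_format='json')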
def get_parameter_value_from_model(function, parameter_ID):
return(function.parameters._elements_by_id[parameter_ID].value)
def make_paramter_function_specific(function_ID, parameter, return_normal=False):
if return_normal:
return(str(parameter))
else:
return(str('{}__parameter__{}'.format(function_ID, parameter)))
def parse_function(function):
independent_variable = function.variable
function_ID = function.id
if function.type == 'constant':
eq = make_paramter_function_specific(
function_ID=function_ID, parameter='CONSTANT', return_normal=True)
latex_string = str(make_paramter_function_specific(
function_ID=function_ID, parameter='CONSTANT', return_normal=True))
function_parameter_values = {'CONSTANT': get_parameter_value_from_model(
function=function, parameter_ID='CONSTANT')}
elif function.type == 'exponential':
eq = 'e**({}*{})'.format(make_paramter_function_specific(function_ID=function_ID,
parameter='RATE', return_normal=True), str(independent_variable))
latex_string = str('e^{'+str(make_paramter_function_specific(function_ID=function_ID,
parameter='RATE', return_normal=True)) + ' '+str(independent_variable)+'}')
function_parameter_values = {'RATE': get_parameter_value_from_model(
function=function, parameter_ID='RATE')}
elif function.type == 'linear':
eq = str('{}+{}*{}'.format(make_paramter_function_specific(function_ID=function_ID, parameter='LINEAR_CONSTANT', return_normal=True),
make_paramter_function_specific(function_ID=function_ID, parameter='LINEAR_COEF', return_normal=True), str(independent_variable)))
latex_string = str(make_paramter_function_specific(function_ID=function_ID, parameter='LINEAR_CONSTANT', return_normal=True) +
make_paramter_function_specific(function_ID=function_ID, parameter='LINEAR_COEF', return_normal=True)+' '+str(independent_variable))
function_parameter_values = {'LINEAR_CONSTANT': get_parameter_value_from_model(function=function, parameter_ID='LINEAR_CONSTANT'),
'LINEAR_COEF': get_parameter_value_from_model(function=function, parameter_ID='LINEAR_COEF'),
'X_MIN': get_parameter_value_from_model(function=function, parameter_ID='X_MIN'),
'X_MAX': get_parameter_value_from_model(function=function, parameter_ID='X_MAX'),
'Y_MIN': get_parameter_value_from_model(function=function, parameter_ID='Y_MIN'),
'Y_MAX': get_parameter_value_from_model(function=function, parameter_ID='Y_MAX'), }
elif function.type == 'michaelisMenten':
eq = str('{}*{}/({}+{})'.format(make_paramter_function_specific(function_ID=function_ID, parameter='kmax', return_normal=True),
str(independent_variable), str(independent_variable), make_paramter_function_specific(function_ID=function_ID, parameter='Km', return_normal=True)))
function_parameter_values = {'kmax': get_parameter_value_from_model(function=function, parameter_ID='kmax'),
'Km': get_parameter_value_from_model(function=function, parameter_ID='Km'),
'Y_MIN': get_parameter_value_from_model(function=function, parameter_ID='Y_MIN')}
return({function_ID: {'Type': function.type, 'Equation': eq, 'Variables': [str(independent_variable)], 'Function_parameters': function_parameter_values}})
def parse_function_with_parameter_values(function):
independent_variable = function.variable
function_ID = function.id
if function.type == 'constant':
return({function_ID: {'Equation': '{}'.format(str(get_parameter_value_from_model(function=function, parameter_ID='CONSTANT'))), 'Variables': []}})
elif function.type == 'exponential':
return({function_ID: {'Equation': '{}**({}*{})'.format(str(numpy.e), str(get_parameter_value_from_model(function=function, parameter_ID='RATE')), str(independent_variable)), 'Variables': [str(independent_variable)]}})
elif function.type == 'linear':
return({function_ID: {'Equation': str('{}+{}*{}'.format(str(get_parameter_value_from_model(function=function, parameter_ID='LINEAR_CONSTANT')), str(get_parameter_value_from_model(function=function, parameter_ID='LINEAR_COEF')), str(independent_variable))), 'Variables': [str(independent_variable)]}})
elif function.type == 'michaelisMenten':
return({function_ID: {'Equation': str('{}*{}/({}+{})'.format(str(get_parameter_value_from_model(function=function, parameter_ID='kmax')), str(independent_variable), str(get_parameter_value_from_model(function=function, parameter_ID='Km')), str(independent_variable))), 'Variables': [str(independent_variable)]}})
def get_parameter_of_function(function, parameter):
return(function.parameters._elements_by_id[parameter])
def join_functions_multiplicatively(parsed_function_list):
term_list = []
variable_list = []
for function in parsed_function_list:
function_ID = list(function.keys())[0]
term_list.append(str('('+function[function_ID]['Equation']+')'))
variable_list += function[function_ID]['Variables']
return({'Type': 'Aggregate', 'Equation': '*'.join(term_list), 'Variables': list(set(variable_list))})
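# Worked example (hypothetical parsed functions): joining
# {'f1': {'Equation': '0.1', 'Variables': []}} and
# {'f2': {'Equation': '2*growth_rate', 'Variables': ['growth_rate']}}
# yields
# {'Type': 'Aggregate', 'Equation': '(0.1)*(2*growth_rate)', 'Variables': ['growth_rate']}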
def get_function_list_of_aggregate(aggregate):
return([agg.function for agg in aggregate.function_references._elements])
def parse_aggregate_with_parameter_values(aggregate, function_list):
aggregate_ID = aggregate.id
if aggregate.type == 'multiplication':
parsed_function_list = [parse_function_with_parameter_values(
function) for function in function_list]
return({aggregate_ID: join_functions_multiplicatively(parsed_function_list=parsed_function_list)})
else:
return({aggregate_ID: {'Equation': '', 'Variables': []}})
def parse_aggregate(aggregate, function_list):
aggregate_ID = aggregate.id
if aggregate.type == 'multiplication':
parsed_function_list = [parse_function(function) for function in function_list]
result = {aggregate_ID: join_functions_multiplicatively(
parsed_function_list=parsed_function_list)}
result[aggregate_ID]['Multiplicative Terms'] = [f.id for f in function_list]
return(result)
else:
return({aggregate_ID: {'Type': 'Aggregate', 'Equation': '', 'Variables': [], 'Multiplicative Terms': []}})
# def transform_to_latex(equation):
#
def MediumDependentCoefficients_A(Controler):
out = {}
MedDepRxns = [list(i.keys()) for i in list(Controler.ExchangeMap.values())]
MedDepRxnsFlatted = list(set([item for sublist in MedDepRxns for item in sublist]))
for i in Controler.ModelStructure.EnzymeConstraintsInfo.Elements.keys():
if Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]['AssociatedReaction'] in MedDepRxnsFlatted:
nonConst = False
for j in Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]['CapacityParameter']:
if list(j.values())[0]['FunctionType'] != 'constant':
nonConst = True
if nonConst:
if Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]['AssociatedReaction'] in list(out.keys()):
out[Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]
['AssociatedReaction']].append(i)
else:
out.update(
{Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]['AssociatedReaction']: [i]})
return([(out[i][0], Controler.ModelStructure.ReactionInfo.Elements[i]['Enzyme'])for i in out.keys()])
def QualitativeMediumChange(Controller, changes, species):
QualitativeMediumChange = False
if float(Controller.Medium[species]) == float(0):
if float(changes[species]) != float(0):
boundValue = 1000.0
QualitativeMediumChange = True
else:
return([QualitativeMediumChange])
if float(Controller.Medium[species]) != float(0):
if float(changes[species]) == float(0):
boundValue = 0.0
QualitativeMediumChange = True
else:
return([QualitativeMediumChange])
return([QualitativeMediumChange, float(boundValue)])
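# Worked example: a species absent from the old medium (0.0) but supplied by
# the new one opens the exchange bound -> [True, 1000.0]; one removed from
# the medium closes it -> [True, 0.0]; otherwise [False] is returned.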
def ProtoProteomeRecording(Controller, run, Proteinlevels):
out = {}
for i in list(Controller.ModelStructure.ProteinGeneMatrix['ProtoProteins']):
row_ind = list(Controller.ModelStructure.ProteinGeneMatrix['ProtoProteins']).index(i)
nonZero = list(numpy.nonzero(
Controller.ModelStructure.ProteinGeneMatrix['Matrix'][row_ind, :])[0])
level = 0
for j in nonZero:
id = Controller.ModelStructure.ProteinGeneMatrix['Proteins'][j]
level += Proteinlevels.loc[id, run]
out.update({i: level})
return(out)
def ProteomeRecording(Controller, run):
EnzDF = pandas.DataFrame(index=Controller.Problem.Enzymes)
PrcDF = pandas.DataFrame(index=Controller.Problem.Processes)
EnzDF[run] = [Controller.Problem.SolutionValues[i] for i in Controller.Problem.Enzymes]
PrcDF[run] = [Controller.Problem.SolutionValues[i] for i in Controller.Problem.Processes]
ProteinProteinMatrix = numpy.array(
Controller.ModelStructure.ProteinMatrix['Matrix']).astype(numpy.float64)
C = Controller.ModelStructure.ProteinMatrix['Consumers']
Consumers = []
for i in C:
if i.startswith('P_'):
# Consumers.append(str(i+'_machinery'))
Consumers.append(str(i))
if not i.startswith('P_'):
Consumers.append(i)
Proteins = Controller.ModelStructure.ProteinMatrix['Proteins']
DF = pandas.concat([EnzDF, PrcDF], axis=0)
ProteinLevels = pandas.DataFrame(index=Proteins)
vector = numpy.nan_to_num(DF[run].reindex(Consumers))
Level = ProteinProteinMatrix.dot(vector)
ProteinLevels[run] = Level
addedProts = [col for col in Controller.Problem.LP.col_names if col.startswith('TotalLevel_')]
if len(addedProts) > 0:
for p in addedProts:
protID = p.split('TotalLevel_')[1]
ProteinLevels.loc[protID, run] = Controller.Problem.SolutionValues[p]
return(dict(zip(list(ProteinLevels.index), list(ProteinLevels[run]))))
def mapIsoReactions(Controller):
if hasattr(Controller, 'Results'):
out = pandas.DataFrame()
for run in list(Controller.Results['Reactions'].columns):
rf = dict(zip(list(Controller.Results['Reactions'].index), list(
Controller.Results['Reactions'][run])))
rf = {k: v for k, v in rf.items() if v != 0.}
rf_merged = collections.defaultdict(float)
for reac_id, flux_val in rf.items():
if "duplicate" in reac_id:
last_idx = reac_id.index('duplicate') - 1
rf_merged[reac_id[:last_idx]] += flux_val
else:
rf_merged[reac_id] += flux_val
if len(list(out)) == 0:
out[run] = list(rf_merged.values())
out.index = list(rf_merged.keys())
else:
runDF = pandas.DataFrame(list(rf_merged.values()),
index=list(rf_merged.keys()), columns=[run])
runDF = runDF.reindex(list(set(list(out.index)).union(
set(list(rf_merged.keys())))), fill_value=0)
out = out.reindex(list(set(list(out.index)).union(
set(list(rf_merged.keys())))), fill_value=0)
out = out.join(runDF, how='outer')
return(out)
def buildExchangeMap(Controller):
"""
Returns a map of all metabolites, the corresponding transport-reactions and stoichiometires;
exchanged with the medium.
{Metabolite1 : {ExchangeReaction1 : stoch-coefficient1 , ExchangeReaction2 : stoch-coefficient2},
{Metabolite2 : {ExchangeReaction1 : stoch-coefficient1 , ExchangeReaction2 : stoch-coefficient2}}
Metabolite1 - ... MetaboliteN : All metabolite-species in the medium (see medium.tsv file)
ExchangeReaction1 - ... ExchangeReactionN : All metabolic reactions, which exchange the respective metabolite with the medium.
stoch-coefficient : Stochiometric coefficient with which the respective metabolite is exchanged by the corresponding reaction.
(Negative when reaction transports metabolite out of the cell; and positive when inside the cell.)
Parameters
----------
Controller : rbatools.NewControler.RBA_newControler
Returns
-------
Dict.
"""
BoundaryMetabolites = [i for i in list(Controller.ModelStructure.MetaboliteInfo.Elements.keys(
)) if Controller.ModelStructure.MetaboliteInfo.Elements[i]['boundary']]
ExchangeMap = {}
for bM in BoundaryMetabolites:
for rxn in Controller.ModelStructure.MetaboliteInfo.Elements[bM]['ReactionsInvolvedWith']:
Reactants = list(
Controller.ModelStructure.ReactionInfo.Elements[rxn]['Reactants'].keys())
Products = list(Controller.ModelStructure.ReactionInfo.Elements[rxn]['Products'].keys())
if len(list(set(list(Reactants+Products)))) > 1:
for met in list(set(list(Reactants+Products))):
# if met != bM:
if met == bM:
MediumSpecies = findExchangeMetInMedium(met, Controller.Medium)
if met in Reactants:
stochCoeff = - \
Controller.ModelStructure.ReactionInfo.Elements[rxn]['Reactants'][met]
elif met in Products:
stochCoeff = Controller.ModelStructure.ReactionInfo.Elements[rxn]['Products'][met]
if MediumSpecies in list(ExchangeMap.keys()):
ExchangeMap[MediumSpecies].update({rxn: stochCoeff})
else:
ExchangeMap[MediumSpecies] = {rxn: stochCoeff}
return(ExchangeMap)
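# Minimal sketch of how the structure returned by buildExchangeMap can be
# used downstream. All reaction/metabolite IDs and flux values below are
# invented for illustration and do not refer to any specific model.
def _exampleExchangeMapUsage():
    ExchangeMap = {'M_glc__D': {'R_GLCpts': 1.0},
                   'M_h': {'R_GLCpts': -1.0, 'R_ATPS4r': 1.0}}
    fluxes = {'R_GLCpts': 2.5, 'R_ATPS4r': 10.0}
    # Net exchange of a medium species is the stoichiometry-weighted sum of
    # the fluxes of all reactions transporting it (negative = net export).
    return sum(coeff*fluxes[rxn] for rxn, coeff in ExchangeMap['M_h'].items())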
def findExchangeMetInMedium(metabolite, Medium):
"""
Returns the most likely species in the Medium, for any Metabolic species.
Parameters
----------
metabolite : str
Medium : dict
Returns
-------
Most likely ID as str
"""
if metabolite.endswith('_e'):
out = difflib.get_close_matches('_e'.join(metabolite.split('_e')[: -1]), Medium, 1)
else:
out = difflib.get_close_matches(metabolite, Medium, 1)
if len(out) > 0:
return(out[0])
else:
return('')
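# Sketch of the fuzzy matching performed above (IDs invented): for a boundary
# species ending in '_e', the suffix is stripped and difflib picks the
# closest key of the Medium dictionary.
def _exampleFindExchangeMet():
    Medium = {'M_glc__D': 10.0, 'M_o2': 0.21}
    return findExchangeMetInMedium('M_glc__D_e', Medium)  # -> 'M_glc__D'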
def buildCompositionofUnusedProtein(species, composition):
products = {}
reactants = {}
for j in composition.keys():
stoch = composition[j]
if len(list(species[j]['Products'].keys())) > 0:
for k in species[j]['Products'].keys():
if k in products:
products[k] += species[j]['Products'][k]*stoch
else:
products.update({k: species[j]['Products'][k]*stoch})
if len(list(species[j]['Reactants'].keys())) > 0:
for k in species[j]['Reactants'].keys():
if k in reactants:
reactants[k] += species[j]['Reactants'][k]*stoch
else:
reactants.update({k: species[j]['Reactants'][k]*stoch})
uniqueMets = list(set(list(products.keys())+list(reactants.keys())))
NetMets = {}
for j in uniqueMets:
if j in list(products.keys()):
produced = products[j]
elif j not in list(products.keys()):
produced = 0
if j in list(reactants.keys()):
consumed = reactants[j]
elif j not in list(reactants.keys()):
consumed = 0
NetStoch = produced-consumed
NetMets.update({j: float(NetStoch)})
return(NetMets)
def buildUsedProteinConstraint(Controler, protein):
ProteinConstraint = numpy.zeros(Controler.Problem.LP.A.shape[1])
ConsumersEnzymes = Controler.ModelStructure.ProteinInfo.Elements[protein]['associatedEnzymes']
ConsumersProcess = Controler.ModelStructure.ProteinInfo.Elements[protein]['SupportsProcess']
for j in ConsumersEnzymes:
StochFactor = Controler.ModelStructure.EnzymeInfo.Elements[j]['Subunits'][protein]['StochFac']
LikeliestVarName = difflib.get_close_matches(j, Controler.Problem.LP.col_names, 1)[0]
ColIndex = Controler.Problem.LP.colIndicesMap[LikeliestVarName]
ProteinConstraint[ColIndex] = float(StochFactor)
for j in ConsumersProcess:
LikeliestVarName = difflib.get_close_matches(str(
Controler.ModelStructure.ProcessInfo.Elements[j]['ID']+'_machinery'), Controler.Problem.LP.col_names, 1)[0]
StochFactor = Controler.ModelStructure.ProcessInfo.Elements[j]['Composition'][protein]
ColIndex = Controler.Problem.LP.colIndicesMap[LikeliestVarName]
ProteinConstraint[ColIndex] = float(StochFactor)
return(ProteinConstraint)
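# Small numeric sketch of what the row returned by buildUsedProteinConstraint
# encodes: dotted with the LP solution vector it gives the total amount of
# the protein consumed by enzymes and process machineries. Dimensions and
# stoichiometries below are invented for illustration.
def _exampleUsedProteinRow():
    row = numpy.zeros(5)
    row[1] = 2.0    # two copies per unit of the enzyme at column 1
    row[4] = 12.0   # twelve copies per unit of the machinery at column 4
    solution = numpy.array([0.0, 0.3, 0.0, 0.0, 0.01])
    return float(row.dot(solution))  # total usage of the protein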
def determineCoefficient(x, changes, species):
multiplicativeFactors = []
for k in x:
result = 1
type = list(k.values())[0]['FunctionType']
pars = list(k.values())[0]['FunctionParameters']
if type == 'constant':
result = numpy.float64(pars['C'])
if type == 'exponential':
L = 1
if 'Lambda' in list(pars.keys()):
L = numpy.float64(pars['Lambda'])
result = numpy.exp(float(changes[species])*L)
if type == 'indicator':
maxi = numpy.inf
mini = -numpy.inf
if 'xMax' in list(pars.keys()):
maxi = numpy.float64(pars['xMax'])
if 'xMin' in list(pars.keys()):
mini = numpy.float64(pars['xMin'])
result = (float(changes[species]) > mini) and (float(changes[species]) < maxi)
if type == 'linear':
X_maxi = numpy.inf
X_mini = -numpy.inf
Y_maxi = numpy.inf
Y_mini = -numpy.inf
A = 1
C = 0
if 'A' in list(pars.keys()):
A = numpy.float64(pars['A'])
if 'C' in list(pars.keys()):
C = numpy.float64(pars['C'])
if 'xMin' in list(pars.keys()):
X_mini = numpy.float64(pars['xMin'])
if 'xMax' in list(pars.keys()):
X_maxi = numpy.float64(pars['xMax'])
if 'yMin' in list(pars.keys()):
Y_mini = numpy.float64(pars['yMin'])
if 'yMax' in list(pars.keys()):
Y_maxi = numpy.float64(pars['yMax'])
X = float(changes[species])
if float(changes[species]) < X_mini:
X = X_mini
if float(changes[species]) > X_maxi:
X = X_maxi
Y = A*X + C
result = Y
if Y < Y_mini:
result = Y_mini
if Y > Y_maxi:
result = Y_maxi
if type == 'michaelisMenten':
Y_mini = -numpy.inf
KM = 0
VM = 1
if 'Km' in list(pars.keys()):
KM = numpy.float64(pars['Km'])
if 'Vmax' in list(pars.keys()):
VM = numpy.float64(pars['Vmax'])
if 'yMin' in list(pars.keys()):
Y_mini = numpy.float64(pars['yMin'])
Y = VM*float(changes[species])/(float(changes[species])+KM)
result = Y
if Y < Y_mini:
result = Y_mini
if type == 'competitiveInhibition':
Y_mini = -numpy.inf
KM = 0
VM = 1
KI = 0
I = 0
if 'Ki' in list(pars.keys()):
KI = numpy.float64(pars['Ki'])
if 'I' in list(pars.keys()):
I = numpy.float64(pars['I'])
if 'Km' in list(pars.keys()):
KM = numpy.float64(pars['Km'])
if 'Vmax' in list(pars.keys()):
VM = numpy.float64(pars['Vmax'])
if 'yMin' in list(pars.keys()):
Y_mini = numpy.float64(pars['yMin'])
Y = VM*float(changes[species])/(float(changes[species])+KM*(1+I/KI))
result = Y
if Y < Y_mini:
result = Y_mini
if type == 'inverse':
C = 1
if 'C' in list(pars.keys()):
C = numpy.float64(pars['C'])
result = 1
            if float(changes[species]) != 0:
                result = C/float(changes[species])
multiplicativeFactors.append(result)
value = numpy.prod(numpy.array(multiplicativeFactors))
return(float(value))
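# Example invocation of determineCoefficient with a made-up parameter
# definition (keys follow the FunctionType/FunctionParameters scheme used
# above; the species ID and values are hypothetical):
def _exampleDetermineCoefficient():
    x = [{'capacity': {'FunctionType': 'constant',
                       'FunctionParameters': {'C': 2.0}}},
         {'saturation': {'FunctionType': 'michaelisMenten',
                         'FunctionParameters': {'Km': 0.5, 'Vmax': 1.0}}}]
    changes = {'M_glc': 0.5}
    # constant factor 2.0 times Michaelis-Menten saturation 0.5 -> 1.0
    return determineCoefficient(x, changes, 'M_glc')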
|
/**
* Reads the response out to String
*/
public String readResponse(HttpResponse response) throws ParseException, IOException {
String responseEntity = EntityUtils.toString(response.getEntity());
LOG.debug("readResponse entity : {}", responseEntity);
return responseEntity;
} |
#include <iostream>
#include <algorithm>
#include <functional>
using namespace std;
int main() {
    // Greedy: after sorting in descending order, take the largest values
    // until the taken sum (ms) strictly exceeds the remaining sum (s).
    int n, a[100], s = 0, ms = 0, c;
    cin >> n;
    for (c = 0; c < n; ++c) {
        cin >> a[c];
        s += a[c];
    }
    sort(a, a + n, greater<int>());  // descending order
    for (c = 0; ms <= s; ++c) {
        ms += a[c];
        s -= a[c];
    }
    cout << c;  // minimum number of elements needed
}
|
Shadows of Evil casts up to four players as classic film noir archetypes — the Magician, the Femme Fatale, the Cop, and the Boxer — who are pulled into strange and terrible circumstances by a mysterious figure known as the Shadow Man. Jeff Goldblum (Jurassic Park, The Fly), Heather Graham (The Hangover I-III, Boogie Nights), Neal McDonough (Captain America: The First Avenger, Band of Brothers), Ron Perlman (Hellboy, Pacific Rim), and Robert Picardo (Star Trek: Voyager, The Wonder Years) lend their voices and likenesses to the characters.
A cooperative survival zombie mode has been a regular feature in Call of Duty games since Nazi Zombies was added as an Easter egg in Call of Duty: World at War. That initial mode was a relatively simple exercise in surviving for as many waves as possible, but subsequent iterations have grown increasingly complex in terms of gameplay and narrative.
“The development team has packed Shadows of Evil with more gameplay and fun than any other experience before it,” explains Treyarch and Activision’s press release, “and has an appropriately twisted and deep narrative, performed by fantastic acting talent, that will keep our fans spinning.”
Just because it’s gone back into the past, don’t expect merely prosaic tommy guns. The trailer offers glimpses of a few gadgets, including a rocket-propelled shield, and also shows off a number of paranormal abilities that the players will be able to access from some sort of magical gumball machine. One of these turns the player temporarily into a ravaging tentacle monster. And those will come in handy because there are more than just zombies to contend with this time around. You’ll also be dealing with monstrous, Lovecraftian creatures.
Call of Duty: Black Ops III, including Shadows of Evil, is scheduled to launch worldwide on November 6, 2015 for PlayStation 3, PlayStation 4, Xbox 360, Xbox One, and PC. Pre-ordering on PS4 will earn you entry into the multiplayer beta, which launches on August 19, according to a tweet from the developer. |
Attenuating Catastrophic Forgetting by Joint Contrastive and Incremental Learning
In class incremental learning, discriminative models are trained to classify images while adapting to new instances and classes incrementally. Training a model to adapt to new classes without total access to previous class data, however, leads to the known problem of catastrophic forgetting of the previously learnt classes. To alleviate this problem, we show how we can build upon recent progress on contrastive learning methods. In particular, we develop an incremental learning approach for deep neural networks operating at both the classification and representation levels, which alleviates forgetting and learns more general features for data classification. Experiments performed on several datasets demonstrate the superiority of the proposed method with respect to well-known state-of-the-art methods.
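The abstract does not spell out the training objective. Purely as an illustration of the general idea — not the paper's actual formulation — a joint classification-plus-contrastive loss can be sketched as cross-entropy on the class logits plus a simplified NT-Xent term over two augmented views of each image:

import numpy as np

def joint_loss(logits, labels, z_i, z_j, tau=0.1, lam=0.5):
    """Cross-entropy + simplified NT-Xent over two views (illustrative only)."""
    # softmax cross-entropy on the classification head
    p = np.exp(logits - logits.max(axis=1, keepdims=True))
    p /= p.sum(axis=1, keepdims=True)
    ce = -np.log(p[np.arange(len(labels)), labels]).mean()
    # contrastive term: matching views should be most similar
    zi = z_i / np.linalg.norm(z_i, axis=1, keepdims=True)
    zj = z_j / np.linalg.norm(z_j, axis=1, keepdims=True)
    sim = zi @ zj.T / tau                          # (N, N) cosine similarities
    sim -= sim.max(axis=1, keepdims=True)          # numerical stability
    log_p = sim - np.log(np.exp(sim).sum(axis=1, keepdims=True))
    nt_xent = -np.diag(log_p).mean()               # positives on the diagonal
    return ce + lam * nt_xent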
// UnseenNotificationsCount returns the number of unseen, undeleted notifications for a given user.
func (store *Store) UnseenNotificationsCount(userID globalid.ID) (int, error) {
count := 0
err := store.Get(&count, `SELECT COUNT(*) FROM notifications WHERE user_id=$1 AND deleted_at IS NULL AND seen_at IS NULL`, userID)
if err != nil {
return 0, errors.Wrap(err, "UnseenNotificationsCount failed")
}
return count, nil
} |
@classmethod
def between_segments(cls, start, finish):
for segment in cls.between_dates(start.start,
finish.start,
start.num_segments):
yield segment |
import numpy as np
import scipy.linalg

def optimalRescaling(trajs):
    # trajs: (N, T) array holding N trajectories sampled at T points
    N = trajs.shape[0]
    FF = np.tensordot(trajs, trajs, (1, 1))  # (N, N) Gram matrix
    NFF = FF - N*np.eye(N)*FF                # rescale the diagonal by (1 - N)
    w = scipy.linalg.inv(NFF) @ np.ones(N)
    return w / np.sum(w)                     # weights normalized to sum to 1
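# Quick synthetic check of optimalRescaling: the returned weights should sum
# to one. The trajectory data below is random and purely illustrative.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    trajs = rng.normal(size=(4, 100))   # 4 trajectories, 100 samples each
    w = optimalRescaling(trajs)
    print(w, w.sum())                   # w.sum() is ~1.0 by construction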
def main():
    # Multi-source, depth-limited BFS on an undirected graph. Later queries
    # override earlier ones, so the (vertex, depth, color) triples are
    # processed in reverse and already-colored states are skipped.
    n, m = map(int, input().split())
    ab = [list(map(int, input().split())) for _ in [0]*m]
    q = int(input())
    vdc = [list(map(int, input().split())) for _ in [0]*q]
    g = [[] for _ in [0]*n]
    [g[a-1].append(b-1) for a, b in ab]
    [g[b-1].append(a-1) for a, b in ab]
    dist = [[0]*11 for _ in [0]*n]  # dist[v][d]: color set at v with d steps left
    dist2 = [0]*n                   # final color of each vertex
for v, d, c in vdc[::-1]:
v -= 1
if dist[v][d] != 0:
continue
dist[v][d] = c
if dist2[v] == 0:
dist2[v] = c
if d > 0:
q = [(v, d, c)]
while q:
v, d, c = q.pop()
for i in g[v]:
if dist[i][d-1] == 0:
dist[i][d-1] = c
if dist2[i] == 0:
dist2[i] = c
if d > 1:
q.append((i, d-1, c))
for i in dist2:
print(i)
main()
|
Mar 13 2016 by Tessa Green
Many college students use Rate My Professor as a tool to create the perfect schedule with the best professors. It is a site where students anonymously rate professors at their college; however, many students do not realize that this site might be discrediting many well-qualified female and minority professors. On March 4th, Inside Higher Ed reported a new study showing that students tend to give higher ratings to white male professors.
This new study was published in the journal PLOS ONE. It found that in their anonymous reviews on the site, students used words such as "genius" or "brilliant" to describe their white male professors more often than their female and minority professors. So, why is this the case? Well, it turns out that those described as "genius" and "brilliant" were in fields, such as physics, that are unfortunately lacking in female and minority professors. These findings could also point to a vicious cycle. According to Inside Higher Ed, teacher evaluations are highly valued in the education system when it comes to hiring and promoting. If white male professors are getting higher reviews than their female and minority coworkers, this helps explain why there is a lack of diversity within certain fields.
So, does this mean that there is a bias against female and minority professors on sites such as Rate My Professor? Well, there is definitely a pretty large gender and race gap on the site. Students may eventually move away from this harmful stereotype, but until then it might be smart to use caution and reason before trusting the reviews on these kinds of websites.
Photo Credits: Jan Ingar Grindheim Flickr.com
# app/services/student.py
from app.models.answers import Answers
from app.models.student_test import StudentTest, student_test_schema
from app import db
from flask import request, jsonify
from ..models.student import Student, student_schema, students_schema
# Create a student
def post_student():
try:
name = request.json['name']
cpf = request.json['cpf']
course = request.json['course']
email = request.json['email']
phone = request.json['phone']
# business rule
students = students_schema.dump(Student.query.all())
if len(students) >= 100:
return jsonify({'message': 'student limit exceeded', 'data': students}), 200
# Filter student by cpf
student = student_by_cpf(cpf)
if student:
return jsonify({'message': 'student already exists', 'data': {}}), 200
new_student = Student(name, cpf, course, email, phone)
db.session.add(new_student)
db.session.commit()
result = student_schema.dump(new_student)
        return jsonify({'message': 'successfully registered', 'data': result}), 201
except:
return jsonify({'message': 'server error', 'data': {}}), 500
# Filter student by cpf
def student_by_cpf(cpf):
try:
return Student.query.filter(Student.cpf == cpf).one()
except:
return None
# ---------------------------------------
# Get all students
def get_students():
students = students_schema.dump(Student.query.all())
if students:
return jsonify({'message': 'successfully fetched', 'data': students}), 200
return jsonify({'message': 'data not found', 'data': {}}), 404
# ---------------------------------------
# Get a single student
def get_student(student_id):
student = student_schema.dump(Student.query.get(student_id))
if student:
return jsonify({'message': 'successfully fetched', 'data': student}), 200
return jsonify({'message': "student not found", 'data': {}}), 404
# ---------------------------------------
# Update a student
def update_student(student_id):
try:
name = request.json['name']
cpf = request.json['cpf']
course = request.json['course']
email = request.json['email']
phone = request.json['phone']
student = Student.query.get(student_id)
if not student:
return jsonify({'message': "student not found", 'data': {}}), 404
student.name = name
student.cpf = cpf
student.course = course
student.email = email
student.phone = phone
db.session.commit()
result = student_schema.dump(student)
return jsonify({'message': 'successfully updated', 'data': result}), 201
except:
return jsonify({'message': 'server error', 'data': {}}), 500
# ---------------------------------------
# Delete a student
def delete_student(student_id):
try:
student = Student.query.get(student_id)
if not student:
return jsonify({'message': "student not found", 'data': {}}), 404
# Delete all tests for this student
delete_student_tests(student_id)
db.session.delete(student)
db.session.commit()
result = student_schema.dump(student)
return jsonify({'message': 'successfully deleted', 'data': result}), 200
except:
return jsonify({'message': 'server error', 'data': {}}), 500
# Delete all tests for this student
def delete_student_tests(student_id):
try:
student_tests = StudentTest.query.filter(StudentTest.student_id == student_id).all()
for test in student_tests:
data_tests = student_test_schema.dump(test)
answers = Answers.query.filter(Answers.answers_id == data_tests['student_test_id']).all()
for answer in answers:
db.session.delete(answer)
db.session.delete(test)
db.session.commit()
return {}
except:
return None
|
The equivalence of the transient behaviour formulae for the single server queue
It is indeed natural to expect that the different results obtained for the same problem are equal, but this needs to be established. For the M/M/1/N queue, using a simple algebraic approach we prove that the results obtained by Takács and by Sharma and Gupta are equal. Furthermore, a direct proof of the equivalence between all formulae for the M/M/1/∞ queue is established. At the end of this paper, we show that the time-dependent state probabilities for the M/M/1/N queue can be written in series form; the coefficients satisfy simple recurrence relations which allow for the rapid and efficient evaluation of the state probabilities. Moreover, a brief comparison of our technique, Sharma and Gupta's formula and Takács' result is also given, in terms of the CPU time for computing the state probabilities.
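The recurrence relations themselves are not reproduced in the abstract. As a generic numerical cross-check only (not the series method described above), the transient state probabilities of an M/M/1/N queue can be obtained by integrating the Kolmogorov forward equations dp/dt = pQ for the birth-death generator Q:

import numpy as np
from scipy.integrate import solve_ivp

def mm1n_transient(lam, mu, N, t):
    """p_n(t) for the M/M/1/N queue starting empty (numerical ODE solution)."""
    Q = np.zeros((N + 1, N + 1))
    for n in range(N + 1):
        if n < N:
            Q[n, n + 1] = lam      # arrival (blocked when the queue is full)
        if n > 0:
            Q[n, n - 1] = mu       # service completion
        Q[n, n] = -Q[n].sum()      # diagonal makes rows sum to zero
    p0 = np.eye(N + 1)[0]          # start in state 0 (empty system)
    sol = solve_ivp(lambda _, p: p @ Q, (0.0, t), p0, rtol=1e-9, atol=1e-12)
    return sol.y[:, -1]            # state probabilities at time t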
async def async_notify_update(self, update_type):
    LOGGER.debug("Hub %s updated", update_type.name)
if update_type == aiopulse.UpdateType.rollers:
await update_devices(self.hass, self.config_entry, self.api.rollers)
self.hass.config_entries.async_update_entry(
self.config_entry, title=self.title
)
async_dispatcher_send(
self.hass, ACMEDA_HUB_UPDATE.format(self.config_entry.entry_id)
)
for unique_id in list(self.current_rollers):
if unique_id not in self.api.rollers:
LOGGER.debug("Notifying remove of %s", unique_id)
self.current_rollers.pop(unique_id)
async_dispatcher_send(
self.hass, ACMEDA_ENTITY_REMOVE.format(unique_id)
) |
After years of inquiry, $40m in expenses and an unprecedented clash with the Central Intelligence Agency, the Senate intelligence committee voted on Thursday to declassify portions of a study into the agency's use of torture on detainees suspected of being involved in terrorism.
The landmark 11-3 vote now places the Obama administration back at the center of an inherited controversy that it has sought for over five years to escape.
That controversy has immediate implications for the military tribunals of the 9/11 defendants at Guantánamo Bay, several of whom were subjected to the abuse.
Committee chairwoman Dianne Feinstein of California, a public champion of the investigation, called its findings "shocking" and the CIA's behavior "in stark contrast to our values as a nation".
"This nation admits its errors, as painful as they may be," Feinstein said in a short statement following the vote, which took place in a secret session.
The committee voted to authorize release of the executive summary, findings, conclusions and dissenting views of a report that accuses the CIA of conducting an abusive regimen of interrogations, extralegal detentions and so-called "renditions" of suspected terrorists to partner countries, and then misleading the Bush administration and Congress about its utility.
But the CIA considers the Senate study misleading and factually inaccurate – and it has recently been in a public fight with its overseers featuring accusations of criminal misconduct and even constitutional usurpation.
And while the White House recommitted itself on Thursday to a prompt public release of portions of the report, it announced that the CIA will play the lead role in reviewing for publication a report that calls Langley lawless and deceitful.
“The CIA, in consultation with other agencies, will conduct the declassification review,” National Security Council spokeswoman Caitlin Hayden said.
“The president has been clear that he wants this process completed as expeditiously as possible, consistent with national security, and that’s what we will do.”
Feinstein pointedly noted after the vote that how much of what the administration makes public “in itself will indicate where we are on this.” She hoped for a partial declassification within 30 days, and said she would seek a fuller declassification of the 6,200-page document later – a move that could give the committee leverage over the scope of how much information the administration seeks to withhold from the executive summary release.
The committee study covered 100 detainees who were either in CIA custody or sent by the US to other countries for interrogation, a far larger figure than previously known.
Alberto Mora, the former Navy general counsel who fought with the Bush administration over torture, said he considered the Senate vote to be a vindication.
“I think we’re going to have an authoritative, factual accounting of what actually happened with interrogations,” Mora said.
The Obama administration, after a Justice Department inquiry that ended in 2012, declined to prosecute anyone at the CIA for involvement in waterboarding, which gives those subjected to it the feeling that they are drowning, and other brutal interrogation methods. That move caused severe disillusionment among human rights groups.
But the Senate committee's action has rekindled at least some advocates' hopes for greater transparency about one of the darker episodes of the post-9/11 era, as well as an official recognition of torture's ineffectiveness.
And while the Senate study may not reopen the question of legal accountability for the CIA, it has direct implications for the military tribunal at Guantánamo Bay for the accused 9/11 conspirators.
On Wednesday, defense lawyers for one of those men, Ammar al-Baluchi, said they filed a now-sealed motion for the Senate torture report to be entered as evidence in his case.
On 14 April, the tribunal will consider whether Baluchi’s co-defendant, Ramzi bin al-Shibh, is competent to stand trial. Bin al-Shibh was subjected to the so-called “enhanced interrogation techniques” that are detailed in the Senate report.
“Whether Mr Baluchi was tortured goes to the heart of the defense and prosecution cases. It’s relevant to what evidence can be admitted against him, it’s relevant to how he’s treated before the trial and it’s relevant to what sentence he should receive, if any,” said James Connell, one of the Defense Department attorneys assigned to Baluchi.
Two of the perceived swing voters on the Senate committee – independent Angus King and Republican Susan Collins, both of Maine – said that the techniques used on al-Baluchi and others “constituted torture”, a rebuke to a decade-long effort by the CIA and its allies to recast the interrogations as something less brutal.
“Torture is wrong, and we must make sure that the misconduct and the grave errors made in the CIA’s detention and interrogation program never happen again,” the senators said in a Wednesday statement announcing they would vote for partial declassification, a move that apparently cleared the path to Thursday’s outcome.
But in comments echoed by several committee members, particularly Republicans, King and Collins faulted the people who produced the report for eschewing interviews with CIA officials and associated personnel and excluding GOP committee staff.
The committee's senior Republican, Saxby Chambliss, said he thought the inquiry was "a waste of time" and redundant with a years-old Senate armed services committee investigation, although that inquiry did not examine the CIA.
But Chambliss said he voted for the partial release.
"We need to get this behind us," Chambliss said.
The vote might not have happened this month had Feinstein not taken to the Senate floor in March to accuse the CIA of attempting to subvert the torture investigation by withholding critical information and even intruding on a Senate computer network set up by the agency to facilitate document sharing.
“I have grave concerns that the CIA’s search may well have violated the separation of powers principles embodied in the United States constitution, including the speech and debate clause,” Feinstein said on 11 March.
The CIA maintains that it did not improperly access that network, although neither party is releasing the terms of the agreement between the agency and the committee about network access. It has counter-charged that committee staff may have committed a crime by removing a classified document from a secured facility, an act that Feinstein defended in her speech.
The agency’s inspector general is reviewing the overlapping accusations, as is the Senate sergeant-at-arms. While the CIA referred the document removal incident to the Justice Department for a possible criminal inquiry, the department has not commented regarding its plans.
Human rights advocates praised the committee’s vote for partial declassification as a milestone in a years-long debate over torture.
“This vote is a really important step forward in the fight to build a durable consensus against torture in the United States. It’s going to definitively show the pro-torture narratives out there were false,” said Raha Wala of Human Rights First.
CIA spokesman Dean Boyd issued a statement about the vote for partial declassification:
If the Senate Select Committee on Intelligence (SSCI) submits portions of the Rendition, Detention and Interrogation (RDI) report to the CIA for classification review, the CIA will carry out the review expeditiously. While we have not yet been provided a final version of the report, our review of the 2012 version found several areas in which CIA and SSCI agreed, and several other areas in which we disagreed. The CIA has acknowledged and learned from the RDI program’s shortcomings and has taken corrective measures to prevent such mistakes from happening again. At the same time, we owe it to the men and women directed to carry out this program to try and ensure that any historical account of it is accurate.
Steve Kleinman, an air force reserve colonel and former interrogator, said he hoped one outcome of the pending declassification would be to eliminate the popular perception that interrogations must involve torture to be effective.
“When anyone tortures, that’s not interrogation, that’s torture. We can’t conflate the two,” Kleinman said.
Mora, the former navy general counsel, said the partial release of the report did not remove the legal impediments to prosecuting the architects and practitioners of torture.
But, Mora said, it would help the United States “return to the principle in the future that anyone who commits this crime, regardless of rank, will be called to account and perhaps criminally prosecuted”. |
More than planned: Implementation intention effects in non-planned situations
Forming implementation intentions (i.e., if-then planning) is a powerful self-regulation strategy that enhances goal attainment by facilitating the automatic initiation of goal-directed responses upon encountering critical situations. Yet, little is known about the consequences of forming implementation intentions for goal attainment in situations that were not specified in the if-then plan. In three experiments, we assessed goal attainment in terms of speed and accuracy in an object classification task, focusing on situations that were similar or dissimilar to critical situations and required planned or different responses. The results of Experiments 1 and 3 provide evidence for a facilitation of planned responses in critical and in sufficiently similar situations, enhancing goal attainment when the planned response was required and impairing it otherwise. In Experiment 3, additional unfavorable effects however emerged in situations that were dissimilar to the critical one but required the planned response as well. We discuss theoretical implications as well as potential benefits and pitfalls emerging from these non-planned effects of forming implementation intentions. |
JODHPUR: At about 11.30 am, a MiG-27 upgrade aircraft on a routine sortie crashed in the residential area of Mahavir Nagar, Kudi Housing Board. The pilot, Flying Officer Kandpal, ejected safely seconds before the aircraft crashed, and there was no loss of life at the spot.
Two houses and a vehicle parked nearby suffered major damage. The aircraft crashed barely 50 metres from the spot where the pilot himself had a narrow brush with death, ejecting out over an electric transformer in the local Bhaironath temple. The aircraft came down in a vacant plot after hitting the wall of a house; while the aircraft was completely destroyed, everybody in the house escaped unhurt. The crash site is hardly eight km from the Air Force base in Jodhpur from which the aircraft took off.
ADM I Arun Kumar Hasija confirmed that there were no civilian casualties and that the area was cordoned off immediately. The local SHO, Basni, Rajesh Yadav, said that in addition to the IAF's fire tender, local tenders also rushed to the spot, and that the fire was brought under control easily and within minutes.
Meanwhile, the Defence PRO Lt. Col. Manish Ojha said that the aircraft had got airborne from Jodhpur Air Base and was on a routine training sortie, and that a Court of Inquiry has been ordered to investigate the reasons for the accident. District officials who reached the spot said that the scant fuel in the aircraft helped in controlling the fire.
It was in January 2015 that a MiG-27 fighter jet on a routine sortie from the Jodhpur air base to Uttarlai crashed in Rajasthan's Barmer, with a fragment of it falling on a passing motorcyclist. The injured young man, Loon Singh, was on his way to a village to distribute invitations for his wedding. The pilot in that case, too, ejected safely.
def checkWin(self, playerIdentity):
    # Check the three rows
    for r in range(3):
        if (self.gameBoard[r][0] == playerIdentity and self.gameBoard[r][1]
                == playerIdentity and self.gameBoard[r][2] == playerIdentity):
            return True
    # Check the three columns
    for c in range(3):
        if (self.gameBoard[0][c] == playerIdentity and self.gameBoard[1][c]
                == playerIdentity and self.gameBoard[2][c] == playerIdentity):
            return True
    # Check the two diagonals
    if (self.gameBoard[0][0] == playerIdentity and self.gameBoard[1][1] ==
            playerIdentity and self.gameBoard[2][2] == playerIdentity):
        return True
    elif (self.gameBoard[0][2] == playerIdentity and self.gameBoard[1][1] ==
            playerIdentity and self.gameBoard[2][0] == playerIdentity):
        return True
    return False
// ToQueryString generates a query string to be provided as part of the URL in a sync command
func (c *Command) ToQueryString() string {
commandStringAsJSON, _ := json.Marshal(c.Commands)
commandString := string(commandStringAsJSON)
commandStringAsQueryString := url.QueryEscape(commandString)
return fmt.Sprintf("token=%s&commands=%s", c.Token, commandStringAsQueryString)
} |
// HandleAsyncWebhookRequest will take an HTTP request and return the
// WebhookRequest object or an error. This is used as a part of the async flow
// for the TrueLayer api.
//
// params
// - req - the http request to handle
//
// returns
// - the webhook request
// - error if an error occurs
func (t *TrueLayer) HandleAsyncWebhookRequest(req *http.Request) (*WebhookRequest, error) {
if req.Body == nil {
return nil, ErrRequestBodyNil
}
	// Decode the JSON payload into a WebhookRequest (sketch only: assumes
	// the webhook body is JSON and that "encoding/json" is imported).
	webhookReq := &WebhookRequest{}
	if err := json.NewDecoder(req.Body).Decode(webhookReq); err != nil {
		return nil, err
	}
	return webhookReq, nil
} |
/**
 * Update the player type after loading a game from a savegame.
*/
public void updatePlayerTyp() {
switch (this.playerBlack.getPlayerType()) {
case KI_LEVEL1:
this.playerBlack = new KI(this.playerBlack.getColor(), Level.LEVEL1);
break;
case KI_LEVEL2:
this.playerBlack = new KI(this.playerBlack.getColor(), Level.LEVEL2);
break;
case KI_LEVEL3:
this.playerBlack = new KI(this.playerBlack.getColor(), Level.LEVEL3);
break;
}
switch (this.playerWhite.getPlayerType()) {
case KI_LEVEL1:
this.playerWhite = new KI(this.playerWhite.getColor(), Level.LEVEL1);
break;
case KI_LEVEL2:
this.playerWhite = new KI(this.playerWhite.getColor(), Level.LEVEL2);
break;
case KI_LEVEL3:
this.playerWhite = new KI(this.playerWhite.getColor(), Level.LEVEL3);
break;
}
} |
import express from "express";
import {AuthController} from "../controllers/auth/auth.controller";
import {UserController} from "../controllers/dashboard/user.controller";
// Define routes /api/v1
const wrapper = () => {
const router = express.Router();
const controller = new AuthController();
const userController = new UserController();
router.get("/", controller.get);
router.get("/callback", controller.callback);
router.post("/check", controller.check);
router.post("/signin", controller.login);
router.post("/2fa", controller.ga2fa);
router.post("/signup", controller.signup);
router.post("/logout", controller.logout);
router.post("/forgot-password", controller.forgot);
router.post("/reset/:token", controller.reset);
router.delete("/", userController.delete);
return router;
};
export const authRoutes = wrapper;
|
// nvpro-samples/gl_vk_meshlet_cadscene
/*
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#include <algorithm>
#include <assert.h>
#include <nvmath/nvmath_glsltypes.h>
#include "nvmeshlet_array.hpp"
#include "renderer.hpp"
#include "resources_gl.hpp"
#include "common.h"
namespace meshlettest {
//////////////////////////////////////////////////////////////////////////
class RendererMeshGL : public Renderer
{
public:
class Type : public Renderer::Type
{
bool isAvailable() const { return has_GL_NV_mesh_shader != 0; }
const char* name() const { return "GL mesh"; }
Renderer* create() const
{
RendererMeshGL* renderer = new RendererMeshGL();
return renderer;
}
Resources* resources() { return ResourcesGL::get(); }
unsigned int priority() const { return 4; }
};
class TypeVbum : public Renderer::Type
{
bool isAvailable() const
{
return has_GL_NV_vertex_buffer_unified_memory && has_GL_NV_uniform_buffer_unified_memory && has_GL_NV_mesh_shader;
}
const char* name() const { return "GL mesh nvbindless"; }
Renderer* create() const
{
RendererMeshGL* renderer = new RendererMeshGL();
renderer->m_bindless = true;
return renderer;
}
unsigned int priority() const { return 4; }
Resources* resources() { return ResourcesGL::get(); }
};
public:
bool init(RenderList* NV_RESTRICT list, Resources* resources, const Config& config) override;
void deinit() override;
void draw(const FrameConfig& global) override;
bool m_bindless = false;
private:
const RenderList* NV_RESTRICT m_list;
ResourcesGL* NV_RESTRICT m_resources;
Config m_config;
};
static RendererMeshGL::Type s_uborange;
static RendererMeshGL::TypeVbum s_uborange_vbum;
bool RendererMeshGL::init(RenderList* NV_RESTRICT list, Resources* resources, const Config& config)
{
m_list = list;
m_resources = (ResourcesGL*)resources;
m_config = config;
return true;
}
void RendererMeshGL::deinit() {}
void RendererMeshGL::draw(const FrameConfig& global)
{
ResourcesGL* NV_RESTRICT res = m_resources;
const CadScene* NV_RESTRICT scene = m_list->m_scene;
const CadSceneGL& sceneGL = res->m_scene;
const nvgl::ProfilerGL::Section profile(res->m_profilerGL, "Render");
bool bindless = m_bindless;
size_t vertexSize = scene->getVertexSize();
size_t vertexAttributeSize = scene->getVertexAttributeSize();
// generic state setup
glViewport(0, 0, res->m_framebuffer.renderWidth, res->m_framebuffer.renderHeight);
glBindFramebuffer(GL_FRAMEBUFFER, res->m_framebuffer.fboScene);
glClearColor(0.2f, 0.2f, 0.2f, 0.0f);
glClearDepth(1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDepthFunc(GL_LESS);
glEnable(GL_DEPTH_TEST);
if(res->m_cullBackFace)
{
glEnable(GL_CULL_FACE);
}
else
{
glDisable(GL_CULL_FACE);
}
if(res->m_clipping)
{
for(int i = 0; i < NUM_CLIPPING_PLANES; i++)
{
glEnable(GL_CLIP_DISTANCE0 + i);
}
}
glNamedBufferSubData(res->m_common.viewBuffer.buffer, 0, sizeof(SceneData), &global.sceneUbo);
glNamedBufferSubData(res->m_common.statsBuffer.buffer, 0, sizeof(CullStats), &m_list->m_stats);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, SSBO_SCENE_STATS, res->m_common.statsBuffer);
if(bindless)
{
glEnableClientState(GL_UNIFORM_BUFFER_UNIFIED_NV);
glBufferAddressRangeNV(GL_UNIFORM_BUFFER_ADDRESS_NV, UBO_SCENE_VIEW, 0, 0);
glBufferAddressRangeNV(GL_UNIFORM_BUFFER_ADDRESS_NV, UBO_OBJECT, 0, 0);
glBufferAddressRangeNV(GL_UNIFORM_BUFFER_ADDRESS_NV, UBO_GEOMETRY, 0, 0);
}
if(bindless)
{
glBufferAddressRangeNV(GL_UNIFORM_BUFFER_ADDRESS_NV, UBO_SCENE_VIEW, res->m_common.viewBuffer.bufferADDR, sizeof(SceneData));
}
else
{
glBindBufferBase(GL_UNIFORM_BUFFER, UBO_SCENE_VIEW, res->m_common.viewBuffer.buffer);
}
{
int lastMaterial = -1;
int lastGeometry = -1;
int lastMatrix = -1;
int lastChunk = -1;
bool lastShorts = false;
bool lastTask = false;
int statsGeometry = 0;
int statsMatrix = 0;
int statsMaterial = 0;
int statsDraw = 0;
bool first = true;
for(int i = 0; i < m_list->m_drawItems.size(); i++)
{
const RenderList::DrawItem& di = m_list->m_drawItems[i];
bool useTask = di.task;
if(first || useTask != lastTask)
{
glUseProgram(useTask ? res->m_programs.draw_object_mesh_task : res->m_programs.draw_object_mesh);
lastTask = useTask;
first = false;
}
if(lastGeometry != di.geometryIndex)
{
const CadSceneGL::Geometry& geo = sceneGL.m_geometry[di.geometryIndex];
int chunk = int(geo.mem.chunkIndex);
#if USE_PER_GEOMETRY_VIEWS
if(bindless)
{
glBufferAddressRangeNV(GL_UNIFORM_BUFFER_ADDRESS_NV, UBO_GEOMETRY,
res->m_setup.geometryBindings.bufferADDR + sizeof(CadSceneGL::GeometryUbo) * di.geometryIndex,
sizeof(CadSceneGL::GeometryUbo));
}
else
{
glBindBufferRange(GL_UNIFORM_BUFFER, UBO_GEOMETRY, res->m_setup.geometryBindings.buffer,
sizeof(CadSceneGL::GeometryUbo) * di.geometryIndex, sizeof(CadSceneGL::GeometryUbo));
}
#else
if(lastChunk != chunk || lastShorts != di.shorts)
{
int idx = chunk * 2 + (di.shorts ? 1 : 0);
if(bindless)
{
glBufferAddressRangeNV(GL_UNIFORM_BUFFER_ADDRESS_NV, UBO_GEOMETRY,
res->m_setup.geometryBindings.bufferADDR + sizeof(CadSceneGL::GeometryUbo) * idx,
sizeof(CadSceneGL::GeometryUbo));
}
else
{
glBindBufferRange(GL_UNIFORM_BUFFER, UBO_GEOMETRY, res->m_setup.geometryBindings.buffer,
sizeof(CadSceneGL::GeometryUbo) * idx, sizeof(CadSceneGL::GeometryUbo));
}
lastChunk = chunk;
lastShorts = di.shorts;
}
// we use the same vertex offset for both vbo and abo, our allocator should ensure this condition.
assert(uint32_t(geo.vbo.offset / vertexSize) == uint32_t(geo.abo.offset / vertexAttributeSize));
glUniform4ui(0, uint32_t(geo.topoMeshlet.offset / sizeof(NVMeshlet::MeshletDesc)),
uint32_t(geo.topoPrim.offset),
uint32_t(geo.topoVert.offset / (di.shorts ? 2 : 4)), uint32_t(geo.vbo.offset / vertexSize));
#endif
lastGeometry = di.geometryIndex;
statsGeometry++;
}
if(lastMatrix != di.matrixIndex)
{
if(bindless)
{
glBufferAddressRangeNV(GL_UNIFORM_BUFFER_ADDRESS_NV, UBO_OBJECT,
res->m_scene.m_buffers.matrices.bufferADDR + res->m_alignedMatrixSize * di.matrixIndex,
sizeof(CadScene::MatrixNode));
}
else
{
glBindBufferRange(GL_UNIFORM_BUFFER, UBO_OBJECT, res->m_scene.m_buffers.matrices.buffer,
res->m_alignedMatrixSize * di.matrixIndex, sizeof(CadScene::MatrixNode));
}
lastMatrix = di.matrixIndex;
statsMatrix++;
}
if(useTask)
{
glUniform4ui(1, di.meshlet.offset, di.meshlet.offset + di.meshlet.count - 1, 0, 0);
}
uint32_t offset = useTask ? 0 : di.meshlet.offset;
uint32_t count = useTask ? (di.meshlet.count + 31) / 32 : di.meshlet.count;
glDrawMeshTasksNV(offset, count);
statsDraw++;
}
(void)statsGeometry;
(void)statsMatrix;
(void)statsDraw;
}
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, SSBO_SCENE_STATS, 0);
glBindBufferBase(GL_UNIFORM_BUFFER, UBO_SCENE_VIEW, 0);
glBindBufferBase(GL_UNIFORM_BUFFER, UBO_OBJECT, 0);
glBindBufferBase(GL_UNIFORM_BUFFER, UBO_GEOMETRY, 0);
res->copyStats();
if(res->m_clipping)
{
for(int i = 0; i < NUM_CLIPPING_PLANES; i++)
{
glDisable(GL_CLIP_DISTANCE0 + i);
}
}
if(m_bindless)
{
glDisableClientState(GL_UNIFORM_BUFFER_UNIFIED_NV);
}
if(global.meshletBoxes)
{
res->drawBoundingBoxes(m_list);
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
} // namespace meshlettest
|
def trade(self, symbol: str, id: int, callback, **kwargs):
self.live_subscribe("{}@trade".format(symbol.lower()), id, callback, **kwargs) |
// Repeated queries, ds design
import java.util.*;
import static java.lang.Math.abs;
import static java.lang.Math.min;

public class WordDistance {
    Map<String, List<Integer>> map = new HashMap<>();
public WordDistance(String[] words) {
for (int i=0; i<words.length; i+=1) {
if (map.containsKey(words[i])) map.get(words[i]).add(i);
else map.put(words[i], new ArrayList<>(Arrays.asList(new Integer[]{i})));
}
}
public int shortest(String word1, String word2) {
List<Integer> wl1 = map.get(word1);
List<Integer> wl2 = map.get(word2);
int i = 0;
int j = 0;
int distance = Integer.MAX_VALUE;
while (i < wl1.size() && j < wl2.size()) {
distance = min(distance, abs(wl1.get(i) - wl2.get(j)));
if (wl1.get(i) > wl2.get(j)) j += 1;
else i += 1;
}
if (i < wl1.size()) distance = min(distance, abs(wl1.get(i) - wl2.get(j-1)));
else if (j < wl2.size()) distance = min(distance, abs(wl1.get(i-1) - wl2.get(j)));
return distance;
}
} |
With a few notable exceptions, it's widely accepted that we're now living through a golden age of TV, to the point that even in a year where we've had the likes of Narcos 3, Stranger Things (upcoming), another season of Walking Dead, Glow and many more, some argue that the quality is flagging slightly.
Whatever the score, Band of Brothers was one of the first mega-budget television shows to grace our screens. Produced by Steven Spielberg and Tom Hanks (who was directed by Spielberg and nominated for an Academy Award for Saving Private Ryan four years earlier), the show followed the World War Two 'career' of 'Easy' Company, a group of soldiers from the US 506th Parachute Infantry Regiment, 101st Airborne Division, from their time preparing for combat at a UK training base to the horrors of the war and its immediate aftermath.
Credit: HBO / Band of Brothers
Easy company saw some of the most horrific fighting of the conflict, including fierce battles in Eindhoven (the Netherlands), Carentan (France) and the freezing cold of Bastogne (Belgium). On top of this, towards war's end, the battle-weary unit witnessed firsthand the tragedy of Kaufering concentration camp.
Looking back on the 2001 show, it's the exceptional realism (feted by survivors, many of whom were still alive when the programme aired and were interviewed about their experience) that stands out. But, revisiting it a little more closely, it's also undeniable that a stellar cast played a hefty role in its critical and commercial success.
With Homeland's Damian Lewis taking on the lead role (in as much as there was one) of Richard Winters, who led the company into battle during D-Day, served throughout the war (rising through the ranks) and later also served in the Korean War, the calibre of acting was supreme.
Damian Lewis. Credit: HBO / Band of Brothers
However, a number of now-mega stars also featured in the programme, albeit in smaller roles. Michael Fassbender portrayed Sgt. Burton "Pat" Christenson in seven episodes of the show, although his speaking parts were fairly minimal compared to his later success. Meanwhile, Tom Hardy also appeared in a later episode after the liberation of Holland, wherein he is caught in bed with a local woman, retaining his modesty with only a helmet in one of the show's (understandably rare) moments of hilarity.
Michael Fassbender. Credit: HBO / Band of Brothers
Tom Hardy. Credit HBO / Band of Brothers
Other actors to appear in the show include James McAvoy in the Replacements episode, portraying a soldier roughed up by unfriendly veterans who don't take too kindly to newcomers replacing their fallen friends. It might have been brief, but his appearance (he dies in the episode) is a memorable one, though perhaps not as memorable as David Schwimmer's. Schwimmer (Ross from Friends) played Captain Sobel, who trained the soldiers before deployment but was ultimately not sent into battle with them. Schwimmer's performance was somewhat controversial, with Sobel portrayed as a villain, but in later interviews surviving members of the company praised Sobel for preparing them for the horrors of war and effectively keeping them alive.
Other UK actors to feature in the show include Dominic Cooper, albeit in a crowded scene (and non-speaking role), Simon Pegg as Sobel's assistant (with a decent American accent to boot), Lock Stock and Two Smoking Barrels' Dexter Fletcher, who featured in several of the show's episodes as Staff Sergeant John "Johnny" Martin, and Green Street's Marc Warren, who stars in the episode Carentan, playing the character Blithe. When originally aired, the show's makers mistakenly said Blithe died during the war in a hospital (he went blind during the episode) but he actually survived, was sent home and later served in Korea.
Words: Ronan O'Shea |
Two people chat by a chilled fruit machine, among other vending machines, in 1959. Underwood Archives/Getty Images
It’s 4 p.m. at work, and your stomach is rumbling. It’s too early for dinner, and too late to justify a trip to the corner store for a snack. So you rustle around your pockets for change and head to the vending machine. What’ll it be today? Doritos, a Coke, or perhaps a package of Famous Amos chocolate chip cookies?
The choices contained within a vending machine didn’t used to be as predictable. In the early- to mid-20th century, vending machines in the United States and Europe dispensed myriad treats and trinkets. Americans in the ‘40s, for instance, could get a fully cooked hot dog from a vending machine known as a “Speedy Weeny.” Machines in the United Kingdom dispensed hearty fare, such as potatoes and eggs, in the 1960s. Models in Germany provided clocks. Flight insurance became so popular in the United States that airports installed vending machines where people could purchase it ahead of their trips.
Unique vending machines aren’t just something of yesteryear. In Japan, vending machines dispense everything from lobsters to amulets (at Buddhist temples). At the Moscow airport, a vending machine will give you caviar.
But while the United States is a world leader in sheer numbers of vending machines, their respective uniqueness has declined. Since Polyvend introduced the glass-front snack machine in 1972, vending machines have been home primarily to a predictable lineup of sweet and salty snacks, with notable exceptions such as the Tombstone’s deep dish pizza dispenser.
It’s curious that you can find roughly the same contents in American vending machines, whether you’re at an office building in Portland or a mall in Port Arthur. In fact, in 2014, the Center for Science in the Public Interest (CSPI) found that chips, candy, and sweet baked goods constituted over 80 percent of vending machines’ contents on United States public property (which includes places such as parks, libraries, public hospitals, and rest stops). In health-conscious cities such as Washington, D.C., meanwhile, vending machines haven’t started spouting organic fare, and it’s rare to find local foods.
Wouldn’t work be better if you could buy a variety of hot snacks automatically? Vendic / CC BY-SA 3.5
Why is this the case? Consumer demand for salty and sugary fast food has likely led to junk food dominating American vending machines. Confections, candy, snacks, and energy drinks also offer the most revenue for vending machine operators over any other offerings in the glass case.
The uniformity may also be meant to offer a sense of familiarity. This is the same strategy fast food restaurants used to evolve into empires in the 20th century, too. “The key to a successful franchise, according to many texts on the subject, can be expressed in one word: uniformity,” writes Eric Schlosser, author of Fast Food Nation: The Dark Side of the All-American Meal. “Franchises and chain stores strive to offer exactly the same product or service at numerous locations. Customers are drawn to familiar brands by an instinct to avoid the unknown. A brand offers a feeling of reassurance when its products are always and everywhere the same.”
The tides may be turning away from just cheese puffs, though. According to a 2016 State of the Vending Industry report, “consumers are moving towards certain types of healthier snacks and alternative options.” This recalls a recent controversy over whether schools should stock junk food in vending machines, which led the USDA to announce, in 2013, a planned ban of unhealthy foods from elementary and high-school vending machines.
An old Coca-Cola vending machine pictured outside an office in Bisbee, Arizona. Rich Helmer
The question of offering a greater variety of snacks in U.S. vending machines depends on demand, technological advances in refrigeration and stocking, cultural standards, and economics. Still, as The Washington Post notes, selling produce in vending machines is financially unviable and challenging (fruit and vegetables are less shelf-stable than a bag of chips). But if Chinese companies can dispense hot French fries at the touch of a button, it's not outside the realm of possibility for, say, macaroni and cheese to appear in vending machines in the U.S. It'd break up the monotony of the late afternoon Lay's and Fanta break, at least.
Help us figure out when, and why, vending machines in the United States began stocking similar snacks. Send these musings to us, along with any procedural comments, at paula.mejia@ .
/**
* Created by David Lempia on 8/2/2015.
*
 * Copy this template and rename it with the name of your event, e.g. Event_ButtonX.
* Inputs and Outputs are put in the Behaviors_xx file. Access them using gd.xx
* Add the event to the Behaviors_xx file.
* Add inputs and outputs to the Behaviors_xx file.
* Pass parameters in through the Event_Template Constructor.
*/
public class Event_Template implements Event {
private Behavior toBehavior;
// Returning true triggers the event
// Pass variables
public boolean isTriggered(){
return(false);
}
// This runs if the event is triggered
public void onTransition(){
}
// Change Behaviors_XX to the name of your behaviors class
// Add parameters as needed
GlobalData gd;
Event_Template(GlobalData gd, Behavior toBehavior){
this.gd=gd;
this.toBehavior = toBehavior;
}
// No change needed
public Behavior getToBehavior() {
return(this.toBehavior);
}
} |
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flagutil
import (
"flag"
"fmt"
"reflect"
"strconv"
"testing"
"github.com/google/go-cmp/cmp"
"k8s.io/test-infra/prow/config/secret"
"k8s.io/test-infra/prow/github"
)
func TestGitHubOptions_Validate(t *testing.T) {
t.Parallel()
var testCases = []struct {
name string
in *GitHubOptions
expectedGraphqlEndpoint string
expectedErr bool
}{
{
name: "when no endpoints, sets graphql endpoint",
in: &GitHubOptions{},
expectedGraphqlEndpoint: github.DefaultGraphQLEndpoint,
expectedErr: false,
},
{
name: "when empty endpoint, sets graphql endpoint",
in: &GitHubOptions{
endpoint: NewStrings(""),
},
expectedGraphqlEndpoint: github.DefaultGraphQLEndpoint,
expectedErr: false,
},
{
name: "when invalid github endpoint, returns error",
in: &GitHubOptions{
endpoint: NewStrings("not a github url"),
},
expectedErr: true,
},
{
name: "both --github-hourly-tokens and --github-allowed-burst are zero: no error",
in: &GitHubOptions{
ThrottleHourlyTokens: 0,
ThrottleAllowBurst: 0,
},
expectedGraphqlEndpoint: github.DefaultGraphQLEndpoint,
},
{
name: "both --github-hourly-tokens and --github-allowed-burst are nonzero and hourly is higher or equal: no error",
in: &GitHubOptions{
ThrottleHourlyTokens: 100,
ThrottleAllowBurst: 100,
},
expectedGraphqlEndpoint: github.DefaultGraphQLEndpoint,
},
{
name: "both --github-hourly-tokens and --github-allowed-burst are nonzero and hourly is lower: error",
in: &GitHubOptions{
ThrottleHourlyTokens: 10,
ThrottleAllowBurst: 100,
},
expectedGraphqlEndpoint: github.DefaultGraphQLEndpoint,
expectedErr: true,
},
{
name: "only --github-hourly-tokens is nonzero: error",
in: &GitHubOptions{
ThrottleHourlyTokens: 10,
},
expectedGraphqlEndpoint: github.DefaultGraphQLEndpoint,
expectedErr: true,
},
{
name: "only --github-hourly-tokens is zero: no error, allows easier throttling disable",
in: &GitHubOptions{
ThrottleAllowBurst: 10,
},
expectedGraphqlEndpoint: github.DefaultGraphQLEndpoint,
expectedErr: false,
},
}
for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
err := testCase.in.Validate(false)
if testCase.expectedErr && err == nil {
t.Errorf("%s: expected an error but got none", testCase.name)
}
if !testCase.expectedErr && err != nil {
t.Errorf("%s: expected no error but got one: %v", testCase.name, err)
}
if testCase.expectedGraphqlEndpoint != testCase.in.graphqlEndpoint {
t.Errorf("%s: unexpected graphql endpoint", testCase.name)
}
})
}
}
// TestGitHubOptionsConstructsANewClientOnEachInvocation verifies that multiple invocations do not
// return the same client. This is important for components that use multiple clients with different
// settings, like for example for the throttling.
func TestGitHubOptionsConstructsANewClientOnEachInvocation(t *testing.T) {
o := &GitHubOptions{}
secretAgent := &secret.Agent{}
firstClient, err := o.githubClient(secretAgent, false)
if err != nil {
t.Fatalf("failed to construct first client: %v", err)
}
secondClient, err := o.githubClient(secretAgent, false)
if err != nil {
t.Fatalf("failed to construct second client: %v", err)
}
firstClientAddr, secondClientAddr := fmt.Sprintf("%p", firstClient), fmt.Sprintf("%p", secondClient)
if firstClientAddr == secondClientAddr {
t.Error("got the same client twice on subsequent invocation")
}
}
func TestCustomThrottlerOptions(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
params []FlagParameter
expectPresent map[string]bool
expectDefault map[string]int
}{
{
name: "no customizations",
expectPresent: map[string]bool{"github-hourly-tokens": true, "github-allowed-burst": true},
expectDefault: map[string]int{"github-hourly-tokens": 0, "github-allowed-burst": 0},
},
{
name: "suppress presence",
params: []FlagParameter{DisableThrottlerOptions()},
expectPresent: map[string]bool{"github-hourly-tokens": false, "github-allowed-burst": false},
},
{
name: "custom defaults",
params: []FlagParameter{ThrottlerDefaults(100, 20)},
expectPresent: map[string]bool{"github-hourly-tokens": true, "github-allowed-burst": true},
expectDefault: map[string]int{"github-hourly-tokens": 100, "github-allowed-burst": 20},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fs := flag.NewFlagSet(tc.name, flag.ExitOnError)
opts := &GitHubOptions{}
opts.AddCustomizedFlags(fs, tc.params...)
for _, name := range []string{"github-hourly-tokens", "github-allowed-burst"} {
flg := fs.Lookup(name)
if (flg != nil) != (tc.expectPresent[name]) {
t.Errorf("Flag --%s presence differs: expected %t got %t", name, tc.expectPresent[name], flg != nil)
continue
}
expected := strconv.Itoa(tc.expectDefault[name])
if flg != nil && flg.DefValue != expected {
t.Errorf("Flag --%s default value differs: expected %#v got '%#v'", name, expected, flg.DefValue)
}
}
})
}
}
func TestOrgThrottlerOptions(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
parameters []string
expectedErrorMsg string
expectedParsedOrgThrottlers map[string]throttlerSettings
}{
{
name: "No org throttler, success",
},
{
name: "Invalid format, a colon too much",
parameters: []string{"--github-throttle-org=kubernetes:10:10:10"},
expectedErrorMsg: "-github-throttle-org=kubernetes:10:10:10 is not in org:hourlyTokens:burst format",
},
{
name: "Invalid format, a colon too little",
parameters: []string{"--github-throttle-org=kubernetes:10"},
expectedErrorMsg: "-github-throttle-org=kubernetes:10 is not in org:hourlyTokens:burst format",
},
{
name: "Invalid format, hourly tokens not an int",
parameters: []string{"--github-throttle-org=kubernetes:a:10"},
expectedErrorMsg: "-github-throttle-org=kubernetes:a:10 is not in org:hourlyTokens:burst format: hourlyTokens is not an int",
},
{
name: "Invalid format, burst not an int",
parameters: []string{"--github-throttle-org=kubernetes:10:a"},
expectedErrorMsg: "-github-throttle-org=kubernetes:10:a is not in org:hourlyTokens:burst format: burst is not an int",
},
{
name: "Invalid, burst > hourly tokens",
parameters: []string{"--github-throttle-org=kubernetes:10:11"},
expectedErrorMsg: "-github-throttle-org=kubernetes:10:11: burst must not be greater than hourlyTokens",
},
{
name: "Invalid, burst < 1",
parameters: []string{"--github-throttle-org=kubernetes:10:0"},
expectedErrorMsg: "-github-throttle-org=kubernetes:10:0: burst must be > 0",
},
{
name: "Invalid, hourly tokens < 1",
parameters: []string{"--github-throttle-org=kubernetes:0:10"},
expectedErrorMsg: "-github-throttle-org=kubernetes:0:10: hourlyTokens must be > 0",
},
{
name: "Invalid, multiple settings for same org",
parameters: []string{
"--github-throttle-org=kubernetes:10:10",
"--github-throttle-org=kubernetes:10:10",
},
expectedErrorMsg: "got multiple -github-throttle-org for the kubernetes org",
},
{
name: "Valid single org setting, success",
parameters: []string{"--github-throttle-org=kubernetes:10:10"},
expectedParsedOrgThrottlers: map[string]throttlerSettings{"kubernetes": {hourlyTokens: 10, burst: 10}},
},
{
name: "Valid settings for multiple orgs, success",
parameters: []string{
"--github-throttle-org=kubernetes:10:10",
"--github-throttle-org=kubernetes-sigs:10:10",
},
expectedParsedOrgThrottlers: map[string]throttlerSettings{
"kubernetes": {hourlyTokens: 10, burst: 10},
"kubernetes-sigs": {hourlyTokens: 10, burst: 10},
},
},
}
exportThrottlerSettings := cmp.Exporter(func(t reflect.Type) bool {
return t == reflect.TypeOf(throttlerSettings{})
})
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fs := flag.NewFlagSet(tc.name, flag.ContinueOnError)
opts := &GitHubOptions{}
opts.AddFlags(fs)
if err := fs.Parse(tc.parameters); err != nil {
t.Fatalf("flag parsing failed: %v", err)
}
opts.AppID = "10"
opts.AppPrivateKeyPath = "/test/path"
var actualErrMsg string
if actualErr := opts.Validate(false); actualErr != nil {
actualErrMsg = actualErr.Error()
}
if actualErrMsg != tc.expectedErrorMsg {
t.Fatalf("actual error %s does not match expected error %s", actualErrMsg, tc.expectedErrorMsg)
}
if actualErrMsg != "" {
return
}
if diff := cmp.Diff(tc.expectedParsedOrgThrottlers, opts.parsedOrgThrottlers, exportThrottlerSettings); diff != "" {
t.Errorf("expected org throttlers differ from actual: %s", diff)
}
})
}
}
|
-- | Common pitch representation.
module Music.Pitch.Common
( module Music.Pitch.Common.Semitones,
module Music.Pitch.Common.Quality,
module Music.Pitch.Common.Number,
module Music.Pitch.Common.Interval,
module Music.Pitch.Common.Pitch,
module Music.Pitch.Common.Spell,
module Music.Pitch.Common.Types,
module Music.Pitch.Common.Harmony,
module Music.Pitch.Common.Names,
)
where
import Music.Pitch.Common.Harmony
import Music.Pitch.Common.Interval
import Music.Pitch.Common.Names
import Music.Pitch.Common.Number
import Music.Pitch.Common.Pitch
import Music.Pitch.Common.Quality
import Music.Pitch.Common.Semitones
import Music.Pitch.Common.Spell
import Music.Pitch.Common.Types
|
async def _find_one(self, query, pipe0=[], pipe1=[], check=True):
pipe = pipe0 + [{'$match': query}] + pipe1
logger.debug("pipe")
for stage in pipe:
logger.debug(f" {stage}")
c = self.coll.files.aggregate(pipe)
try:
d = next(c)
except StopIteration:
return None
d1 = await self._factory(d)
assert d1 is not None
if check:
try:
await d1.check()
except Exception as e:
logging.error(crayons.red(f"query: {query}"))
logging.error(crayons.red(f"pipe0: {pipe0}"))
logging.error(crayons.red(f"pipe1: {pipe1}"))
logging.error(crayons.red(f"{self!r}: check failed for {d!r}: {e!r}"))
raise
return d1 |
<gh_stars>0
// Copyright 2020 The jackal Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clusterrouter
import (
"context"
"github.com/jackal-xmpp/stravaganza/v2"
streamerror "github.com/jackal-xmpp/stravaganza/v2/errors/stream"
clusterconnmanager "github.com/ortuman/jackal/cluster/connmanager"
)
// Router defines cluster router type.
type Router struct {
connMng clusterConnManager
}
// New returns a new initialized Router instance.
func New(connMng *clusterconnmanager.Manager) *Router {
r := &Router{
connMng: connMng,
}
return r
}
// Route routes an XMPP stanza to a cluster remote resource.
func (r *Router) Route(ctx context.Context, stanza stravaganza.Stanza, username, resource, instanceID string) error {
conn, err := r.connMng.GetConnection(instanceID)
if err != nil {
return err
}
return conn.LocalRouter().Route(ctx, stanza, username, resource)
}
// Disconnect performs remote cluster resource disconnection.
func (r *Router) Disconnect(ctx context.Context, username, resource string, streamErr *streamerror.Error, instanceID string) error {
conn, err := r.connMng.GetConnection(instanceID)
if err != nil {
return err
}
return conn.LocalRouter().Disconnect(ctx, username, resource, streamErr)
}
// Start starts cluster router.
func (r *Router) Start(_ context.Context) error { return nil }
// Stop stops cluster router.
func (r *Router) Stop(_ context.Context) error { return nil }
|
<gh_stars>1-10
static void solve(int a, int b) {
ans[a][b] = 'X';
Queue<pair> q = new LinkedList<>();
q.add(new pair(a, b));
while (q.size() > 0) {
int i = q.peek().f, j = q.peek().s;
q.poll();
if (i > n || i == 0 || j > n || j == 0) {
continue;
}
for (int k = 0; k < 4; k++) {
int a1 = mat[i + x[k]][j + y[k]].f, b1 = mat[i + x[k]][j + y[k]].s;
if (a1 == a && b1 == b) {
if (ans[i + x[k]][j + y[k]] == null) {
ans[i + x[k]][j + y[k]] = d[3 - k];
q.add(new pair(i + x[k], j + y[k]));
}
}
}
}
} |
<filename>website/docusaurus.d.ts
declare module '@docusaurus/*' {
const something: any;
export = something;
}
declare module '@theme/*' {
const something: any;
export = something;
}
declare module '*.md' {
// eslint-disable-next-line import/order
import { ComponentType } from 'react';
const Component: ComponentType<{}>;
export = Component;
}
declare module '*.module.css' {
const classes: Record<string, string>;
export = classes;
}
declare module 'meteor/aldeed:simple-schema' {
import SimpleSchemaStatic from 'simpl-schema';
export class SimpleSchema extends SimpleSchemaStatic {
static _makeGeneric(name?: string): string;
static extendOptions(options: Record<string, any>): void;
}
}
declare module 'meteor/check' {
export const Match: any;
}
|
/**
* Serialize an array of byte arrays into a single byte array. Used
* to pass through a set of bytes arrays as an attribute of a Scan.
* Use {@link #toByteArrays(byte[], int)} to convert the serialized
* byte array back to the array of byte arrays.
* @param byteArrays the array of byte arrays to serialize
* @return the byte array
*/
public static byte[] toBytes(byte[][] byteArrays) {
int size = 0;
for (byte[] b : byteArrays) {
if (b == null) {
size++;
} else {
size += b.length;
size += WritableUtils.getVIntSize(b.length);
}
}
TrustedByteArrayOutputStream bytesOut = new TrustedByteArrayOutputStream(size);
DataOutputStream out = new DataOutputStream(bytesOut);
try {
for (byte[] b : byteArrays) {
if (b == null) {
WritableUtils.writeVInt(out, 0);
} else {
WritableUtils.writeVInt(out, b.length);
out.write(b);
}
}
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
try {
out.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
return bytesOut.getBuffer();
} |
While hundreds of apps for the Apple Watch have been announced and detailed, screenshots of most of the major applications had yet to be revealed, until now. Developer Steven Troughton-Smith has created a tool to view screenshots of Apple Watch applications by pasting in the link to the existing iPhone application. Below, we've put together galleries of several notable Apple Watch applications, including Twitter, Instagram, Uber, Starbucks, and Apple's own Keynote presentation remote. We'll be updating this post live as more application screenshots are discovered. WatchAware is also showcasing more interactive previews of over 2000 Apple Watch apps.
Twitter:
Instagram:
Apple Keynote Remote:
Microsoft PowerPoint:
NBA GameTime:
ESPN:
Uber:
Hotel Tonight:
RunKeeper:
Flipboard:
Pandora:
Chipotle:
Mint:
Shazam:
Slack:
Hipchat:
JetBlue:
1Password:
Things:
Calcbot:
Yahoo Weather:
Clear:
Twitterrific:
Microsoft OneNote:
Microsoft OneDrive:
American Airlines:
Delta:
Breaking News:
CNN:
AP Mobile:
Digg:
Robinhood: |
import requests
from typing import Dict
from .exceptions import AcrossException
__all__ = ["AcrossException", "AcrossAPI"]
class AcrossAPI:
BASEURL = "https://across.to/api"
def suggested_fees(
self, l2Token: str, chainId: int, amount: int
) -> Dict[str, int]:
"""get suggested fees for a given amount of l2Token.
Args:
l2Token (str): Address of L2 Token Contract to Transfer. For ETH use address `0x0`.
chainId (int): Chain ID to transfer from.
amount (int): Amount of the token to transfer.
Note: this amount is in the native decimals of the token.
So, for ETH this would be the amount of human-readable ETH multiplied by `1e18`.
For USDC, you would multiply the number of human-readable USDC by `1e6`.
Returns:
suggested fees for slow and instant relay.
- slowFeePct (int): Fee for slow relay.
- instantFeePct (int): Fee for instant relay.
Raises:
AcrossException: if the request fails.
- 400: if either the slow fee or the instant fee exceeds 25% of the amount.
- 400: invalid input.
- 500: an unexpected error within the API.
"""
url = f"{self.BASEURL}/suggested-fees"
params = {"amount": amount, "chainId": chainId, "l2Token": l2Token}
response = requests.get(url, params=params)
if response.status_code != 200:
raise AcrossException(
f"Failed to get fees for {l2Token}: {response.text}"
)
return response.json()
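
# Illustrative helper (an assumption, not part of the Across API): converts a
# human-readable token amount into the native integer units that the `amount`
# parameter above expects, e.g. to_native_units(1.5, 6) == 1500000 for 1.5 USDC.
def to_native_units(amount: float, decimals: int) -> int:
    return int(amount * 10 ** decimals)
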
if __name__ == "__main__":
api = AcrossAPI()
print(
api.suggested_fees(
"0x7f5c764cbc14f9669b88837ca1490cca17c31607", 10, 1000000000
)
)
|
def api_list(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):
return cls.stripe_class.list(api_key=api_key, **kwargs).auto_paging_iter() |
package app
import (
"github.com/go-playground/validator/v10"
"gorm.io/gorm"
"myapp/util/logger"
)
const (
appErrDataCreationFailure = "data creation failure"
appErrDataAccessFailure = "data access failure"
appErrDataUpdateFailure = "data update failure"
appErrJsonCreationFailure = "json creation failure"
appErrFormDecodingFailure = "form decoding failure"
appErrFormErrResponseFailure = "form error response failure"
)
type App struct {
logger *logger.Logger
db *gorm.DB
validator *validator.Validate
}
func New(
logger *logger.Logger,
db *gorm.DB,
validator *validator.Validate,
) *App {
return &App{
logger: logger,
db: db,
validator: validator,
}
}
func (app *App) Logger() *logger.Logger {
return app.logger
}
|
<reponame>ecmwf/pyeccodes
import pyeccodes.accessors as _
def load(h):
h.add(_.Unsigned('isFillup', 1))
h.alias('local.isFillup', 'isFillup')
h.add(_.Unsigned('yearOfForecast', 2))
h.add(_.Unsigned('monthOfForecast', 1))
h.add(_.Unsigned('dayOfForecast', 1))
h.add(_.Unsigned('hourOfForecast', 1))
h.add(_.Unsigned('minuteOfForecast', 1))
h.add(_.Constant('secondOfForecast', 0))
h.add(_.G2date('dateOfForecast', _.Get('yearOfForecast'), _.Get('monthOfForecast'), _.Get('dayOfForecast')))
h.add(_.Time('timeOfForecast', _.Get('hourOfForecast'), _.Get('minuteOfForecast'), _.Get('secondOfForecast')))
h.add(_.Julian_day('julianForecastDay', _.Get('dateOfForecast'), _.Get('hourOfForecast'), _.Get('minuteOfForecast'), _.Get('secondOfForecast')))
h.add(_.Transient('diffInDays', (_.Get('julianForecastDay') - _.Get('julianDay'))))
h.add(_.Transient('diffInHours', (((_.Get('diffInDays') * 1440) + 0.5) / 60)))
h.add(_.Round('_anoffset', _.Get('diffInHours'), 10))
h.add(_.Transient('anoffset', _.Get('_anoffset')))
h.alias('local.anoffset', 'anoffset')
h.add(_.Unsigned('anoffsetFirst', 2))
h.add(_.Unsigned('anoffsetLast', 2))
h.add(_.Unsigned('anoffsetFrequency', 2))
h.add(_.Transient('is_efas', 1))
h.add(_.Transient('lsdate_bug', 1))
def efas_post_proc_inline_concept(h):
def wrapped(h):
typeOfPostProcessing = h.get_l('typeOfPostProcessing')
if typeOfPostProcessing == 0:
return 'unknown'
if typeOfPostProcessing == 1:
return 'lisflood'
if typeOfPostProcessing == 2:
return 'lisflood_eric'
if typeOfPostProcessing == 3:
return 'lisflood_season'
if typeOfPostProcessing == 4:
return 'lisflood_merged'
if typeOfPostProcessing == 51:
return 'ericha'
if typeOfPostProcessing == 101:
return 'htessel_lisflood'
if typeOfPostProcessing == 102:
return 'htessel_eric'
if typeOfPostProcessing == 103:
return 'htessel_camaflood'
if typeOfPostProcessing == 152:
return 'epic'
dummy = h.get_l('dummy')
if dummy == 1:
return 'unknown'
return wrapped
h.add(_.Concept('efas_post_proc', None, concepts=efas_post_proc_inline_concept(h)))
h.add(_.Unsigned('yearOfModelVersion', 2))
h.add(_.Unsigned('monthOfModelVersion', 1))
h.add(_.Unsigned('dayOfModelVersion', 1))
h.add(_.Unsigned('hourOfModelVersion', 1))
h.add(_.Unsigned('minuteOfModelVersion', 1))
h.add(_.Constant('secondOfModelVersion', 0))
h.add(_.G2date('dateOfModelVersion', _.Get('yearOfModelVersion'), _.Get('monthOfModelVersion'), _.Get('dayOfModelVersion')))
h.add(_.Time('timeOfModelVersion', _.Get('hourOfModelVersion'), _.Get('minuteOfModelVersion'), _.Get('secondOfModelVersion')))
|
Ms. Yang Huiyan, China's richest woman with a net worth of 5.7 bln usd. Photo: icxo
Yang Huiyan, at just 32, is back atop the list of China’s richest women, with a net worth of nearly six billion usd.
Her property-developer father -- Yang Guoqiang -- gifted his daughter with 70% of the shares of his firm, Country Garden (HK: 2007), prior to its 2007 Hong Kong IPO.
Country Garden founder Yang Guoqiang. Photo: tupian
The elder Yang told Hong Kong media at the time: “Even if I reach the age of 100, I am going to give it to her anyway.
"She's family and I have faith in her.”
Ms Yang ranked as the richest woman again after the breakup last year of the marriage of Ms. Wu Yajun, a fellow female real estate tycoon, resulted in a costly divorce.
Ms Yang's father himself had to truly work for his money, at least at first, being a former peasant, bricklayer and construction laborer whose riches soared when he took his real estate development firm public.
Ms. Yang, a 2003 graduate of Ohio State University, married the son of a leading Chinese government official in 2006 whom she met on a blind date.
Her plate is very much full not only as majority shareholder but also vice-chairman of the Guangdong Province-based developer that focuses on high-end residential projects.
The company recently had over 110 projects underway across South China.
It wasn’t just into the home-building business, but also was heavily involved in tapping the business and leisure travel sector.
Yang Huiyan, second daughter of Country Garden's founder, was 24 when she wed in 2006. Her husband is a Tsinghua University grad who also studied in the US. Photo: guozai
Country Garden currently operates seven five-star hotels and two four-star hotels, as well as having a dozen hotels under construction in accordance with the five-star rating standard.
In 2007, just as the elder Yang’s firm was going public in Hong Kong, the PRC media wrote that even as a teenager, Ms. Yang would be taken along by her enterprising father, Country Garden's founder, to sit in on company meetings.
“Ms. Yang hardly ever said a word at the time but seemed always to be listening intently to everything.
"We all thought, rightly so it turns out, that Chairman Yang was grooming his second daughter for a high position one day,” said a Country Garden employee at the time.
Guangdong Province-based Country Garden focuses on high-end residential projects. Photo: Company
The employee added that Chairman Yang’s upbringing of Yang Huiyan in particular was reminiscent of the way Mr. Li Ka Shing raised his sons, eventually grooming his eldest to become deputy chairman of Cheung Kong Holdings (HK: 1) -- one of Hong Kong’s top developers.
Even when Ms. Yang was studying in the US, her father still required her to get a part-time job to supplement her spending money, even though he was already very wealthy at the time.
Ms. Yang Huiyan, 32, is both Country Garden's top shareholder and China's richest woman. Photo: china.org
He did everything he could to make his daughter’s four-year studies in the Midwestern US state of Ohio as productive as possible, even going so far as to hire a native English-speaking foreign girl to live and dine with their family in South China in the months before Yang Huiyan jetted off to America.
Mr. Yang could often be seen with his daughter in tow when they would push properties to interested homebuyers at exhibitions, with the two of them standing beside scale models of their residential property projects.
Just prior to taking Country Garden public in 2007, Mr. Yang said that although he didn’t consider himself old at the time (53 years of age), he said that having someone young with a hand on the helm of the firm would be a good idea given the immense challenges but limitless potential of the regional real estate sector.
Despite being the top shareholder in one of China’s leading property firms, Ms. Yang still prefers to keep a low profile and allows her father do most of the talking at company events.
Country Garden shares recently: 4.21 hkd
“She always seems to have a slight smile, but doesn’t say too much,” said a Country Garden employee.
With nearly six billion usd to her name, Ms. Yang certainly has plenty of reasons to smile, and most of us no doubt would too.
For more stories on Ms. Yang, see:
http://news.enorth.com.cn/system/2007/04/28/001637957_01.shtml
http://eladies.sina.com.cn/news/2009/1016/0955923153.shtml
HOUSE CALLS: Broker Takes On China Housing |
package com.jd.blockchain.tools.initializer;
import com.jd.blockchain.consensus.ConsensusProvider;
import com.jd.blockchain.consensus.ConsensusSettings;
import com.jd.blockchain.consensus.service.ConsensusServiceProvider;
import com.jd.blockchain.crypto.asymmetric.PrivKey;
import com.jd.blockchain.crypto.hash.HashDigest;
import com.jd.blockchain.ledger.CryptoSetting;
/**
*
* @author huanghaiquan
*
*/
public interface LedgerInitProcess {
    /**
     * Init a new ledger;
     *
     * @param currentId Id of current participant;
     * @param privKey Private key of current participant;
     * @param ledgerInitProps The settings about this initialization (ledger initialization configuration properties);
     * @param consensusSettings The consensus settings (consensus configuration properties);
     * @param consensusProvider
     * @param dbConnConfig
     * @param prompter
     * @return The hash of the new ledger; returns null if the initialization did not succeed;
     */
HashDigest initialize(int currentId, PrivKey privKey, LedgerInitProperties ledgerInitProps,
ConsensusSettings consensusSettings, ConsensusProvider consensusProvider,
DBConnectionConfig dbConnConfig, Prompter prompter);
/**
* @param currentId
* @param privKey
* @param ledgerInitProps
* @param consensusSettings
* @param consensusProvider
* @param dbConnConfig
* @param prompter
* @param cryptoSetting
* @return
*/
HashDigest initialize(int currentId, PrivKey privKey, LedgerInitProperties ledgerInitProps,
ConsensusSettings consensusSettings, ConsensusProvider consensusProvider,
DBConnectionConfig dbConnConfig, Prompter prompter, CryptoSetting cryptoSetting);
}
|
import java.io.*;
import java.util.*;
public class E {
int[]len=new int[]{3,4,5,4,3};
int[][]getGet;
boolean[]cached;
boolean[]answer;
void solve() throws Exception {
cached=new boolean[1<<19];
answer=new boolean[1<<19];
getGet=new int[5][5];
for(int r=0;r<5;r++)
for(int c=0;c<5;c++)
getGet[r][c]=get(r,c);
int set=0;
for(int r=0;r<5;r++)
{
String s=nextLine();
s=s.replaceAll(" ", "");
for(int j=0;j<len[r];j++)
if((s.charAt(j))=='O')
set|=1<<get(r,j);
}
if(rec(set))
System.out.println("Karlsson");
else
System.out.println("Lillebror");
}
boolean rec(int set)
{
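        // Memoized game search over bitmask states: returns true if the player
        // to move can win from the position encoded by 'set'.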
if(cached[set])
return answer[set];
boolean res=false;
for(int r=0;r<5;r++)
for(int c=0;c<len[r];c++)
{
int num=get(r,c);
if((set&(1<<num))>0)
{
int cr=r;
int cc=c;
int cset=set;
while(cr>=0 && cr<5 && cc>=0 && cc<len[cr] && (cset&(1<<getGet[cr][cc]))>0)
{
cset^=(1<<getGet[cr][cc]);
res|=!rec(cset);
cc++;
}
cr=r;
cc=c;
cset=set;
while(cr>=0 && cr<5 && cc>=0 && cc<len[cr] && (cset&(1<<getGet[cr][cc]))>0)
{
cset^=(1<<getGet[cr][cc]);
res|=!rec(cset);
if(cr==2 || cr==3)
cc--;
cr++;
}
cr=r;
cc=c;
cset=set;
while(cr>=0 && cr<5 && cc>=0 && cc<len[cr] && (cset&(1<<getGet[cr][cc]))>0)
{
cset^=(1<<getGet[cr][cc]);
res|=!rec(cset);
if(cr==0 || cr==1)
cc++;
cr++;
}
}
}
cached[set]=true;
answer[set]=res;
return res;
}
int get(int r,int c)
{
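        // Map board cell (r, c) to a linear bit index by summing the lengths of the rows above row r.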
int res=0;
for(int i=0;i<r;i++)
res+=len[i];
return res+c;
}
BufferedReader reader;
PrintWriter writer;
StringTokenizer stk;
void run() throws Exception {
reader = new BufferedReader(new InputStreamReader(System.in));
stk = null;
writer = new PrintWriter(System.out);
solve();
reader.close();
writer.close();
}
int nextInt() throws Exception {
return Integer.parseInt(nextToken());
}
long nextLong() throws Exception {
return Long.parseLong(nextToken());
}
double nextDouble() throws Exception {
return Double.parseDouble(nextToken());
}
String nextString() throws Exception {
return nextToken();
}
String nextLine() throws Exception {
return reader.readLine();
}
String nextToken() throws Exception {
if (stk == null || !stk.hasMoreTokens()) {
stk = new StringTokenizer(nextLine());
return nextToken();
}
return stk.nextToken();
}
public static void main(String[] a) throws Exception {
new E().run();
}
}
|
<reponame>justinhsg/AoC2020
package day1;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import utils.AoCSolvable;
import utils.NoSolutionException;
public class Solution implements AoCSolvable {
private static final boolean USE_SAMPLE = false;
private final Set<Integer> numbers;
Solution(List<String> lines){
this.numbers = new HashSet<>();
for (String s:lines){
this.numbers.add(Integer.parseInt(s));
}
}
public String partOne() throws NoSolutionException {
for (int i:numbers){
if(numbers.contains(2020-i) && i != 2020-i){
return String.valueOf((2020-i)*i);
}
}
throw new NoSolutionException();
}
public String partTwo() throws NoSolutionException {
for (int i: numbers){
for (int j: numbers){
int k= 2020-i-j;
if(i != j && i!=k && j!=k && numbers.contains(k)){
return String.valueOf(i*j*k);
}
}
}
throw new NoSolutionException();
}
public static void main(String[] args) {
try{
List<String> lines = Files.readAllLines(Paths.get((USE_SAMPLE?"./sample/":"./input/")+Solution.class.getPackageName()));
Solution s = new Solution(lines);
System.out.println(s.partOne());
System.out.println(s.partTwo());
} catch (IOException | NoSolutionException e) {
e.printStackTrace();
}
}
}
|
Optimising pharmacotherapy in older cancer patients with polypharmacy
Abstract Objective Polypharmacy is frequent among older cancer patients and increases the risk of potential drug‐related problems (DRPs). DRPs are associated with adverse drug events, drug‐drug interactions and hospitalisations. Since no standardised polypharmacy assessment methods for oncology patients exist, we aimed to develop one that can be integrated into routine care. Methods Based on the Systematic Tool to Reduce Inappropriate Prescribing (STRIP), we developed OncoSTRIP, which includes a polypharmacy anamnesis, a concise geriatric assessment, a polypharmacy analysis taking life expectancy into account and an optimised treatment plan. Patients ≥65 years with ≥5 chronic drugs visiting our outpatient oncology clinic were eligible for the polypharmacy assessment. Results OncoSTRIP was integrated into routine care of our older cancer patients. In 47 of 60 patients (78%), potential DRPs (n = 101) were found. In total, 85 optimisations were recommended, with an acceptance rate of 41%. It was possible to reduce the number of potential DRPs by 41% and the number of patients with at least one potential DRP by 30%. Mean time spent per patient was 71 min. Conclusions Polypharmacy assessment of older cancer patients identifies many pharmacotherapeutic optimisations. With OncoSTRIP, polypharmacy assessments can be integrated into routine care.
polypharmacy and occurrence of PIMs (Reis, Santos, Jesus Souza, & Reis, 2017). Both polypharmacy and the occurrence of PIMs are frequently seen in this population, with polypharmacy in up to 84% of patients (Nightingale et al., 2015) and the prevalence of PIMs is reported to be around 50% (Nightingale et al., 2015;Reis et al., 2017).
The risk of adverse effects may be even more relevant for cancer patients because of the exposure to highly active antitumor therapies and the risk of drug-drug interactions with cancer treatment.
Polypharmacy in cancer patients is associated with more grade III-IV chemotherapy-related toxicity (Hamaker et al., 2014). In case of a reduced life expectancy, it is appropriate to consider new goals of treatment, including all co-morbidities. Medication intended for long-term prevention can often be safely discontinued, as was demonstrated for statins (Kutner et al., 2015).
The appropriateness for older cancer patients of generic medication screening tools that exist for older patients have been previously reviewed (Whitman, DeGregory, Morris, & Ramsdale, 2016), such as the Screening Tool of Older Peoples' Prescriptions (STOPP) and Screening Tool to Alert doctors to Right Treatment (START) criteria (Gallagher, Ryan, Byrne, Kennedy, & O'Mahony, 2008), Beers criteria (American Geriatrics Society, 2015, Beers Criteria Update Expert Panel, 2015 and the Medication Appropriateness Index (MAI) (Hanlon et al., 1992). While older cancer patients can benefit from applying any of these tools, none of these include all relevant aspects for this specific population such as potentially unnecessary medication, the patient's condition and the treatment goals (Whitman et al., 2016). Medication screening tools specifically designed for cancer patients are sparse. One good example of a practical cancer orientated tool is the "OncoPal deprescribing guideline" (Lindsay et al., 2015) which can be applied in the terminal six months of a patients' life. However, incurable cancer patients on active treatment often have a life expectancy beyond six months, making the tool less applicable to these patients.
Another valuable cancer-specific tool is the individualised medication assessment and planning (iMAP) for older outpatient cancer patients (Nightingale et al., 2017). iMAP is a structured assessment including a patient-involved medication assessment and an analysis of medication based on the identification of potential drug-related problems (DRPs). By looking for these potential DRPs, and not only PIMs, iMAP provides a more complete medication assessment, since DRPs include problems such as overtreatment, undertreatment and potential adverse drug events (Nightingale et al., 2017;Strand, Morley, Cipolle, Ramsey, & Lamsam, 1990).
iMAP has many similarities with the Systematic Tool to Reduce Inappropriate Prescribing (STRIP) method. This method is embedded in the Dutch multidisciplinary guideline on polypharmacy in the elderly (Dutch General Practitioners, 2012). While STRIP is already commonly used in Dutch primary care, the method can be tailored to cancer patients, for example by adding a practical (de)prescribing guide suitable for cancer patients. Therefore, we developed "OncoSTRIP," a polypharmacy assessment method specifically optimised for cancer patients, with the aim to integrate it into routine care of the older cancer patient.
| Setting and study population
The study protocol was designed as an exploratory prospective study.
While not yet systematically embedded in the routine care of cancer patients, a polypharmacy assessment using STRIP is considered to be part of regular care in the Netherlands. The institutional review board concluded that the Medical Research Involving Human Subjects Act (WMO) did not apply to the study protocol and that an official ethics approval was not required. Although written informed consent was therefore not necessary, patients were informed by their oncologist/haematologist using a protocol summary before any data was collected. The OncoSTRIP was offered to patients ≥65 years with ≥5 chronic medications on active treatment that visited the outpatient oncology/haematology clinic of our community-based hospital between February 2016 and April 2017.
Patients were free to decline participation. Patients that agreed to participate were scheduled for the OncoSTRIP method in alignment of their regular visits to the outpatient clinic, infusion centre or outpatient hospital pharmacy.
| OncoSTRIP method
With OncoSTRIP, the patients followed a structured stepwise polypharmacy assessment, which consecutively consisted of four individual components described in detail in the following section.
| Polypharmacy anamnesis
The goal of the polypharmacy anamnesis step was to collect all relevant information on the patients' medication use. Prior to the anamnesis visit with the patient, the pharmacist collected relevant background data, such as medical history and medication use according to the hospital and/or community pharmacy records. For the polypharmacy anamnesis visit, a structured questionnaire was used ( Figure S1), in which the oncology drugs, supportive drugs, prescription drugs and possible over-the-counter drugs were discussed with the patient by a pharmacist. The following aspects were included: Type of drug, dose, indication, date of start, initial prescriber, effect, adverse drug effects, practical problems (including compliance) and if relevant, extra information on medical history. To allow shared decision-making, patients were asked which drugs they were willing to discontinue and which they highly valued.
| Concise geriatric assessment
In parallel to the polypharmacy anamnesis, a nurse specialist or oncology nurse performed a concise geriatric assessment with the patient.
The concise geriatric assessment consisted of scoring systems Adult Comorbidity Evaluation-27 (ACE-27) (Piccirillo, Tierney, Costas, Grove, & Spitznagel, 2004), Eastern Cooperative Oncology Group performance status (ECOG-PS) (Oken et al., 1982) and Geriatric-8 (G8) (Bellera et al., 2012), to evaluate comorbidity, performance and frailty respectively. Comorbidity, performance status and frailty are essential determinants of the treatment options, prognosis and goals of care, and therefore these were factors to consider when making the treatment plan.
| Polypharmacy analysis
The pharmaceutical analysis was structured by the evaluation of eight potential DRPs: requirement of additional drug therapy, unnecessary drug therapy, ineffective treatment, (potential) adverse effects, clinically relevant contraindications or interactions, underdosing, overdosing and practical drug use problems/optimisations. PIMs were identified by our newly developed "OncoSTRIP list of drugs suitable for deprescribing in older cancer patients" (Table S1) and categorised within the potential DRPs. This deprescribing checklist was based on the STOPP criteria (Gallagher et al., 2008), Beers criteria (American Geriatrics Society, Beers Criteria Update Expert Panel, 2015, "OncPal deprescribing guideline" (Lindsay et al., 2015), "Checklist for symptom stability after withdrawing medicines" (Potter, Flicker, Page, & Etherton-Beer, 2016) and available literature. Besides these explicit criteria, potential DRPs were also identified through the expertise of the clinical pharmacist and treating physician. If necessary, initial prescribers were contacted for further information.
| Polypharmacy treatment plan
After the analysis, the pharmacist's recommendations were reported in the patient's electronical medical record to the treating oncologist/haematologist for reviewing. Upon agreeing with the recommendations, the treating physician discussed the intended medication adjustments with the patient.
| Outcomes
Outcomes were the prevalence of potential DRPs and the proportion of pharmacotherapeutic recommendations. Furthermore, the acceptance rate of the recommendations was evaluated by reviewing patient's electronical medical and/or pharmacy records directly after the patient's consultation with the treating physician, and after median follow-up period of four months. Time invested in the different steps of the polypharmacy assessment was recorded as well.
Finally, with univariate analyses (Fisher's or Fisher-Freeman-Halton exact test, statistical significance at p < .05), the outcomes of the concise geriatric assessment were tested for the prediction of the occurrence of recommendations, to identify patients most likely to benefit from polypharmacy assessment. For the statistical analyses, IBM SPSS version 21 was used.
| Patient characteristics
None of the patients declined to participate in this study. Characteristics of the 60 patients that underwent a polypharmacy assessment are summarised in Table 1. On average, nine of their drugs were other chronic drugs (range 4-20). The most commonly used chronic drugs were for the treatment of cardiovascular, lipid and/or gastrointestinal disorders (Table 2).
| Optimisation recommendations
In total, 101 potential DRPs were found among 47 of 60 patients (78%), resulting in a mean of 1.7 per patient. As shown in Table 4, the three most commonly found potential DRPs were unnecessary drug therapy (n = 39), (potential) adverse effects (n = 17) and practical problems/optimisations (n = 14).
In total, these potential DRPs led to 85 pharmacotherapeutic recommendations, which are summarised in Table 5.
| Follow-up of recommendations
Of the 85 recommendations, 35 (41%) were implemented by the treating physician directly after reviewing and discussing it with the patient. After the median follow-up of 4 months, 32 of the 35 (91%) implemented recommendations were still maintained.
| Reduction in polypharmacy
For 17 of 60 patients (28%), it was possible to reduce the pill burden for the complete follow-up period. In 12 patients (20%), at least one drug could be discontinued. Reducing the dosing frequency could be accomplished in six patients (10%). An attempt to reduce the pill burden was made for an additional two patients (3.3%). However, due to symptom recurrence the recommended change had to be reversed. As shown in Table 4, the number of potential DRPs was reduced from 101 to 60 (41% reduction). The number of patients with at least one potential DRP could be reduced from 47 to 33 patients (30% reduction).
TABLE 2 Most commonly used chronic drugs according to their pharmacologic category, with exception of the oncology drugs
| Geriatric assessment subpopulations
To identify patients most likely to benefit from polypharmacy assessment, the outcomes of the concise geriatric assessment were assessed for possible associations with the occurrence of recommendations. No such subpopulation could be identified at statistical significance, although a trend towards significance (p = .079) was seen for people classified as "vulnerable" with the G8 screening (Table S2).
| Time investment
The mean time spent per patient is summarised in Table 6. On average, collecting the relevant data took about 15 min, the concise geriatric assessment 10 min, the polypharmacy anamnesis 24 min and the polypharmacy analysis including providing the treatment plan to the treating physician 22 min. In total, the mean duration of a polypharmacy assessment was 71 min.
| D ISCUSS I ON
In this study, a pharmacist-led polypharmacy assessment led to the identification and implementation of many possible pharmacotherapeutic optimisations among the majority of older cancer patients.
Within this population, there was a high prevalence of patients with at least one potential DRP, which is comparable to previous studies with older cancer patients (around 90%-95%) (Nightingale et al., 2017;Yeoh, Si, & Chew, 2013;Yeoh, Tay, Si, & Chew, 2015). Due to many pharmacotherapeutic recommendations, with OncoSTRIP, it was possible to reduce the total number of potential DRPs and the number of patients with at least one potential DRP.
In comparison, polypharmacy assessment through iMAP resulted in the identification of three potential DRPs per patient on average. Additionally, the total number of DRPs could be reduced by 45.5% and the number of patients with at least one potential DRP by 20.5%. The recommendation acceptance rate was 46% (Nightingale et al., 2017). Thus, despite the identification of a higher number of DRPs per patient, the reductions in DRPs were comparable between iMAP and OncoSTRIP. Cumulatively, OncoSTRIP and iMAP provide reproducible and encouraging results that support routine implementation of polypharmacy assessment in this population.
It is anticipated that a recommendation suggested or discussed by a large team, as used in iMAP, is more likely to be adapted than a recommendation suggested by one clinical pharmacist. However, with respect to the comparable acceptance rates of OncoSTRIP and iMAP, no clear preference exists between the two methods. In our view, reporting recommendations directly in the patients' electronic medical records is efficient, especially since the majority of recommendations do not require immediate action. Discussing polypharmacy recommendations in a multidisciplinary team could be beneficial in selected patients.
In this study, we did not record the reasons why prescribers may have chosen not to follow a recommendation. However, the suboptimal acceptance rate in our study can be partly explained by the observation that approximately one-third of all suggested recommendations were conditional ("if life expectancy is estimated below 2 years, than…"), as the pharmacist generally did not know the estimated life expectancy at the time of providing recommendations.
It is likely that for some patients the life expectancy was higher than the prerequisite for the recommendation, thereby making it irrelevant.
Pill reduction can decrease the risk of adverse drug events and medication errors, and positively influence compliance by simplifying intake regimens. Pill reduction was accomplished and maintained in a substantial part of patients. Undoing a pill reduction due to symptom recurrence was minimal, suggesting it is feasible for patients to stop the discontinued drug(s) for a longer period. In conclusion, the OncoSTRIP polypharmacy assessment resulted in the identification of a high number of possible pharmacotherapeutic optimisations among older cancer patients. An essential aspect for this specific population is to consider the changed goals of care with respect to a reduced life expectancy.
OncoSTRIP made it possible to integrate polypharmacy assessments into routine care of this population. Future studies are needed to identify possible high-risk subpopulations and to assess the effects of OncoSTRIP polypharmacy assessments on (longterm) patients' outcomes.
ACK N OWLED G EM ENTS
This work was supported by the Dutch Cancer Society |
// ApplyBlock executes the block, then commits and updates the mempool atomically
func (s *State) ApplyBlock(eventSwitch types.EventSwitch, block *types.Block, partsHeader types.PartSetHeader, mempool types.IMempool, round int) error {
err := s.ExecBlock(eventSwitch, block, partsHeader, round)
if err != nil {
return errors.New(cmn.Fmt("Exec failed for application: %v", err))
}
err = s.CommitStateUpdateMempool(eventSwitch, block, mempool, round)
if err != nil {
return errors.New(cmn.Fmt("Commit failed for application: %v", err))
}
return nil
} |
n = int(input())  # length of s, read to consume the input line
s = input()
# Collapse runs of consecutive duplicate characters and report the resulting length.
list_s = list(s)
i = 0
while i < len(list_s) - 1:
    if list_s[i] == list_s[i + 1]:
        list_s.pop(i + 1)
    else:
        i += 1
print(len(list_s)) |
package cmd
import "testing"
func TestRootCmd(t *testing.T) {
cmd := NewRootCmd("test")
t.Run("execute", func(t *testing.T) {
if err := cmd.Execute(); err != nil {
t.Error(err)
}
})
t.Run("print version", func(t *testing.T) {
cmd.SetArgs([]string{"-v"})
if err := cmd.Execute(); err != nil {
t.Error(err)
}
})
}
|
def superimpose_all(self):
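        # Superimpose every structure in the list onto the first one, replacing each entry in place.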
i = 1
while i < len(self.str_object_list):
self.str_object_list[i] = self.superimpose_n_ca_c(self.str_object_list[0], self.str_object_list[i])
i += 1 |
/** Changes just the color of a given point from the map. First index is 0.
* \exception Throws std::exception on index out of bound.
*/
void CColouredPointsMap::setPointColor(size_t index, float R, float G, float B)
{
if (index >= m_x.size()) THROW_EXCEPTION("Index out of bounds");
this->m_color_R[index] = R;
this->m_color_G[index] = G;
this->m_color_B[index] = B;
} |
<reponame>automl/HPO_for_RL<filename>pbt/exploitation/constant.py
class Constant:
def __init__(self):
"""
Constant mechanism which doesn't exploit any member
"""
pass
def __call__(self, own_name: str, scores: dict) -> str:
"""
Return the name of the given member
        :param own_name: The name of the current agent
:param scores: A dict with names and scores of all agents
:return: The name of the chosen better agent
"""
return own_name |
<reponame>leongaban/redux-saga-exchange<gh_stars>1-10
import * as React from 'react';
import block from 'bem-cn';
import { connect } from 'react-redux';
import { ILastPrice, OrderBookWidgetType } from 'shared/types/models';
import { IAppReduxState, Omit } from 'shared/types/app';
import { floorFloatToFixed } from 'shared/helpers/number';
import { Icon } from 'shared/view/elements';
import { CurrencyConverter } from 'services/miniTickerDataSource/namespace';
import { selectors as miniTickerDataSourceSelectors } from 'services/miniTickerDataSource';
import { selectors } from '../../../redux';
import './LastPrice.scss';
interface IStateProps {
lastPrice: ILastPrice | null;
convertQuoteCurrencyToUSDT: CurrencyConverter;
}
interface IOwnProps {
accuracy: number;
counterCurrency: string;
widgetType: OrderBookWidgetType;
}
type IProps = IOwnProps & IStateProps;
function mapState(state: IAppReduxState, { counterCurrency }: IOwnProps): IStateProps {
return {
lastPrice: selectors.selectLastPrice(state),
convertQuoteCurrencyToUSDT: miniTickerDataSourceSelectors.selectQuoteCurrencyToUSDTConverter(
state, counterCurrency
),
};
}
const arrows: Omit<Record<ILastPrice['change'], string>, 'unchanged'> = {
increased: require('./img/top-inline.svg'),
decreased: require('./img/bottom-inline.svg'),
};
const b = block('last-price');
class LastPrice extends React.PureComponent<IProps> {
public render() {
const { lastPrice, convertQuoteCurrencyToUSDT, accuracy, widgetType } = this.props;
const convertedLastPrice = lastPrice && convertQuoteCurrencyToUSDT(lastPrice.value);
return (
<div className={b({ 'widget-type': widgetType })()}>
{lastPrice !== null
? (
<>
<div className={b('value', { change: lastPrice.change })()}>
{floorFloatToFixed(lastPrice.value, accuracy)}
{lastPrice.change !== 'unchanged' && <Icon src={arrows[lastPrice.change]} className={b('arrow')()} />}
</div>
<div className={b('converted-value')()}>
{convertedLastPrice !== null ? `$ ${convertedLastPrice}` : ' - '}
</div>
</>
) : ' - '
}
</div>
);
}
}
export default (
connect(mapState, () => ({}))(
LastPrice,
)
);
|
Interest in 'keeper but he is close to signing new deal at Town
Bartosz Bialkowski says he has received interest in him from other clubs but is close to signing a new contract at Portman Road.
The 28-year-old ‘keeper’s current deal expires in the summer but the former Southampton stopper has told the Club website that he is looking forward to extending his stay at Town.
“I’m really close to signing a new contract here and I’m very happy about that,” said Bart, who produced one of the saves of the season from Korey Smith’s deflected shot in Blues’ defeat at Bristol City last weekend.
“It’s a great place to be and I’m looking forward to staying here for a longer period.
“I’ve had phone calls asking if I would be interested in something else but I’m not. I’ve not looked at anything else but staying here.” |
<filename>src/client/videostreaming/index.tsx<gh_stars>1-10
export { RemotePeerVideo } from "./RemotePeerVideo";
export { MyVideo } from "./MyVideo";
// TODO: remove this line and all occurrences of StreamSelectorWrapper
export { StreamSelectorWrapper } from "./StreamSelectorWrapper";
|
Episode 677: The Experiment Experiment
toggle caption uniinnsbruck/Flickr
A few years back, a famous psychologist published a series of studies that found people could predict the future — not all the time, but more often than if they were guessing by chance alone.
The paper left psychologists with two options.
"Either we have to conclude that ESP is true," says Brian Nosek, a psychologist at the University of Virginia, "or we have to change our beliefs about the right ways to do science."
Nosek is going with Option B — and not just for psychology experiments. He thinks there's something wrong with the way we're doing science. And he launched a massive project to try to fix it.
Music: "Run Into The Sun." Find us: Twitter/ Facebook/ Spotify/ Tumblr. |
<gh_stars>1-10
/*
* Copyright (c) 2013, Sierra Wireless,
* Copyright (c) 2014, Zebra Technologies,
*
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of {{ project }} nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package leshan.client;
import java.io.IOException;
import java.net.InetSocketAddress;
import leshan.client.response.Callback;
import leshan.client.response.OperationResponse;
import org.eclipse.californium.core.coap.CoAP.ResponseCode;
import org.eclipse.californium.core.coap.MessageObserver;
import org.eclipse.californium.core.coap.Request;
import org.eclipse.californium.core.coap.Response;
import org.eclipse.californium.core.network.CoAPEndpoint;
import org.eclipse.californium.core.network.Exchange;
public abstract class Uplink {
private static final String MESSAGE_BAD_GATEWAY = "Bad Gateway on Async Callback";
    private static final String MESSAGE_GATEWAY_TIMEOUT = "Gateway Timed Out on Async Callback";
private static final String MESSAGE_INTERRUPTED = "Endpoint Interrupted While Waiting for Sync Response";
protected final CoAPEndpoint origin;
private final InetSocketAddress destination;
public Uplink(final InetSocketAddress destination, final CoAPEndpoint origin) {
if (destination == null || origin == null) {
throw new IllegalArgumentException("Destination and/or Origin cannot be null.");
}
this.destination = destination;
this.origin = origin;
}
protected final void checkStarted(final CoAPEndpoint endpoint) {
if (!endpoint.isStarted()) {
try {
endpoint.start();
} catch (final IOException e) {
e.printStackTrace();
}
}
}
protected void sendAsyncRequest(final Callback callback, final Request request) {
request.addMessageObserver(new MessageObserver() {
@Override
public void onTimeout() {
request.removeMessageObserver(this);
callback.onFailure(OperationResponse.failure(ResponseCode.GATEWAY_TIMEOUT, MESSAGE_GATEWAY_TIMEOUT));
}
@Override
public void onRetransmission() {
// TODO Auto-generated method stub
}
@Override
public void onResponse(final Response response) {
request.removeMessageObserver(this);
if (ResponseCode.isSuccess(response.getCode())) {
callback.onSuccess(OperationResponse.of(response));
} else {
callback.onFailure(OperationResponse.failure(response.getCode(), "Request Failed on Server "
+ response.getOptions()));
}
}
@Override
public void onReject() {
request.removeMessageObserver(this);
callback.onFailure(OperationResponse.failure(ResponseCode.BAD_GATEWAY, MESSAGE_BAD_GATEWAY));
}
@Override
public void onCancel() {
request.removeMessageObserver(this);
callback.onFailure(OperationResponse.failure(ResponseCode.BAD_GATEWAY, MESSAGE_BAD_GATEWAY));
}
@Override
public void onAcknowledgement() {
}
});
checkStarted(origin);
origin.sendRequest(request);
}
protected void sendAsyncResponse(final Exchange exchange, final Response response, final Callback callback) {
response.addMessageObserver(new MessageObserver() {
@Override
public void onTimeout() {
response.removeMessageObserver(this);
}
@Override
public void onRetransmission() {
// TODO: Stuff
}
@Override
public void onResponse(final Response response) {
response.removeMessageObserver(this);
}
@Override
public void onReject() {
response.removeMessageObserver(this);
}
@Override
public void onCancel() {
response.removeMessageObserver(this);
}
@Override
public void onAcknowledgement() {
}
});
checkStarted(origin);
exchange.sendResponse(response);
}
protected OperationResponse sendSyncRequest(final long timeout, final Request request) {
checkStarted(origin);
origin.sendRequest(request);
try {
final Response response = request.waitForResponse(timeout);
if (response == null) {
return OperationResponse.failure(ResponseCode.GATEWAY_TIMEOUT, "Timed Out Waiting For Response.");
} else if (ResponseCode.isSuccess(response.getCode())) {
return OperationResponse.of(response);
} else {
return OperationResponse.failure(response.getCode(),
"Request Failed on Server " + response.getOptions());
}
} catch (final InterruptedException e) {
// TODO: Am I an internal server error?
return OperationResponse.failure(ResponseCode.INTERNAL_SERVER_ERROR, MESSAGE_INTERRUPTED);
}
}
protected InetSocketAddress getDestination() {
return destination;
}
public void stop() {
origin.stop();
}
}
|
/// <reference types="cypress" />
import { ActionRest } from '../actions/ActionRest'
import { GeraDoc } from '../utils/docs'
const action = new ActionRest()
const doc = new GeraDoc()
const {
Given,
When,
Then
} = require("cypress-cucumber-preprocessor/steps");
let name
let username
let password
let id
//Create User
Given(/^i want create a new user$/, () => {
console.log('POST')
});
When(/^i inform the datas$/, () => {
name = doc.getNome()
username = doc.getEmail()
password = doc.getSenha()
});
When(/^send request$/, () => {
action.restCreateUser(name, username, password)
});
Then(/^has user is created success$/, () => {
});
// Login JWT Token
Given(/^i inform my user and password$/, () => {
username = '<EMAIL>'
password = '<PASSWORD>*'
});
When(/^i send request$/, () => {
action.restLogin(username, password)
});
Then(/^the login is success$/, () => {
});
Then(/^Jwt token is returned$/, () => {
});
// Find user by Id
Given(/^that I want to find a user by id$/, () => {
return true;
});
When(/^I pass the user id$/, () => {
id = 1
action.findById(id)
});
When(/^I submit a request$/, () => {
return true;
});
Then(/^I will have the user returned successfully$/, () => {
return true;
});
// Find user by username
Given(/^that I want to find a user username$/, () => {
return true;
});
When(/^I send the request$/, () => {
username = '<EMAIL>'
action.findByUsername(username)
});
Then(/^I will have the user returned success$/, () => {
return true;
});
// List all users
Given(/^that I want to return the list of all users$/, () => {
return true;
});
When(/^I send this request$/, () => {
action.listAllUsers()
});
Then(/^I will have a list containing all users returned with success$/, () => {
return true;
});
// List all users pageable
Given(/^that I want to list all pageable users$/, () => {
return true;
});
When(/^I send a request$/, () => {
action.listAllUsersPageable()
});
Then(/^I will have the return of the pageable users list$/, () => {
return true;
});
// Update user
Given(/^that i want to update user$/, () => {
return true;
});
When(/^I pass the data$/, () => {
return true;
});
When(/^click on request$/, () => {
id = 2
name = 'Fernandho'
username = '<EMAIL>'
password = '<PASSWORD>'
action.updateUser(id, name, username, password)
});
Then(/^will i have the user updated successfully$/, () => {
return true;
});
// Delete user
Given(/^that I want to delete a user$/, () => {
return true;
});
When(/^submit a delete request$/, () => {
action.deleteUser()
});
Then(/^I will have the user deleted$/, () => {
return true;
});
|
/**
* Class to sort code owners based on their scorings on different {@link CodeOwnerScore}s.
*
* <p>To determine the sort order the scorings are weighted based on the {@link
* CodeOwnerScore#weight()} of the {@link CodeOwnerScore} on which the scoring was done.
*/
@AutoValue
public abstract class CodeOwnerScorings {
/** The scorings that should be taken into account for sorting the code owners. */
public abstract ImmutableSet<CodeOwnerScoring> scorings();
public static CodeOwnerScorings create(CodeOwnerScoring... codeOwnerScorings) {
return new AutoValue_CodeOwnerScorings(ImmutableSet.copyOf(codeOwnerScorings));
}
public static CodeOwnerScorings create(Set<CodeOwnerScoring> codeOwnerScorings) {
return new AutoValue_CodeOwnerScorings(ImmutableSet.copyOf(codeOwnerScorings));
}
/**
* Returns the total scorings for the given code owners.
*
* @param codeOwners the code owners for which the scorings should be returned
*/
public ImmutableMap<CodeOwner, Double> getScorings(ImmutableSet<CodeOwner> codeOwners) {
return codeOwners.stream()
.collect(toImmutableMap(Function.identity(), this::sumWeightedScorings));
}
/** Returns the sum of all weighted scorings that available for the given code owner. */
private double sumWeightedScorings(CodeOwner codeOwner) {
double sum =
scorings().stream()
.map(scoring -> scoring.weightedScoring(codeOwner))
.collect(Collectors.summingDouble(Double::doubleValue));
return sum;
}
} |
// mkArgs prepares a list of paths for the command line
func mkArgs(paths []string) string {
escaped := make([]string, len(paths))
for i, s := range paths {
escaped[i] = quotePath(realRel(s))
}
return strings.Join(escaped, " ")
} |
package com.graphhopper.storage;
import com.graphhopper.routing.util.TraversalMode;
import com.graphhopper.routing.weighting.AbstractWeighting;
import com.graphhopper.routing.weighting.Weighting;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Specifies all properties of a CH routing profile. Generally these properties cannot be changed after the CH
* pre-processing is finished and are stored on disk along with the prepared graph data.
*
* @author easbar
*/
public class CHProfile {
private final Weighting weighting;
private final boolean edgeBased;
public static CHProfile nodeBased(Weighting weighting) {
return new CHProfile(weighting, TraversalMode.NODE_BASED);
}
public static CHProfile edgeBased(Weighting weighting) {
return new CHProfile(weighting, TraversalMode.EDGE_BASED);
}
public CHProfile(Weighting weighting, TraversalMode traversalMode) {
this(weighting, traversalMode.isEdgeBased());
}
public CHProfile(Weighting weighting, boolean edgeBased) {
this.weighting = weighting;
this.edgeBased = edgeBased;
}
public Weighting getWeighting() {
return weighting;
}
public boolean isEdgeBased() {
return edgeBased;
}
public TraversalMode getTraversalMode() {
return edgeBased ? TraversalMode.EDGE_BASED : TraversalMode.NODE_BASED;
}
public String toFileName() {
String result = AbstractWeighting.weightingToFileName(weighting);
// keeping legacy file names for now, like fastest_edge_utc40 (instead of fastest_40_edge), because we will
// most likely use profile names soon: #1708
Pattern pattern = Pattern.compile("-?\\d+");
Matcher matcher = pattern.matcher(result);
if (matcher.find()) {
String turnCostPostfix = matcher.group();
result = matcher.replaceAll("");
result += edgeBased ? "edge" : "node";
result += "_utc" + turnCostPostfix;
} else {
result += edgeBased ? "_edge" : "_node";
}
return result;
}
public String toString() {
String result = weighting.toString();
Pattern pattern = Pattern.compile("\\|u_turn_costs=-?\\d+");
Matcher matcher = pattern.matcher(result);
if (matcher.find()) {
String uTurnCostPostFix = matcher.group();
result = matcher.replaceAll("");
result += "|edge_based=" + edgeBased + uTurnCostPostFix;
} else {
result += "|edge_based=" + edgeBased;
}
return result;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CHProfile chProfile = (CHProfile) o;
return edgeBased == chProfile.edgeBased &&
Objects.equals(weighting, chProfile.weighting);
}
@Override
public int hashCode() {
return Objects.hash(weighting, edgeBased);
}
}
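// Hedged usage sketch (illustration only, not part of GraphHopper): builds an
// edge-based profile and derives its legacy file name, e.g. "fastest_edge_utc40"
// for a fastest weighting with u-turn costs of 40, per the naming handled in
// toFileName() above. The Weighting instance is assumed to come from the
// routing setup.
class CHProfileUsageSketch {
    static String legacyFileName(Weighting weighting) {
        return CHProfile.edgeBased(weighting).toFileName();
    }
}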
|
OTTAWA — Parks Canada is proposing new lockage fees for the Rideau Canal that would nearly triple the cost of a boat trip from Ottawa to Kingston.
Canal users greeted the huge fee increases, posted to Parks Canada’s website late Friday, with fury and consternation. If they proceed, they warn, the new fees will effectively kill the canal, along with the hundreds of businesses and thousands of jobs it supports in Eastern Ontario.
And if that happens, the canal’s UNESCO world heritage designation could disappear as well, they say.
At present, boaters can purchase passes — ranging in length from a single day to the full season — that let them pass through canal locks for a single up-front price.
Parks Canada is proposing to scrap the pass system and replace it with a complex new ticket fee structure beginning in 2014. Under the new system, boaters would have to purchase between two and four tickets to pass through each lock station, depending on the number of locks it contains.
Each ticket would cost 30 cents per foot of a boat’s length. So, for example, a single ticket for a 30-foot boat would cost $9, meaning the lockage fee would range from $18 and $36 for a vessel of that size. The fee would be less for smaller boats, but more for larger ones.
The new fee structure would raise the cost of travelling the full length of the canal by 287 per cent. The owner of a 20-foot boat now pays $93 in lockage fees for the Ottawa-to-Kingston trip, but would pay $360 under Parks Canada’s proposal. For owners of 40-foot boats, the cost of a one-way trip would soar from the current $186 to $720.
The percentage increase would be even larger — more than 340 per cent — for boaters who now buy a season’s pass. The owner of a 25-foot boat can buy a season’s pass for $220. But the same boater would pay $975 for 130 tickets under the new system.
The fee increases apply to canoes and kayaks, as well, though they would need one fewer ticket to pass through a lock station than a power boat. Under the current system, those who want to paddle the full length of the canal can buy a transit pass for $74.40. If Parks Canada’s proposed fees are adopted, the cost would rise to $182.40 in 2014.
Boaters can save 25 per cent by purchasing 80 tickets in advance, or 10 per cent by buying lockage tickets online in advance.
Parks Canada is also proposing sharp increases in mooring fees. Effective this year, the overnight mooring fee would more than double to $2 a foot, and a new $1-a-foot fee would be imposed for daytime mooring.
All 3,300 fees that Parks Canada charges at its sites across the country have been frozen since 2008 and are being adjusted. The public has until Feb. 18 to provide comments. In future, increases will be tied to the rise in the Consumer Price Index. |
Outside Lands 2014 Listening Guide Part 3
Nahko and Medicine for the People
Acoustic Thump-Hop
Hailing from the musical melting pot that is Portland, OR, the band draws influence from genres like hip hop, folk, jazz and tribal. You’ll note traces of inspiring acts like Erykah Badu and Digable Planets in their sound.
Nicki Bluhm and The Gramblers
Rock, Soul, Alt-Country
Nicki Bluhm is a skilled musician residing in the Bay Area. Constantly in search of new projects, she’s served as a guest performer alongside Chris Robinson, Bob Weir and festival mates The Brothers Comatose.
Night Terrors of 1927
Indie, Synth Pop
Starting to break out from the dense abyss of the indie genre, the talented members of the aptly named Night Terrors of 1927 will no doubt win over any audience willing to give them a look. Outside Lands should prove an appropriate venue for their unique sound.
NOCONA
Indie
Blending high energy with songs that paint a picture for listeners, NOCONA recall memories of setting suns, crisp surf and ageless desert. Formed by husband and wife team of Chris and Adrienne Isom and their friends, the collection gave songs Chris wrote years earlier new life.
Paolo Nutini
Pop Rock, Soul, Blues
The 27-year-old Nutini didn’t set out to be a Scottish singer, songwriter and all-around musician — he believed at an early age he would follow his father into the fish and chip business. Fortunately his grandfather’s passion for music eventually rubbed off, opening the door to the UK chart success Nutini has recently enjoyed, having reached #5 on the UK singles chart.
Phosphorescent
Indie Rock, Pop, Folk
The working moniker of American singer-songwriter Matthew Houck, this artist originally hails from southern Alabama. Having received favorable marks from outlets like Pitchfork and Stylus Magazine, Phosphorescent is about as critically acclaimed as they come.
Ray LaMontagne
Folk, Rock, Blues
Born in New Hampshire, LaMontagne is an American singer and songwriter with a deep body of work including four studio albums. The subsequent sound has critics making ready comparisons to noteworthy names like Otis Redding, The Band, Van Morrison, Nick Drake and Tim Buckley.
Rayland Baxter
Indie, Folk
Rayland has been described as a gentleman, singer of song and teller of tale among many other things — this is a man who wears many hats, literally and figuratively. His sound covers a lot of ground but can be considered a more uptempo brand of folk.
Run the Jewels (El-P & Killer Mike)
Hip Hop
The masterwork of veteran underground MCs El-P and Killer Mike, Run The Jewels catapulted to the top of hip hop blogs with their self-titled album in 2013. Any self-respecting fan of hip hop will want to check out this set, and rightfully so — both artists hold their weight across solo work, but as a duo they create some of the most raw and intelligent hip hop currently running.
SBTRKT
Post-Dubstep, Indie Pop, Electronic
The musical project of Aaron Jerome, SBTRKT has a body of work that includes remixes of songs from MIA, Radiohead, Modeselektor and many more. His music gets heavy rotation from taste making outlets like BBC Radio 1. He prefers to perform under relative anonymity, using masks of native ceremonial designs during live shows.
Spoon
Indie
A veteran indie rock outfit out of Austin, TX, Spoon has been together since 1993 but has enjoyed major commercial success for years following hits like ‘That’s The Way We Get By’ and ‘I Turn My Camera On’. Perhaps not the biggest draw for some attending Outside Lands due to their relatively stagnant releases, they’re by no means an empty addition to the 2014 billing.
Tedeschi Trucks Band
Rock, Blues, Soul
A blues and rock group out of Jacksonville, Florida, this band came together in 2010 under the leadership of married couple Derek Trucks and Susan Tedeschi. Quickly showing off their talent, the group picked up the Grammy for Best Blues Album in 2012, only two years following their formation.
Tegan & Sara
Indie Rock, New Wave
The Canadian indie rock duo of sisters Tegan Rain Quin and Sara Kiersten Quin formed the band we know today as early as 1995 in Calgary. Both sisters are accomplished musicians, taking turns on guitar, drums, keyboards and general songwriting. Indie icons in their later lives, this duo is a favorite among many Outside Lands fans.
The Brothers Comatose
Acoustic, Indie, Folk
Drawing comparisons to artists like The Devil Makes Three and Trampled By Turtles, the boys behind The Brothers Comatose still find a folksy sound all their own. Actual brothers, the members of the band cut their teeth as young musicians living in a house with a reputation for parties full of music.
The Districts
Indie Pop, Rock
A four-piece collective from the small town of Lititz, Pennsylvania, the band formed in 2009 when members were still in high school. Their sound blends jangly indie Americana with blues-inspired rock.
The Flaming Lips
Indie, Art Pop
There are few bands if any in the world that can earn comparison to Wayne Coyne’s charmingly weird outfit of alternative rockers. Beginning in Oklahoma, the band initially broke through in the 1990s and has elevated to elite status in recent decades. Their live show is a dazzling visual spectacle, leaving many people with dropped jaws and blown minds.
The Killers
Pop, Rock, Electronic
Brandon Flowers and co. were lumped in with similar-sounding bands like The Bravery, but only they managed to rise to the top of the pop charts in record time during the early 2000s — the album Hot Fuss has reached classic status. Their sound borrows heavily from 80s-style synth pop and draws shades of Interpol and Franz Ferdinand.
The Kooks
Indie Rock, Britpop
Listening to The Kooks is like venturing into popular sounds of the past, but their sound is far from an efficient recycling job. Named for a song on David Bowie’s Hunky Dory album, the group united at a young age over a shared interest in bands like the Police, the Strokes and the Everly Brothers.
The Soul Rebels
Brass Band, Soul
What’s a music festival without a little taste of New Orleans? Nola natives behind The Soul Rebels fit that bill perfectly well, an eight-piece brass ensemble that incorporates elements of jazz, funk and hip-hop.
Tiësto
Big Room House, Electro Progressive, Trance, Techno
One of the most acclaimed producers currently at the top of his game, Tiësto is held in high regard by outlets like DJ Magazine and the EDM mainstream audience alike. Those who dismiss Tiësto outright for pandering to a massive audience are similarly dismissing one of the best sets of the weekend.
Check out the Outside Lands 2014 Playlist. |
/**
* Applies itself to the input
* @param input The input for the function
* @return The output of the function
*/
public double activate(double input) {
switch(this) {
case TANH:
return Math.tanh(input);
case SIGMOID:
return 1 / (1 + Math.exp(-input));
case RELU:
if(input >= 0) {
return input;
} else {
return 0;
}
case SOFTPLUS:
return Math.log(1 + Math.exp(input));
case RELU_LEAKY:
if(input >= 0) {
return input;
} else {
return RELU_LEAKY_LEAKAGE * input;
}
default:
case NONE:
return input;
}
} |
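// Hedged usage sketch: assumes activate() is declared on an enum named
// ActivationFunction whose constants match the switch cases above.
public static double forwardSketch(double input) {
    double hidden = ActivationFunction.TANH.activate(input);
    return ActivationFunction.SIGMOID.activate(hidden);
}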
Development of 2-D and 3-D Double-Population Thermal Lattice Boltzmann Models
In this paper, incompressible two-dimensional (2-D) and three-dimensional (3-D) thermohydrodynamics for the lattice Boltzmann scheme are developed. The basic idea is to solve the velocity field and the temperature field using two different distribution functions. A derivation of the lattice Boltzmann scheme from the continuous Boltzmann equation for 2-D is discussed in detail. By using the same procedure as in the derivation of the discretised density distribution function, it is found that new lattice four-velocity (2-D) and eight-velocity (3-D) models for the internal energy density distribution function can be developed where the viscous and compressive heating effects are negligible. These models are validated by the numerical simulation of the 2-D porous plate Couette flow problem, where the analytical solution exists, and of the natural convection flows in a cubic cavity.
import iseg.static_strings as ss
from absl import flags
FLAGS = flags.FLAGS
# System settings
flags.DEFINE_string("cuda_visible_devices", None, "visible cuda devices")
# Common settings
flags.DEFINE_enum("mode", ss.TRAIN, [ss.TRAIN, ss.VAL, ss.TEST_DIR], "mode")
flags.DEFINE_string("tensorboard_dir", None, "Path of tensorboard dir")
flags.DEFINE_string("checkpoint_dir", None, "Path of dir where the checkpoints are stored")
flags.DEFINE_string("visualize_output_dir", None, "Path of the dir to output the visualize results")
flags.DEFINE_boolean("gpu_memory_growth", True, "Is GPU growth allowed")
flags.DEFINE_boolean("restore_checkpoint", True, "Restore the checkpoint")
flags.DEFINE_integer("gpu_batch_size", 16, "Total batch size")
flags.DEFINE_integer("eval_gpu_batch_size", None, "Total batch size for eval")
flags.DEFINE_string("output_file", None, "Output file path")
flags.DEFINE_boolean("press_key_to_end", False, "End the program after press the key")
flags.DEFINE_string("tpu_name", None, "TPU name")
flags.DEFINE_bool("soft_device_placement", False, "If set soft device placement")
flags.DEFINE_integer("sliding_window_crop_height", None, "Sliding window crop height")
flags.DEFINE_integer("sliding_window_crop_width", None, "Sliding window crop width")
# Dataset settings
flags.DEFINE_integer("crop_height", None, "crop height")
flags.DEFINE_integer("crop_width", None, "crop width")
flags.DEFINE_integer("eval_crop_height", None, "eval crop height")
flags.DEFINE_integer("eval_crop_width", None, "eval crop width")
# Training protocol
flags.DEFINE_bool("mixed_precision", True, "Use mixed precision")
flags.DEFINE_float("sgd_momentum_rate", 0, "Momentum rate of SGD")
flags.DEFINE_integer("max_checkpoints_to_keep", 20, "How many checkpoints to keep")
flags.DEFINE_integer("train_epoch", 30, "Epoch to train")
flags.DEFINE_integer("epoch_steps", 1000, "Num steps in each epoch")
flags.DEFINE_integer("initial_epoch", 0, "Initial epoch")
flags.DEFINE_integer("shuffle", 256, "Shuffle rate")
flags.DEFINE_integer("random_seed", 0, "random seed")
flags.DEFINE_bool("training_progress_bar", True, "Show progress bar during training")
# Evaluation protocol
flags.DEFINE_multi_float("scale_rates", [1.0], "Scale rates when predicion")
flags.DEFINE_boolean("flip", False, "Use flip when prediction")
|
Reporter's Notebook: France's Unexpected Political Revival
NPR's Eleanor Beardsley reviews the dramatic changes that have occurred on her beat this year, including the election of Emmanuel Macron and France's resurgence in global politics.
ROBERT SIEGEL, HOST:
As our foreign correspondents pass through NPR headquarters for home leave, we like to catch up, usually informally, on what's transpired on their beat over the past year. But this summer, with Eleanor Beardsley back from Paris for a spell, we thought we'd have that catch-up conversation on the air because her beat has witnessed some of the most dramatic changes of the past year.
Hi, Eleanor.
ELEANOR BEARDSLEY, BYLINE: Hi, Robert.
SIEGEL: People, do you remember France harried by terror attacks, its socialist president weak and unpopular, the far-right on the rise, the future of the European Union in doubt? That was last year. This year...
(SOUNDBITE OF ARCHIVED RECORDING)
PRESIDENT EMMANUEL MACRON: (Speaking French).
(APPLAUSE)
SIEGEL: Emmanuel Macron, the new president, on election night declaring that France had won. Eleanor, how different is the France of 2017 from the France that you were covering a year ago?
BEARDSLEY: Robert, very different. There's a leader in power who seems like he's just been training for this job his whole life. France has a historic role, he says. As a leader among nations, the world looks to France. And France will be there to provide answers to lead on climate change, Middle East peace, fighting terrorism, dealing with the migrant crisis. France has a role to play everywhere now.
SIEGEL: And a year ago, we wondered, wow, if the far-rightist Marine Le Pen wins, then for sure the European Union is done for, the Brits having voted to leave the European Union already. Instead, not only does France seem to be thoroughly inside the EU, but very much a leader within the EU again.
BEARDSLEY: Yes. You know, the Brexit vote was a body blow to the EU. And people felt across the continent if one more country, even a small one, would vote to leave the European Union, that would be it. Now there is this feeling that France and Germany are going to work together to make the EU stronger than ever. And it's going to play a positive role in Europeans' lives and lead on the world stage, maybe a counterbalance to China and the U.S. And there's really a feeling that you want to be part of the EU. It's a great entity. It's a powerful bloc. And there's sort of a feeling, at least in France, that they look to the Brits and they look at the mess they're in, and they're feeling sorry for them.
SIEGEL: I mean, one thing that's very interesting about Emmanuel Macron's victory - and he created his own movement to get elected. And as you've pointed out to me, En Marche, in - what is it? - on the move...
BEARDSLEY: On the move, yeah.
SIEGEL: ...Is EM, just like Emmanuel Macron.
BEARDSLEY: His initials, yeah.
SIEGEL: En Marche is now the party in charge. It threw out the two traditional parties that have been vying with each other for decades right now. But it's not a party of either the extreme right or the extreme left.
BEARDSLEY: It's not. It's a new way. And it is amazing what happened. It would be like if a candidate came in, beat the Democrat and Republican candidates and sort of destroyed the parties along the way.
Emmanuel Macron has talked about France being blocked for decades between two camps, the left and the right. And they can't work together. And certain ways of doing things belong to the left, certain to the right. He says, no, we can be progressive, work together, find new ways. And it doesn't have to be categorized. And so he's pulled people from the left and the right. And they seem ready to build a new way of functioning.
SIEGEL: It sounds like Macron's honeymoon is still in progress with the French republic. Has he actually - any examples, any concrete examples of this new way of approaching politics to show for his efforts?
BEARDSLEY: Well, Robert, the test is to come because Macron has said he will reform, loosen up the rigid French labor market. And this is always associated with a right-wing thing to do. And he says he's going to do it. But the left-wing unions say they'll be out in the streets. So this will be a test coming up with his party and with his government. Can they pass these reforms? But you know, there are cracks starting to appear.
His critics say his party is about him. If he weren't there, what does it consist of? The media says Macron treats them in an imperious way, keeps them at bay. He's not very accessible to the media. He canceled the traditional Bastille Day interview with two top journalists because, as his spokesman said, his thought process is too complicated for journalists' questions. He's a little bit alienating the French military. There was a general who resigned. And so we're seeing a bit of an authoritarian streak in Macron. So I think this fall will be the real test.
SIEGEL: Eleanor Beardsley, thanks for talking with us, and enjoy the next year in Paris.
BEARDSLEY: Thank you, Robert.
(SOUNDBITE OF FETE'S "THE ISLANDS")
Copyright © 2017 NPR. All rights reserved. Visit our website terms of use and permissions pages at www.npr.org for further information.
NPR transcripts are created on a rush deadline by Verb8tm, Inc., an NPR contractor, and produced using a proprietary transcription process developed with NPR. This text may not be in its final form and may be updated or revised in the future. Accuracy and availability may vary. The authoritative record of NPR’s programming is the audio record. |
With media day taking place on Monday, Detroit Pistons fans were given their first real opportunity to hear from, and about, many of their new faces. There was one piece of news that they definitely weren’t expecting though.
Pistons center Aaron Gray will miss training camp and is out indefinitely after sustaining a “cardiac episode” during an August workout. — Jeff Zillgitt (@JeffZillgitt) September 29, 2014
Aaron Gray, who only recently signed as a free agent this summer, will not only miss training camp, but is ruled out indefinitely after suffering a cardiac episode at the end of August. With heart issues being nothing to take lightly, Gray and the Pistons are making the correct decision by applying considerable caution to the situation.
According to Perry A. Farrell of the Detroit Free Press, although the episode occurred during practice, it wasn’t on site at the Pistons facility. Unfortunately this isn’t the only time that Gray has had to cope with heart problems either.
Gray had a procedure called an ablation to try and alleviate some outstanding cardiac problems. The purpose of ablation is to slow and regulate the rhythm of the heart, and the procedure meant Gray missed much of the Raptors training camp back in that lockout-shortened season too.
The priority for all parties involved has to be the health and well-being of Gray, but with it being hard to pin a time scale for return from such a potentially serious event, you have to wonder what ramifications this will have on the shape of Detroit’s roster heading into the season.
The Pistons are going into training camp with 16 guaranteed contracts already on the books, and the need to cut or trade at least one of those. The potential for Gray’s absence to be for an extended period of time, could create a few problems for Detroit though.
Last season they had very poor depth in the middle, and no legitimate true center to act as a backup to Andre Drummond. As a result when Gray was signed, that void was filled, and it made perfect sense. Still, aside from him, there are still no legitimate options on the Pistons roster though.
Depending on how Stan Van Gundy sees his fit within the roster, Greg Monroe may well be asked to spend some time at center, as he has done in the past. That would be far from ideal though, and will be one of the reasons why Detroit may be forced to explore other alternatives.
One such option would be to sign Hasheem Thabeet. Thabeet is a part of the Pistons’ training camp squad, and now seems likely to get some significant burn during the preseason. A former second overall pick, Thabeet has never lived up to expectations in the NBA, but he would at least be a player who could offer the team some rebounding and defensive effort off the bench.
If the Pistons are to sign Thabeet to a guaranteed contract, and are retaining Gray while they wait on a health update, that would create a need for two players to be moved on for Detroit. As a result of Gray’s condition, the Pistons training camp is going to be more important than ever, as guys will have to compete at the highest level to keep their jobs.
Hopefully, Gray will have a speedy recovery and make his Pistons debut very soon, but in the meantime Detroit has some decisions to make.
PHILADELPHIA, Feb 14 (Reuters) - Harrisburg, Pennsylvania, moved a step closer to defaulting on a bond payment when its city council passed a 2010 budget that does not include $68 million in debt repayments on an incinerator.
Without the debt provision in the $65 million budget, the state capital may miss a March 1 payment of $2.072 million, a rarity for a municipal bond issuer.
Joyce Davis, a spokeswoman for Mayor Linda Thompson, confirmed the council’s decision — taken at a special session on Saturday — and said the mayor is not commenting for now on the implications of exclusion of the debt payments from the budget.
The council also defeated a plan to sell city assets to help pay down the debt which is guaranteed by the city on behalf of the Harrisburg Authority, a separate municipal entity that owns the incinerator. Council members also rejected Thompson’s plan to raise property taxes and water rates.
The $2.072 million payment is the latest installment on a $300 million bond owed on the construction of the incinerator. An additional $637,000 is due on April 1.
City Controller Dan Miller said last year’s payments on the incinerator were made from a debt service reserve fund that is now depleted.
Debt payments on the incinerator total $68 million in 2010, or more than the city’s general fund budget of about $60 million, Miller said.
Miller said on Feb. 9 he would “not be surprised” if Harrisburg fails to meet the March 1 payment.
Asked whether the city may file Chapter 9 bankruptcy as a way to get its debts under control, Miller said that was a “possibility.”
The tax-exempt municipal bond market, which states, cities and municipalities use to raise the funds to build roads, schools and hospitals, is viewed as very safe with a far lower default rate than the corporate bond market.
Just 54 municipal bond issuers rated by Moody’s Investors Service defaulted on their debt between 1970 and 2009, the agency said on Thursday. The average five-year historical cumulative default rate for investment-grade municipal debt was 0.03 percent in the period, compared with 0.97 percent for corporate issuers.
The recession has raised concerns of an increase in defaults as states, cities and towns struggle to balance budgets as required by law in all states except Vermont.
So far, however, those fears have not been realized and ratings agencies have played down the likelihood of a spike in defaults.
Fitch Ratings in January cautioned cities against using the threat of bankruptcy as a weapon to win concessions from labor unions. Even talk of bankruptcy can become self-fulfilling and undermines investor confidence in the market, it said. |
"""
Geo-miners are recipes that extract data and metadata from geographic resources.
"""
from os import stat
from owslib.wms import WebMapService
from osgeo.gdal import Open
from osgeo.osr import SpatialReference, CoordinateTransformation
from .base import Miner
class WMS(Miner):
"""Tiny wrapper around OWSLib.wms.WebMapService"""
default_url = 'http://wms.jpl.nasa.gov/wms.cgi'
def install(self):
url = self.options.get('url', self.default_url)
wms = WebMapService(url)
if wms.identification.type != 'OGC:WMS':
raise TypeError('Not a WMS service %r returned %r type' % (url, wms.identification.type))
        print('WMS from %s' % wms.identification.title)
self.buildout.namespace['wms'] = wms
return tuple()
class Raster(Miner):
"""Miner for raster geospatial data and imagery based on GDAL"""
def install(self):
filepath = self.options.get('filepath', 'image.tif')
t_srid = self.options.get('t_srid', 'EPSG:4326')
namespace_key = self.options.get('namespace-key', self.name)
# stat also tests file existence
        self.set_names((namespace_key, 'fileDimension'), stat(filepath).st_size)
dataset = Open(filepath)
if dataset is None:
raise IOError('%r not recognised as a supported file format.' % filepath)
self.set_names((namespace_key, 'spatialRepresentationInfo', 'column'), dataset.RasterYSize)
self.set_names((namespace_key, 'spatialRepresentationInfo', 'row'), dataset.RasterXSize)
        print(self.buildout.namespace)
return tuple()
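# Standalone sketch of the GDAL calls the Raster miner relies on; gdal.Open()
# returns None on failure instead of raising, hence the explicit check.
# 'image.tif' is a placeholder path.
if __name__ == '__main__':
    probe = Open('image.tif')
    if probe is None:
        raise IOError('image.tif not recognised as a supported file format.')
    print('columns: %d, rows: %d' % (probe.RasterXSize, probe.RasterYSize))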
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
import java.util.ListIterator;
import org.apache.hadoop.yarn.api.records.ReservationDefinition;
import org.apache.hadoop.yarn.api.records.ReservationRequest;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
/**
* Sets the earliest start time of a stage proportional to the job weight. The
* interval [jobArrival, stageDeadline) is divided as follows. First, each stage
* is guaranteed at least its requested duration. Then, the stage receives a
* fraction of the remaining time. The fraction is calculated as the ratio
* between the weight (total requested resources) of the stage and the total
 * weight of all preceding stages.
*/
public class StageEarliestStartByDemand implements StageEarliestStart {
private long step;
@Override
public long setEarliestStartTime(Plan plan,
ReservationDefinition reservation, int index, ReservationRequest current,
long stageDeadline) {
step = plan.getStep();
// If this is the first stage, don't bother with the computation.
if (index < 1) {
return reservation.getArrival();
}
// Get iterator
ListIterator<ReservationRequest> li =
reservation.getReservationRequests().getReservationResources()
.listIterator(index);
ReservationRequest rr;
// Calculate the total weight & total duration
double totalWeight = calcWeight(current);
long totalDuration = getRoundedDuration(current, plan);
while (li.hasPrevious()) {
rr = li.previous();
totalWeight += calcWeight(rr);
totalDuration += getRoundedDuration(rr, plan);
}
// Compute the weight of the current stage as compared to remaining ones
double ratio = calcWeight(current) / totalWeight;
// Estimate an early start time, such that:
// 1. Every stage is guaranteed to receive at least its duration
// 2. The remainder of the window is divided between stages
// proportionally to its workload (total memory consumption)
long window = stageDeadline - reservation.getArrival();
long windowRemainder = window - totalDuration;
long earlyStart =
(long) (stageDeadline - getRoundedDuration(current, plan)
- (windowRemainder * ratio));
// Realign if necessary (since we did some arithmetic)
earlyStart = stepRoundUp(earlyStart, step);
// Return
return earlyStart;
}
// Weight = total memory consumption of stage
protected double calcWeight(ReservationRequest stage) {
return (stage.getDuration() * stage.getCapability().getMemory())
* (stage.getNumContainers());
}
protected long getRoundedDuration(ReservationRequest stage, Plan plan) {
return stepRoundUp(stage.getDuration(), step);
}
protected static long stepRoundDown(long t, long step) {
return (t / step) * step;
}
protected static long stepRoundUp(long t, long step) {
return ((t + step - 1) / step) * step;
}
}
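// Worked numeric example of the early-start formula above, with invented
// values: arrival = 0, stageDeadline = 100, rounded stage durations summing
// to 60, and a current stage of duration 20 holding half the total weight.
class StageEarliestStartSketch {
  static long example() {
    long stageDeadline = 100;
    long currentDuration = 20;
    long totalDuration = 60;
    double ratio = 0.5; // weight(current) / totalWeight
    long windowRemainder = (stageDeadline - 0) - totalDuration; // 40
    // 100 - 20 - 40 * 0.5 = 60; the real code then realigns via stepRoundUp.
    return (long) (stageDeadline - currentDuration - windowRemainder * ratio);
  }
}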
|
// Run runs a timeoutRunner job: it blocks for three seconds to simulate a
// long-running task, then returns with no result.
func (g timeoutRunner) Run(job Job, ctx *Ctx) (interface{}, error) {
	time.Sleep(3 * time.Second)
	return nil, nil
}
package com.lipata.forkauthority.data.user;
import android.content.Context;
import androidx.test.core.app.ApplicationProvider;
import androidx.test.ext.junit.runners.AndroidJUnit4;
import com.lipata.forkauthority.R;
import com.lipata.forkauthority.api.yelp3.entities.Business;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.*;
/**
* Created by jlipata on 11/6/17.
*/
@RunWith(AndroidJUnit4.class)
public class UserRecordsTest {
private UserRecords userRecords;
@Before
public void setUp() throws Exception {
Context context = ApplicationProvider.getApplicationContext();
userRecords = new UserRecords(
context,
context.getSharedPreferences(
context.getString(R.string.test_shared_prefs_file), // Using a test sharedprefs file
Context.MODE_PRIVATE)
);
}
@Test
public void incrementDismissedCount() throws Exception {
final String BUSINESS_ID = "test-business";
int initialDismissedCount;
if (userRecords.getUserRecords().containsKey(BUSINESS_ID)) {
initialDismissedCount = userRecords.getUserRecords().get(BUSINESS_ID).getDismissedCount();
} else {
Business business = new Business();
business.setId(BUSINESS_ID);
// At this time, we don't have a method to simply add a record,
// this will add the record and set dismissedCount to 1 if it doesn't already exist
userRecords.incrementDismissedCount(BUSINESS_ID);
initialDismissedCount = userRecords
.getUserRecords()
.get(BUSINESS_ID)
.getDismissedCount();
}
userRecords.incrementDismissedCount(BUSINESS_ID);
int newDismissedCount = userRecords.getUserRecords().get(BUSINESS_ID).getDismissedCount();
assertThat(newDismissedCount, is(initialDismissedCount + 1));
}
} |
package com.qypt.just_syn_asis_version1_0.activity;
import java.util.ArrayList;
import java.util.List;
import android.util.Log;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.Button;
import android.widget.EditText;
import android.widget.ImageView;
import android.widget.ListView;
import android.widget.Toast;
import com.mob.tools.network.NetworkHelper;
import com.qypt.just_syn_asis_version1_0.adapter.ChattingAdapter;
import com.qypt.just_syn_asis_version1_0.model.ChattingMessageBean;
import com.qypt.just_syn_asis_version1_0.model.ChattingType;
import com.qypt.just_syn_asis_version1_0.model.RobotModel;
import com.qypt.just_syn_asis_version1_0.model.RobotResultBean;
import com.qypt.just_syn_asis_version1_0.utils.NetWorkHepler;
import com.qypt.just_syn_asis_version1_0.utils.TimerUtils;
/**
 * Robot chat activity, implemented with the Turing (Tuling) robot API.
 *
 * @author Administrator justson
 */
public class RobotActivity extends SynActivity implements OnClickListener {
private Button sendButton;
private ListView myListView;
private EditText contentEditText;
private static final String APIKEY = "b1713efc987945038cbe54c10f87c55a";
private static final String SCRET = "<KEY>";
private RobotModel mRobotModel;
private List<ChattingMessageBean> list = new ArrayList<ChattingMessageBean>();
private ChattingAdapter<ChattingMessageBean> mChattingAdapter;
private boolean isUpdata=false;
@Override
protected void initView() {
Log.i("Info", "Thread:"+Thread.currentThread());
overridePendingTransition(R.anim.bottom_out_alp_fade_in, 0);
setContentView(R.layout.activity_robot);
myListView = (ListView) findViewById(R.id.chat_robot);
contentEditText = (EditText) this.findViewById(R.id.content_robot);
sendButton = (Button) findViewById(R.id.send_robot);
sendButton.setOnClickListener(this);
mRobotModel = new RobotModel();
ImageView image=(ImageView) findViewById(R.id.left_robot);
image.setOnClickListener(this);
}
@Override
protected void initViewData() {
        list.add(new ChattingMessageBean(TimerUtils.getDate("yyyy-MM-dd hh:mm:ss"), ChattingType.SERVICE, "通信帮手很高兴为您服务")); // "Contacts Helper is glad to serve you"
        list.add(new ChattingMessageBean(TimerUtils.getDate("yyyy-MM-dd hh:mm:ss"), ChattingType.CLIENT, "您好")); // "Hello"
mChattingAdapter = new ChattingAdapter<ChattingMessageBean>(this, list);
myListView.setAdapter(mChattingAdapter);
}
    // Handle click events
@Override
public void onClick(View v) {
switch (v.getId()) {
//send
case R.id.send_robot:
sendMessage();
break;
case R.id.left_robot:
this.finish();
break;
}
}
@Override
protected void onPause() {
// TODO Auto-generated method stub
super.onPause();
overridePendingTransition(0, R.anim.top_out_fade_out);
}
    /**
     * Send the message typed in the input field
     */
private void sendMessage() {
if (contentEditText.getText().toString().equals("")||isUpdata)
{
Log.i("Info", "isUpdata:"+isUpdata);
return;
}
if(!NetWorkHepler.netWorkIsReady(this))
{
            Toast.makeText(getApplicationContext(), "请检查网络", Toast.LENGTH_SHORT).show(); // "Please check your network"
return;
}
isUpdata=true;
String strContent = contentEditText.getText().toString();
list.add(new ChattingMessageBean(TimerUtils.getDate("yyyy-MM-dd hh:mm:ss"), ChattingType.CLIENT, strContent));
mChattingAdapter.notifyDataSetChanged();
contentEditText.setText("");
RobotResultBean mRobotResultBean = mRobotModel
.sendMessageToTuling(strContent);
if (mRobotResultBean == null) {
mRobotResultBean = new RobotResultBean();
            mRobotResultBean.setText("服务器正忙..."); // "The server is busy..."
}
ChattingMessageBean mChattingMessageBean = new ChattingMessageBean();
mChattingMessageBean.setType(ChattingType.SERVICE);
mChattingMessageBean.setTime(TimerUtils.getDate("yyyy-MM-dd hh:mm:ss"));
mChattingMessageBean.setMessage(mRobotResultBean.getText());
list.add(mChattingMessageBean);
Log.i("Info", "Thread:"+Thread.currentThread());
myListView.requestLayout();
        myListView.setSelection(list.size()-1); // auto-scroll to the latest message
isUpdata=false;
}
}
|
/**
* Infers the format name from the profile.
* Introduced for backwards compatibility. Can be removed after
* the external framework is no longer supported
*
* @return the inferred format name
*/
public String inferFormatName() {
if (!StringUtils.isBlank(profile) && profile.contains(":")) {
return profile.split(":")[1];
}
return null;
} |
/*
 * Finds packets that were sent earlier and are now acked.
 * Notifies the listener that each packet was successfully delivered.
 * Removes the packets and returns them to the pool.
 */
void PacketManagerBase::HandleAcks(const TSet<uint32>& ackedPackets)
{
TArray<uint32> ackedReliableUIDs;
GetReliableUIDsFromAckedPackets(ackedPackets, ackedReliableUIDs);
TArray<SegmentInfo> segmentsAcked;
GetReliableSegmentInfosFromUIDs(ackedReliableUIDs, segmentsAcked);
TArray<NetworkSegment*> packetsToFree;
packetsToFree.Reserve(segmentsAcked.GetSize());
std::scoped_lock<SpinLock> lock(m_LockSegmentsToSend);
for (SegmentInfo& segmentInfo : segmentsAcked)
{
if (segmentInfo.Listener)
{
segmentInfo.Listener->OnPacketDelivered(segmentInfo.Segment);
}
m_SegmentsToSend[0].erase(segmentInfo.Segment);
m_SegmentsToSend[1].erase(segmentInfo.Segment);
packetsToFree.PushBack(segmentInfo.Segment);
}
#ifdef LAMBDA_CONFIG_DEBUG
m_SegmentPool.FreeSegments(packetsToFree, "PacketManagerBase::HandleAcks");
#else
m_SegmentPool.FreeSegments(packetsToFree);
#endif
} |
q=int(input())
l=[list(map(int,input().split())) for i in range(q)]
def jud(i):
    # primality test for i (trial division up to sqrt(i))
num=int(i**0.5)+1
for k in range(2,num):
if i%k==0:
return 0
return 1
s=[0]*(10**5+1)  # s[i] = count of valid odd numbers <= i (prefix sums)
for i in range(2,10**5+1):
if i%2==0:
s[i]=s[i-1]
else:
if jud(i)*jud((i+1)//2)==1:
s[i]=s[i-1]+1
else:
s[i]=s[i-1]
for k,v in l:
ans=s[v]-s[k-1]
print(ans)
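# Sanity check against the prefix sums above: the odd i in [3, 7] with both
# i and (i + 1) // 2 prime are 3 (pairs with 2) and 5 (pairs with 3), so a
# query (3, 7) should answer 2.
assert s[7] - s[2] == 2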
|
package com.kickstarter.libs.utils;
import com.kickstarter.KSRobolectricTestCase;
import org.junit.Test;
public class StringUtilsTest extends KSRobolectricTestCase {
@Test
public void testIsEmail() {
assertTrue(StringUtils.isEmail("<EMAIL>"));
assertFalse(StringUtils.isEmail("hello<EMAIL>"));
}
@Test
public void testIsEmpty() {
assertTrue(StringUtils.isEmpty(""));
assertTrue(StringUtils.isEmpty(" "));
assertTrue(StringUtils.isEmpty(" "));
assertTrue(StringUtils.isEmpty(null));
assertFalse(StringUtils.isEmpty("a"));
assertFalse(StringUtils.isEmpty(" a "));
}
@Test
public void testIsPresent() {
assertFalse(StringUtils.isPresent(""));
assertFalse(StringUtils.isPresent(" "));
assertFalse(StringUtils.isPresent(" "));
assertFalse(StringUtils.isPresent(null));
assertTrue(StringUtils.isPresent("a"));
assertTrue(StringUtils.isPresent(" a "));
}
@Test
public void testSentenceCase() {
assertEquals("", StringUtils.sentenceCase(""));
assertEquals("A", StringUtils.sentenceCase("a"));
assertEquals("Apple", StringUtils.sentenceCase("APPLE"));
assertEquals("Apple", StringUtils.sentenceCase("APple"));
assertEquals("Apple", StringUtils.sentenceCase("apple"));
assertEquals("Snapple apple", StringUtils.sentenceCase("Snapple Apple"));
assertEquals("Snapple apple snapple", StringUtils.sentenceCase("Snapple Apple Snapple"));
}
@Test
public void testTrim() {
assertEquals("", StringUtils.trim(""));
assertEquals("", StringUtils.trim(" "));
assertEquals("A", StringUtils.trim(" A"));
assertEquals("A", StringUtils.trim("A "));
assertEquals("A", StringUtils.trim(" A "));
assertEquals("", StringUtils.trim("\u00A0"));
assertEquals("A", StringUtils.trim("\u00A0A"));
assertEquals("A", StringUtils.trim("A\u00A0"));
assertEquals("A", StringUtils.trim("\u00A0A\u00A0"));
assertEquals("", StringUtils.trim("\u00A0 "));
assertEquals("A", StringUtils.trim("\u00A0 A"));
assertEquals("A", StringUtils.trim("A\u00A0 "));
assertEquals("A", StringUtils.trim("\u00A0 A \u00A0"));
}
}
|
Labour is attempting to rig the election against Jeremy Corbyn by “purging” genuine supporters on spurious grounds, one of the party’s former MPs has suggested.
Andrew Mackinlay said reports Left-wing supporters who expressed admiration for politicians from other parties are being kicked off the ballot could be a “ruse” to stop Mr Corbyn winning.
It comes after supporters who were refused a vote despite apparently genuinely backing the party took to social media to express outrage at what has been dubbed the “Labour purge”.
Labour Party HQ has been combing through 400,000 new people who registered to vote in the leadership race amid fears infiltrators from other parties have signed up to disrupt the contest.
As well as drawing up “exclusion lists” of former candidates who have stood against Labour in elections, staff are searching through social media activity to see if applicants support other parties.
Labour Party have emailed me after I have cast my vote for @Corbyn4Leader saying I am no longer a member of the party pic.twitter.com/vjIhOLT2lY — annie (@anniecxxx) August 19, 2015
However there has been outcry in recent days after Labour supporters who had expressed sympathy for or previously backed other parties have been stripped of their right to vote.
Speaking on BBC Radio Four’s Today programme, Mr Mackinlay – Labour’s MP for Thurrock from 1992 to 2010 – indicated he believed the party was rigging the system against Mr Corbyn.
Asked if the so-called purge was a “ruse” to scrap the election, Mr Mackinlay said: “It could be, it could be.
“But the point I want to make is that the architect of these daft rules wasn’t Jeremy Corbyn, it was all these bright things who were part of the [Tony] Blair era and particularly Ed Miliband.”
Pressed again on whether the system was “calculated to keep Jeremy Corbyn out”, Mr Mackinlay said: “I think that we would not be having this discussion this morning if all the indications were that Kendall or Cooper or Burnham were in the lead.”
Mr Mackinlay also suggested Labour politicians could use their power in the vetting process to kick out rivals from the party and settle scores – a claim that could open the race up to legal challenge.
“The Labour Party is sending applications for vetting by MPs and leading councillors. Every MP and councillor has somebody who irritates them. That actually in my view disqualifies them from deciding who should be members of the Labour Party.”
Assuming to be a member of a political party means complete compliance keeping my mouth shut or abstaining. Yes that the one — annie (@anniecxxx) August 19, 2015
The former MP also suggested people kicked off the ballot file subject access requests to the party to demand information on why they were excluded – a move that could see Labour flooded with requests after the election.
It came as he was joined on the programme by Jeremy Hardy, a British comedian stopped from voting after expressing support for the Green MP Caroline Lucas, who attacked the process as “rigged”.
“Do you know what, they are so desperate. It wouldn’t surprise me if they are hoping that someone is going to try and actually bring a court case against the Labour Party and actually put the whole thing on hold,” Mr Hardy said.
He added: “I think this is just such a mess. They are trying to stop Corbyn, they will do anything.”
@BeresfordMatt Hi Matt. I've been banned from voting in the Labour leadership election - in spite of donating to them pic.twitter.com/Vi9mIk3ZcW — Pete Sinclair (@pete_sinclair) August 20, 2015
Labour has said it blocks applications for three reasons: If the applicant is not registered to vote at the address given; if they have been excluded from membership in the past; and, crucially, if they do not support the “aims and values” of the party.
It is this third element which has proved most controversial, with staff members apparently able to define what that requirement means by – among other measures – scrolling through the social media activity of applicants.
Those people who signed up to vote as affiliated supporters (through their trade union) or registered supporters (who paid £3 to enter) cannot appeal if they are denied a vote, unlike those who signed up as full members.
A Labour Spokesperson said: “The Labour Party has a robust system to prevent fraudulent or malicious applications.
“All applications to join the Labour Party as a member, affiliate or supporter are verified and those who are identified by our verification team as being candidates, members or supporters of another political party will be denied a vote.” |
/**
 * Ehcache doesn't provide direct access to values in its API: values are encapsulated into {@link Element},<br/>
 * so for performance reasons this code is not very efficient if the cache contains a lot of items,<br/>
 * and even more so in a distributed caching configuration.
 */
@SuppressWarnings("unchecked")
@Override
public Collection<V> values() {
final List<Object> keys = cache.getKeys();
final Collection<V> result = new ArrayList<V>(keys.size());
V value;
for (final Object key : keys) {
value = get((K) key);
if (value != null) {
result.add(value);
}
}
return result;
} |
def hook_recenter(modifier=''):
pass |
import pandas as pd

def category_ratio(database, columns_to_check=None, num_categories=5):
category_ratio_df = pd.DataFrame()
if not columns_to_check:
columns_to_check = database.columns
for column_name in columns_to_check:
name = database[column_name].value_counts(normalize=True).index
values = database[column_name].value_counts(normalize=True).values
val_count_temp = [f"{category} : {round(frequent * 100, 2)}%" for category, frequent in zip(name, values)]
if len(val_count_temp) < num_categories:
val_count = ['0.00'] * num_categories
val_count[:len(val_count_temp)] = val_count_temp
else:
val_count = val_count_temp[:num_categories]
category_ratio_df[column_name] = val_count
return category_ratio_df.transpose().style.applymap(color_imbalanced) |
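# Hedged usage sketch; assumes the color_imbalanced styling helper referenced
# by category_ratio is defined in the surrounding module.
if __name__ == "__main__":
    demo = pd.DataFrame({"color": ["red", "red", "blue"]})
    styled = category_ratio(demo, columns_to_check=["color"], num_categories=2)
    # The "color" row reads: ["red : 66.67%", "blue : 33.33%"]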
/*Package gotest provides rich assertions for use within and beyond Go's `testing` package.
Quickstart
Grab the package and import it:
go get github.com/kindrid/gotest
import "github.com/kindrid/gotest"
import "github.com/kindrid/gotest/should"
Code normal `testing`-style tests, but use `gotest.Assert` and assertions found
in `should` like this:
gotest.Assert(t, actualJson, should.HaveFields,
"name", reflect.String,
"children", reflect.Map,
"hobbies", reflect.Slice)
Assertions are just functions that accept interfaces and return a non-empty string if there's an error. Look at `gotest.should.Assertion` for the signature, and at `should/doc.go` and `should/assertion.go` for more details.
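For example, a minimal custom assertion in that style might look like the
following sketch (an illustration, not an assertion shipped by the package):

    // ShouldBeEven returns "" on success and a failure message otherwise.
    func ShouldBeEven(actual interface{}, expected ...interface{}) string {
        n, ok := actual.(int)
        if !ok {
            return "ShouldBeEven expects an int"
        }
        if n%2 != 0 {
            return fmt.Sprintf("expected %d to be even", n)
        }
        return ""
    }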
Gotest adds some command-line options to your tests. You can see them via `go test . -args -help`. Note that gotest has its own verbosity flag which controls different aspects than `-test.v`.
Overview
GoTest plays well with "vernacular" Go `testing` tests, providing a rich set of
assertions to test HTTP Responses, JSON and JSON:API data, general equality,
numeric comparison, collections, strings, panics, types, and time.
Most of these rich assertions are provided by SmartyStreets' excellent assertion
library (https://github.com/smartystreets/assertions) which builds off Aaron
Jacobs' Oglematchers (https://github.com/jacobsa/oglematchers).
In addition, any SmartyStreets-style assertion can be used as is (see https://github.com/smartystreets/goconvey/wiki/Custom-Assertions).
Why
We like Go's stdlib `testing` because it's simple, fast, familiar to most Go
coders, has good tooling support, benchmark support, and coverage support.
In earlier versions of Go, we missed subtests for test organization and a more
BDD approach. We looked at GinkGo (`github.com/onsi/ginkgo`) and GoConvey
(github.com/smartystreets/goconvey/convey)--both with benefits--and chose
GoConvey because of the simple and consistent approach it took to writing
custom assertions. See the documenataion for "gotest/should" for more details on
that. It also had a pretty test runner.
But around the release of Go 1.7 we ran into some problems: the growth of
parameterized (table-driven) tests in our code didn't play well with GoConvey.
GoConvey's approach to building test suites made it hard to focus specific
subtests.
We also ran into an opportunity: `testing` now supported subtests. See
https://godoc.org/testing#hdr-Subtests_and_Sub_benchmarks. We were able to drop
a lot of fancy suite construction code and gain easier focussing on particular
tests. But we had come to appreciate GoConvey's excellent assertion pattern. We
considered moving to `github.com/stretchr/testify` and used its pattern for the
custom assertion wrapper (see `Assert` below). It also had a simple
custom-assertion pattern, but GoConvey's seemed simpler and more useful. We took
a sideways glance at GUnit (`github.com/smartystreets/gunit`) and Labix's
GoCheck (`gopkg.in/check.v1`). Very cool packages, but we wanted to stay closer
to `testing` with its new versatility.
*/
package gotest
|
/**
* List of template locations in form of RegEx located by custom locators that must not be validated as custom
* locators are not available at build time.
*/
public final class CustomTemplateLocatorPatternsBuildItem extends SimpleBuildItem {
private final Collection<Pattern> locationPatterns;
public CustomTemplateLocatorPatternsBuildItem(
Collection<Pattern> locationPatterns) {
this.locationPatterns = locationPatterns;
}
public Collection<Pattern> getLocationPatterns() {
return locationPatterns;
}
} |
/*
Copyright 2017 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package guru.qas.martini.spring.configuration;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanFactoryAware;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.beans.factory.config.AutowireCapableBeanFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Lazy;
import guru.qas.martini.runtime.event.json.DefaultFeatureSerializer;
import guru.qas.martini.runtime.event.json.DefaultHostSerializer;
import guru.qas.martini.runtime.event.json.DefaultMartiniResultSerializer;
import guru.qas.martini.runtime.event.json.DefaultStepImplementationSerializer;
import guru.qas.martini.runtime.event.json.DefaultStepResultSerializer;
import guru.qas.martini.runtime.event.json.DefaultSuiteIdentifierSerializer;
import guru.qas.martini.runtime.event.json.FeatureSerializer;
import guru.qas.martini.runtime.event.json.HostSerializer;
import guru.qas.martini.runtime.event.json.MartiniResultSerializer;
import guru.qas.martini.runtime.event.json.StepImplementationSerializer;
import guru.qas.martini.runtime.event.json.StepResultSerializer;
import guru.qas.martini.runtime.event.json.SuiteIdentifierSerializer;
import static com.google.common.base.Preconditions.checkState;
@SuppressWarnings("WeakerAccess")
@Configuration
@Lazy
public class JsonMarshallerConfiguration implements BeanFactoryAware {
protected AutowireCapableBeanFactory beanFactory;
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
checkState(AutowireCapableBeanFactory.class.isInstance(beanFactory),
"BeanFactory must be of type AutowireCapableBeanFactory but found %s", beanFactory.getClass());
this.beanFactory = AutowireCapableBeanFactory.class.cast(beanFactory);
}
@Bean
MartiniResultSerializer getMartiniResultSerializer(
@Value("${json.martini.result.serializer.impl:#{null}}") Class<? extends MartiniResultSerializer> impl
) {
return null == impl ?
beanFactory.createBean(DefaultMartiniResultSerializer.class) :
beanFactory.createBean(impl);
}
@Bean
SuiteIdentifierSerializer getSuiteIdentifierSerializer(
@Value("${json.suite.identifier.serializer.impl:#{null}}") Class<? extends SuiteIdentifierSerializer> impl
) {
return null == impl ?
beanFactory.createBean(DefaultSuiteIdentifierSerializer.class) :
beanFactory.createBean(impl);
}
@Bean
HostSerializer getHostSerializer(
@Value("${json.suite.identifier.host.serializer.impl:#{null}}") Class<? extends HostSerializer> impl
) {
return null == impl ?
beanFactory.createBean(DefaultHostSerializer.class) :
beanFactory.createBean(impl);
}
@Bean
FeatureSerializer getFeatureSerializer(
@Value("${json.feature.serializer.impl:#{null}}") Class<? extends FeatureSerializer> impl
) {
return null == impl ?
beanFactory.createBean(DefaultFeatureSerializer.class) :
beanFactory.createBean(impl);
}
@Bean
StepResultSerializer getStepResultSerializer(
@Value("${json.step.result.serializer.impl:#{null}}") Class<? extends StepResultSerializer> impl
) {
return null == impl ?
beanFactory.createBean(DefaultStepResultSerializer.class) :
beanFactory.createBean(impl);
}
@Bean
StepImplementationSerializer getStepImplementationSerializer(
@Value("${json.step.implementation.serializer.impl:#{null}}") Class<? extends StepImplementationSerializer> impl
) {
return null == impl ?
beanFactory.createBean(DefaultStepImplementationSerializer.class) :
beanFactory.createBean(impl);
}
}
|
Electromagnetic horizons and convex-spherical reflectionless absorber coatings
We mathematically demonstrate the necessity of an electromagnetic (EM) horizon to facilitate prescription of geometrically convex, spherically-shaped material shells whose doubly-anisotropic material blueprints correspond to reflectionless (i.e., scatter-free), primitively causal, and passive absorber media. Subsequently, we simulate the performance of more practical pseudo-reflectionless absorbers, demonstrating their absorption and scattering characteristics. There has long been interest in realizing broadband omnidirectional absorbers, akin to a perfectly matched layer (PML), that would conformally coat an arbitrarily-shaped scatterer. Building upon historic analysis that classifies convex reflectionless absorbers as primitively non-causal, we first invoke a more general energy conservation-based argument to explain more broadly the “failure” of convex PML media to effect reflectionless EM wave absorption. We next elucidate two specific manners in which convex, spherical PML medium coatings, with Drude-dispersive behavior, can also fail despite passing the mathematical “litmus test” routinely used in traditional causality-based analysis of convex PML media. Finally, numerical simulations show that convex spherical “pseudo” PML (P-PML) media are not reflectionless, but nonetheless can effect polarization-robust and omnidirectional absorption with low backscatter, encouraging further investigation into physically realizing P-PML media for an array of engineering applications, such as target concealment from mono-static sensors. |
/**
* Zoom map to the specified location.
*
* @param latlng
* @param zoomLevel
*/
protected void zoomMapToLocation(LatLng latlng, @Nullable Float zoomLevel) {
if (zoomLevel == null) {
zoomLevel = 17.0f;
}
mMap.animateCamera(CameraUpdateFactory.newLatLngZoom(latlng, zoomLevel));
} |
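    // Hedged usage sketch: a null zoom level falls back to the 17.0f default.
    protected void zoomToExampleLocationSketch() {
        zoomMapToLocation(new LatLng(37.7749, -122.4194), null);
    }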
A sense of rhythm, in my opinion, is the single most important aspect of guitar playing, the one which separates the good guitarists from the bad ones. I even feel that rhythm is more important than the choice of notes, believe it or not. After all, it’s not what you play that’s important – but how you play it. You could play the blues in an entirely wrong key but if your rhythmic phrasing is good, it will still sound good. Unfortunately, rhythm is an area which beginners tend to neglect. They prefer to practise fancy tapping techniques or sweep picking rather than getting down the basics of rhythm. And the result – sloppy guitar soloing.
So how can this be corrected?
You must train yourself to have a good sense of rhythm and the best way of doing this is by using a metronome. What I like to do is to play the major scale (or any other scale that you prefer) up and down the fretboard whilst keeping to the beat of the metronome. I would play the scale using quarter notes, eighth notes and sixteenth notes, and I would also try varying and combining the rhythmic phrases. Doing so trains you to recognize the differences between the different note durations. Once you’ve got the hang of these basic rhythms, you could try playing the scale in a swing rhythm or even triplets – the trick is to be aware of your rhythm and having full control of the timing of your notes. Remember – sloppy rhythm makes a sloppy guitarist! |
import argparse
import cv2
import sys
import os
import numpy as np
import mxnet as mx
import datetime
import img_helper
from config import config
import matplotlib.pyplot as plt
from essh_detector import ESSHDetector
from metric import LossValueMetric, NMEMetric
class Handler:
    def __init__(self, prefix, epoch, ctx_id=0):
        print('loading', prefix, epoch)
        if ctx_id >= 0:
            ctx = mx.gpu(ctx_id)
        else:
            ctx = mx.cpu()
        sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
        all_layers = sym.get_internals()
        sym = all_layers['heatmap_output']
        image_size = (128, 128)
        self.image_size = image_size
        model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
        model.bind(for_training=False, data_shapes=[('data', (1, 3, image_size[0], image_size[1]))])
        model.set_params(arg_params, aux_params)
        self.model = model
        self.detector = ESSHDetector('./essh-model/essh', 0)

    def trans_dot(self, trans1, trans2):
        trans1 = np.vstack((trans1, [0, 0, 1]))
        trans2 = np.vstack((trans2, [0, 0, 1]))
        trans21 = np.dot(trans2, trans1)[0:2]
        return trans21
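
    # get_maxpos picks the detection whose bounding-box center lies closest to
    # the image center; it assumes the target face is roughly centered in frame.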
    def get_maxpos(self, img, det):
        img_size = np.asarray(img.shape)[0:2]
        # bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
        img_center = img_size / 2
        offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1], (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
        offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
        # bindex = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
        bindex = np.argmin(offset_dist_squared)  # some extra weight on the centering
        return bindex
    def get_landmark(self, img, label, dataset, use_essh):
        # returns (predicted 68-point landmarks, transformed ground-truth label)
        if use_essh:
            ret = self.detector.detect(img, threshold=0.4)
            if ret is None or ret.shape[0] == 0:
                return None, None
            bindex = self.get_maxpos(img, ret)
            face = ret[bindex]
            bbox = face[0:4]
            points = face[5:15].reshape(5, 2)
            # b = bbox
            # cv2.rectangle(img, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (255, 255, 255))
            # for p in landmark:
            #     cv2.circle(img, (int(p[0]), int(p[1])), 1, (0, 0, 255), 2)
            # cv2.imshow("detection result", img)
            # cv2.waitKey(0)
            # for i in range(bbox.shape[0]):
            rimg, label2, trans1 = img_helper.preprocess(img, points, img.shape[0])
            ret2 = self.detector.detect(rimg, threshold=0.4)
            if ret2 is None or ret2.shape[0] == 0:
                return None, None
            bindex2 = self.get_maxpos(rimg, ret2)
            rimg, trans2 = img_helper.transform2(rimg, None, self.image_size[0], ret2[bindex2, 0:4], dataset)
        else:
            rimg, label2, trans1 = img_helper.preprocess(img, label, img.shape[0])
            rimg, trans2 = img_helper.transform2(rimg, label2, self.image_size[0], None, dataset)
        trans = self.trans_dot(trans1, trans2)
        # cv2.imshow("rimg", rimg)
        # cv2.waitKey(0)
        # img2 = cv2.cvtColor(rimg, cv2.COLOR_BGR2RGB)
        img2 = np.transpose(rimg, (2, 0, 1))  # 3*128*128, RGB
        input_blob = np.zeros((1, 3, self.image_size[1], self.image_size[0]), dtype=np.uint8)
        input_blob[0] = img2
        data = mx.nd.array(input_blob)
        db = mx.io.DataBatch(data=(data,))
        self.model.forward(db, is_train=False)
        alabel = self.model.get_outputs()[-1].asnumpy()[0]
        IM = cv2.invertAffineTransform(trans)
        landmark = np.zeros((68, 2), dtype=np.float32)
        for i in range(alabel.shape[0]):  # range, not Python 2's xrange
            a = cv2.resize(alabel[i], (self.image_size[1], self.image_size[0]))
            ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
            point = (ind[1], ind[0], 1.0)  # w, h
            point = np.dot(IM, point)
            landmark[i] = point[0:2]
            npt = img_helper.transform_pt(label[i], trans)
            if config.landmark_type == '2d':
                npt = np.floor(npt)
            else:
                npt = np.round(npt)
            point = (npt[0], npt[1], 1.0)
            point = np.dot(IM, point)
            label[i] = point[0:2]
        return landmark, label
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='test nme on rec data')
    # general
    parser.add_argument('--dataset', default='ibug', help='test dataset name')
    parser.add_argument('--prefix', default='./models/model-hg2d3-cab/model', help='model prefix')
    parser.add_argument('--epoch', type=int, default=0, help='model epoch')
    parser.add_argument('--gpu', type=int, default=0, help='')
    parser.add_argument('--landmark-type', default='2d', help='')
    parser.add_argument('--image-size', type=int, default=128, help='')
    # store_true instead of type=bool: argparse's bool() treats any non-empty
    # string (including 'False') as True
    parser.add_argument('--use-essh', action='store_true', help='')
    args = parser.parse_args()
    if args.dataset == 'ibug':
        rec_path = '/media/3T_disk/my_datasets/sdu_net/data_2d/ibug.rec'
    elif args.dataset == 'cofw_testset':
        rec_path = '/media/3T_disk/my_datasets/sdu_net/data_2d/cofw_testset.rec'
    elif args.dataset == '300W':
        rec_path = '/media/3T_disk/my_datasets/sdu_net/data_2d/300W.rec'
    else:
        rec_path = '/media/3T_disk/my_datasets/sdu_net/data_3d/AFLW2000-3D.rec'
    dataset = args.dataset
    ctx_id = args.gpu
    prefix = args.prefix
    epoch = args.epoch
    use_essh = args.use_essh
    image_size = (args.image_size, args.image_size)
    config.landmark_type = args.landmark_type
    config.input_img_size = image_size[0]
    config.use_essh = args.use_essh
    handler = Handler(prefix=prefix, epoch=epoch, ctx_id=ctx_id)
    idx_path = rec_path[0:-4] + ".idx"
    imgrec = mx.recordio.MXIndexedRecordIO(idx_path, rec_path, 'r')
    seq = list(imgrec.keys)
    _metric = NMEMetric()
    nme = []
    miss = 0
    for img_idx in seq:
        if img_idx % 10 == 0:
            print('processing %d' % img_idx)
        s = imgrec.read_idx(img_idx)
        header, img = mx.recordio.unpack(s)
        try:
            img = mx.image.imdecode(img).asnumpy()
        except Exception:
            continue
        # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        hlabel = np.array(header.label).reshape((68, 2))
        hlabel = hlabel[:, ::-1]  # convert to X/W first
        preds, label = handler.get_landmark(img, hlabel, dataset, use_essh)
        if preds is None:
            print('no face detected %d' % img_idx)
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            cv2.imwrite('sample-images/miss/%d.jpg' % img_idx, img)
            miss += 1
            continue
        # label = hlabel[np.newaxis, :, :]
        # pred_label = preds[np.newaxis, :, :]
        _nme = _metric.calculate_nme(label, preds)
        nme.append(_nme)
        # visualize landmark
        # plt.imshow(img)
        # plt.plot(preds[0:17,0],preds[0:17,1],marker='o',markersize=1,linestyle='-',color='w',lw=0.5)
        # plt.plot(preds[17:22,0],preds[17:22,1],marker='o',markersize=1,linestyle='-',color='w',lw=0.5)
        # plt.plot(preds[22:27,0],preds[22:27,1],marker='o',markersize=1,linestyle='-',color='w',lw=0.5)
        # plt.plot(preds[27:31,0],preds[27:31,1],marker='o',markersize=1,linestyle='-',color='w',lw=0.5)
        # plt.plot(preds[31:36,0],preds[31:36,1],marker='o',markersize=1,linestyle='-',color='w',lw=0.5)
        # plt.plot(preds[36:42,0],preds[36:42,1],marker='o',markersize=1,linestyle='-',color='w',lw=0.5)
        # plt.plot(preds[42:48,0],preds[42:48,1],marker='o',markersize=1,linestyle='-',color='w',lw=0.5)
        # plt.plot(preds[48:60,0],preds[48:60,1],marker='o',markersize=1,linestyle='-',color='w',lw=0.5)
        # plt.plot(preds[60:68,0],preds[60:68,1],marker='o',markersize=1,linestyle='-',color='w',lw=0.5)
        # plt.plot(label[0:17,0],label[0:17,1],marker='o',markersize=1,linestyle='-',color='y',lw=0.5)
        # plt.plot(label[17:22,0],label[17:22,1],marker='o',markersize=1,linestyle='-',color='y',lw=0.5)
        # plt.plot(label[22:27,0],label[22:27,1],marker='o',markersize=1,linestyle='-',color='y',lw=0.5)
        # plt.plot(label[27:31,0],label[27:31,1],marker='o',markersize=1,linestyle='-',color='y',lw=0.5)
        # plt.plot(label[31:36,0],label[31:36,1],marker='o',markersize=1,linestyle='-',color='y',lw=0.5)
        # plt.plot(label[36:42,0],label[36:42,1],marker='o',markersize=1,linestyle='-',color='y',lw=0.5)
        # plt.plot(label[42:48,0],label[42:48,1],marker='o',markersize=1,linestyle='-',color='y',lw=0.5)
        # plt.plot(label[48:60,0],label[48:60,1],marker='o',markersize=1,linestyle='-',color='y',lw=0.5)
        # plt.plot(label[60:68,0],label[60:68,1],marker='o',markersize=1,linestyle='-',color='y',lw=0.5)
        # plt.axis('off')
        # plt.show()
    print('total miss num is %d' % miss)
    print('nme on %s is %.3f%%' % (dataset, np.mean(nme) * 100))
// TODO: Need to add tests for repeated POSTs == updates.
@Test
public void testPost() {
DCAEServiceTypeRequest minimalFixture = new DCAEServiceTypeRequest();
minimalFixture.setTypeName("abc");
minimalFixture.setTypeVersion(1);
minimalFixture.setOwner("tester");
minimalFixture.setBlueprintTemplate("{ blueprint template goes here }");
DCAEServiceTypeRequest fullFixture = new DCAEServiceTypeRequest();
fullFixture.setTypeName("def");
fullFixture.setTypeVersion(1);
fullFixture.setOwner("tester");
fullFixture.setBlueprintTemplate("{ blueprint template goes here }");
fullFixture.setAsdcServiceId("4bb4e740-3920-442d-9ed3-89f15bdbff8a");
fullFixture.setAsdcResourceId("3ea9dfae-a00d-4da8-8c87-02a34de8fc02");
fullFixture.setVnfTypes(Arrays.asList(new String[] { "vnf-marble", "vnf-granite" }));
fullFixture.setServiceIds(Arrays.asList(new String[] { "service-alpha", "service-bravo" }));
fullFixture.setServiceLocations(Arrays.asList(new String[] { "New York", "Washington" }));
for (DCAEServiceTypeRequest fixture : new DCAEServiceTypeRequest[] {minimalFixture, fullFixture}) {
try {
Response response = api.dcaeServiceTypesTypeIdPost(fixture, uriInfo, null);
DCAEServiceType serviceType = (DCAEServiceType) response.getEntity();
assertTrue("POST - 200 test case failed", matchTypeVsTypeRequest(serviceType, fixture, URL_PATH));
} catch (Exception e) {
throw new RuntimeException("Unexpected exception: post new 200", e);
}
}
}
package br.com.alura.leilao.api.retrofit.client;

/**
 * Callback for asynchronous API responses: {@code sucesso} receives the
 * parsed response body, {@code falha} receives an error message.
 */
public interface RespostaListener<T> {
    void sucesso(T resposta);
    void falha(String mensagem);
}
Research and Optimization of Computer Intelligent Evaluation Model under C2C Mode by Fuzzy Matter-Element
This paper takes the C2C model as an example and establishes a credit evaluation model suited to the characteristics of C2C e-commerce, so as to reduce risk costs in e-commerce trade. The model combines the advantages of the Hamming nearness degree and variable weight theory and proposes a variable entropy weight design based on fuzzy matter-element analysis, which is simple to operate, widely applicable, and close to reality. The empirical analysis shows that the model computes the credit levels of different e-commerce sellers and effectively solves the incompatibility of multiple indices in e-commerce credit evaluation. It provides a feasible method for selecting the best supplier and improving scientific decision-making, and it has good application prospects.
Introduction
The C2C (Consumer to Consumer) e-commerce model is a form of economic activity transacted directly between consumers. Thanks to its good development prospects, wide range of applications, and flexible, convenient transactions, the model has become very popular among consumers. Accompanying this popularity is the model's credit risk problem: its high uncertainty, virtuality, and concealment easily trigger credit crises in online procurement, hindering the healthy development of the e-commerce market. E-commerce credit risk usually refers to the potential losses one side of a transaction may cause the other by refusing to execute the trade contract, whether for subjective reasons or through force majeure. To effectively control the credit risk of C2C e-commerce, it is therefore necessary to establish a C2C credit evaluation system and carry out effective credit evaluation so that the best suppliers can be selected scientifically; this is of great significance for improving the credit evaluation mechanism of C2C e-commerce in China.
Combing through the relevant literature, academic research on e-commerce credit evaluation mainly focuses on two aspects: the establishment of the index system and the choice of evaluation methods. Regarding the index system, its structure has been continuously optimized and improved as research deepens, with growing attention to functional integrity and the overall balance of indicator design from different angles. Regarding evaluation methods, earlier research mostly used single methods such as the analytic hierarchy process (AHP), the entropy method, and factor analysis. However, although AHP is simple to apply, its index selection is strongly subjective and its weight design bears heavy traces of artificial assignment; and although the entropy method and factor analysis embody the objective data characteristics of the indices, their conclusions are usually weak at explaining the problems clearly.
At the same time, e-commerce qualifications under the C2C model are uneven, market supervision is insufficient, and a standardized, unified credit evaluation system is lacking. There is relatively little literature analyzing C2C as a distinct e-commerce model; existing credit evaluations balance subjectivity and objectivity poorly, lack effective empirical analysis, and reach unconvincing conclusions.
Building on the strengths of traditional single evaluation methods, this paper introduces the Hamming nearness degree and variable weight theory and establishes a variable entropy weight fuzzy matter-element model, providing new ideas and methods for e-commerce credit evaluation and supplier selection under the C2C mode. The Hamming nearness degree is simple to calculate and widely applicable, while variable weight theory continuously corrects the empirical weights by mining the data, yielding interpretable and persuasive conclusions that make supplier selection more scientific and concise.
Establishment of Evaluation Index System for C2C Electronic Commerce Credit
E-commerce credit evaluation is usually determined by combining qualitative analysis and quantitative analysis. Compared with other models, the C2C e-commerce model is the most risky. The risks mainly come from the seller's credit risks arising from the process of information screening, commodity ordering, customer evaluation, and quality assurance services. Therefore, this paper constructs the C2C credit evaluation index system for the seller's subject as the evaluation object shall be shown as Figure 1. 3 C2C e-commerce credit evaluation should focus on the main information data of e-commerce trade, and form effective information indicators through data analysis. This paper divides the overall objectives into two major evaluation dimensions according to the basic idea of system decomposition: static indicators and dynamic indicators. Static indicators are qualitative evaluation indicators, mainly reflecting whether the basic qualification information of e-commerce is compliant, complete, true, and accurate. Dynamic index is a quantitative evaluation index, which mainly collects, collates, and counts various quantifiable information that occurred in the course of e-commerce transactions and customer evaluation feedback, and forms an evaluation basis. According to the indicators status of basic information, transaction information, evaluation information and technical information, it is possible to completely restore the entire process from the establishment of qualifications to the development of trade activities of an e-commerce entity, which better reflects the true credit level of the merchants. The objective data of commercial credit evaluation, so the evaluation index system established in Figure 1 is scientific and comprehensive.
Basic Principle of Fuzzy Matter-Element
The main feature of fuzzy matter-element analysis is that it turns the incompatibility problems that readily appear in multi-index evaluation into compatible ones, so the problems become easy to solve. In the real world, quantitative matter-elements usually have fuzzy properties, which creates a new fuzzy compatibility problem. Introducing fuzzy mathematics into matter-element analysis yields what is called the fuzzy matter-element analysis method, which is characterized by simple calculation, reliable conclusions, and strong adaptability.
Fuzzy Matter-Element and Composite Fuzzy Matter-Element.
A matter-element is an ordered triple R = (N, C, V) describing a thing, where N is the thing's name, C its characteristic, and V the quantitative value. When V has fuzzy properties, R constitutes a fuzzy matter-element. For a thing with n characteristics c_1, c_2, ..., c_n and corresponding quantitative values v_1, v_2, ..., v_n, R is called an n-dimensional fuzzy matter-element. Combining the n-dimensional matter-elements of m things gives the n-dimensional composite matter-element of m things, denoted R_nm. Furthermore, if the quantitative values of R_nm have fuzzy attributes, it is called the n-dimensional composite fuzzy matter-element of m things.
Constructing the composite fuzzy matter-element is actually a process of setting a membership function to determine degrees of membership, generally from the principle of the optimal degree of membership. For cost-type and benefit-type indices a min-max form can be used: u_ij = (v_ij - m_i) / (M_i - m_i) for a benefit index and u_ij = (M_i - v_ij) / (M_i - m_i) for a cost index, where m_i and M_i are the lower and upper limits of the i-th index over all evaluation objects and u_ij reflects the degree of membership of the j-th evaluation object on the i-th index. For comment-type indices, this paper follows research practice and takes (excellent, good, medium, low, poor) = (1, 0.8, 0.6, 0.4, 0). According to the principle of optimal membership, the maximum membership of each evaluation index is selected to form the standard fuzzy matter-element, denoted R0_nm.
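For concreteness, the normalization step can be sketched in a few lines of Python. The index names and figures below are invented for illustration, not the paper's data, and the min-max membership form is the assumption stated above.

```python
import numpy as np

def memberships(values, kind):
    # optimal-degree membership of one index across the m evaluation objects;
    # 'benefit' means larger is better, 'cost' means smaller is better
    v = np.asarray(values, dtype=float)
    m, M = v.min(), v.max()
    if M == m:  # degenerate index: all objects score equally
        return np.ones_like(v)
    return (v - m) / (M - m) if kind == 'benefit' else (M - v) / (M - m)

# toy data for three suppliers on two indices (made-up numbers)
mu_success = memberships([0.92, 0.85, 0.78], 'benefit')  # transaction success rate
mu_return = memberships([0.04, 0.09, 0.06], 'cost')      # return rate
R = np.vstack([mu_success, mu_return])  # composite fuzzy matter-element (n x m)
R0 = R.max(axis=1)                      # standard fuzzy matter-element
```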
Principle and Weight Design of Variable Entropy Value
In e-commerce credit evaluation, weight coefficients must be allocated scientifically to measure the differing importance of each evaluation index. This paper applies the variable entropy weight principle for the weight design. Variable entropy is a combined concept consisting of entropy theory and variable weight theory: entropy theory determines the constant weights, variable weight theory determines the variable weights, and a reasonable superposition of the two forms the comprehensive variable entropy weight.
Constant Weight Defined with Entropy Method.
The higher the order of a system, the smaller its entropy and the greater the amount of information it contains; conversely, the more disordered the system, the greater the entropy and the lower the information content. In general, for the evaluation objects j = 1, 2, ..., m, define p_ij = u_ij / SUM_j u_ij, and the entropy of the i-th index as e_i = -(1 / ln m) SUM_j p_ij ln p_ij; the constant weight vector W = (w_1, w_2, ..., w_n) then follows as w_i = (1 - e_i) / SUM_k (1 - e_k). The variable weight is obtained from the constant weight according to the multiplication rule, w_i(u) = w_i s_i(u_i) / SUM_k w_k s_k(u_k), where u_i is the optimal membership value of the i-th evaluation indicator and s_i is the state variable weight function.
The expression of w_i^(b) in the local state variable vector is a piecewise function governed by four parameters: the negative level, the penalty level, the incentive level, and the adjustment level c; their values are taken according to the literature. In Table 1, "E" stands for "excellent" and "G" for "good".
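Assuming the standard entropy-weight formulas given above, the constant weights can be computed directly from the membership matrix R of the previous sketch:

```python
def entropy_weights(R):
    # R: n x m membership matrix (n indices, m evaluation objects);
    # returns w with w_i = (1 - e_i) / sum_k (1 - e_k)
    n, m = R.shape
    P = R / R.sum(axis=1, keepdims=True)     # normalize each index row
    logP = np.where(P > 0, np.log(P), 0.0)   # treat 0 * log(0) as 0
    e = -(P * logP).sum(axis=1) / np.log(m)  # entropy of each index
    d = 1.0 - e                              # degree of divergence
    return d / d.sum()

w = entropy_weights(R)
```

This sketch computes the constant weights only; the variable-weight correction would further rescale w by the state variable weight function s_i before renormalizing.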
Hamming Nearness Degree and Comprehensive Evaluation
In the above table, indicators U33-U35 are qualitative evaluation indicators: the five experts of the procurement expert group weigh the relevant information for each, judge it, and give corresponding comments for a comprehensive evaluation. Indicators U23-U25, U32, and U42 are cost-type indicators, and the remaining indicators are benefit-type indicators.
Case Solving
From the data in Table 1, the cost-type, benefit-type, and comment-type indices are fuzzy-normalized according to the principle of optimal membership, yielding the composite fuzzy matter-element R_nm and the standard fuzzy matter-element R0_nm. Applying the variable entropy weight principle together with Eqs. (6)-(9), the variable entropy weights of the fourth-level evaluation indices are determined for the three suppliers I, II, and III; the specific results are detailed in Table 2. Obviously, the greater the Hamming nearness degree, the better the scheme. Therefore, supplier I should be chosen as the relatively optimal supplier.
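The ranking step is equally compact. The sketch below uses the common weighted Hamming form rho_j = 1 - SUM_i w_i |R0_i - R_ij|, which is an assumption about the paper's exact Eqs. (6)-(9); R, R0, and w come from the previous sketches.

```python
def hamming_nearness(R, R0, w):
    # weighted Hamming nearness of each object to the standard matter-element
    return 1.0 - (w[:, None] * np.abs(R0[:, None] - R)).sum(axis=0)

rho = hamming_nearness(R, R0, w)
best = int(np.argmax(rho))  # larger nearness degree means a better scheme
print('nearness degrees:', rho, '-> choose supplier', ['I', 'II', 'III'][best])
```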
Conclusion Analysis
Further analysis of the distance matrix H shows that suppliers I, II, and III each have room to improve their credit evaluation. For supplier I, the indicators needing improvement, in descending order of importance, are U22, U44, and U23, i.e. "Success Rate of E-commerce Transactions", "User Forwarding Rate of E-commerce Links", and "Arrears Rate of E-commerce Transactions": the supplier should identify the reasons for transaction failures, strengthen the promotion of its commodity brands, and tighten the management of funds. For supplier II, the key indicators are U11, U42, and U32, i.e. "Completeness of Business Registration Information", "Return Rate of E-commerce Transactions", and "Complaint Rate of E-commerce Transactions"; in particular, effort is required to reduce the return and complaint rates so as to improve the quality of e-commerce products and services. For supplier III, the key indicators are U22, U13, and U11, i.e. "Success Rate of E-commerce Transactions", "Completeness of Declaration and Contact Information", and "Completeness of Industrial and Commercial Registration Information". These show up as a high transaction-failure rate and missing static qualification information; the focus is on determining whether the missing qualification information is the direct cause of the customer trust crisis and hence of the failed transactions, and the missing information should be supplemented as soon as possible.
Conclusion
With the rapid development of network technology and the emergence of new transaction patterns, C2C e-commerce keeps expanding, and the credit problem in e-commerce transactions has increasingly become a bottleneck restricting its development. To solve the incompatibility of multi-index assessment, the Hamming nearness degree is introduced and a fuzzy matter-element model is established; to overcome the defects of traditional subjective weighting, the variable entropy weight is designed using variable weight theory. Empirical testing shows that the method effectively mines C2C e-commerce credit information, analyzes the credit strengths and weaknesses of different merchants, and achieves a good comprehensive evaluation effect. The conclusions are highly credible, and the approach is a scientific and practical C2C e-commerce credit evaluation method with bright application prospects.
The Woman Envelope is alluringly oversized, a creamy hue like an invitation to a wedding or other formal event. The return address is “Official Campaign Headquarters” in an engraved typeface: Women Like Fancy Parties! Underneath is Clinton’s full signature in an autograph-style typeface. Women Like Personal Touches! It features the phrase “[Insert Name] we are in this together…” over the Woman’s address, speaking to our better, cooperative feminine nature. Don’t forget the stamp: The Woman Envelope stamp features a field of stars, you know, cooperating together.
It began with the envelopes. They arrived in our mailbox on the same day a few weeks ago. One was for me (hereafter referred to as “Woman.”) The other was addressed to my husband (hereafter referred as “Man.”) Both envelopes were from Hillary Rodham Clinton asking us to participate in her campaign for President of the United States of America. The Asks, however, could not be more different, nor could the overall presentation. No detail is left to chance — from typefaces to images, paper choices to the very language itself — and all seem taken straight out of a 1950’s ad agency playbook on how best to communicate via gender stereotypes. This Direct Mail campaign is a Direct Fail. To find out why I’m Not With Her, (at least until I have no other choice,) read on:
The Man Envelope is a stark opposite of the Woman Envelope. It has a no-nonsense format; a standard American business-sized affair in workaday white. Its return address is simply Clinton’s name in a familiar, serif typeface. The tagline is distinctly macho: “[Insert Name], we’ve got a fight on our hands…” Note the “bloody” stripes over half the stamp. No more fooling around, Man.
The Man Envelope (Front)
The Man Envelope features additional information on the reverse. Men Like Additional Information. Unlike the Woman Envelope which is simply shows the Hillary Victory Fund mailing address, the Man Envelope includes many bells & whistles. A modern logo, in two-color ink suggests corporate efficiency, because Men Like Business. There is an additional call to action — “My plan to help America’s families is enclosed — but it won’t get done without you!” — because Men Take Action. There is even a QR code and URL because Men Love Technology.
The Man Envelope (Rear)
2. The Letters
The Woman Letter is only two pages long. The opener, though, is long on “feelings” because Women Love Feelings: It references the “worries, concerns, fears, and hopes” of Americans, and Clinton assures us she has feelings too. She is “honored” and “heartbroken” at turns.
The Woman Letter quickly launches into personal stories about Americans — a librarian, a waitress, and families of victims of gun violence and environmental racism — because Women Are Personal.
The first emphasis line, underlined, notes that “…we will only succeed if we work together.” Women Love Togetherness.
Clinton then conjures a few more bad-but-relatable scenarios — sick kids, impoverished parents — and a few more feelings — angry, defeated, and betrayed — to drive home the need for Togetherness.
Only then do women find out what this campaign is: It is a fight. This is only mentioned once. But don’t be scared, because if Clinton wins, women won’t be alone. Women Hate Loneliness. And it’s a two-way road, because Clinton tells us “I cannot do it alone.”
Again, women are reminded that, “We have to bring people together to find ways forward…” Still yet, at the letter’s end, (because Women Are Slow on the Uptake!) we are encouraged: “Let’s do this together.”
Somewhere towards the finale there is paragraph on Clinton’s actual policy achievement and experience. Five quick examples, because Women Hate Substance.
The P.S. offers one more hit of Togetherness, and request that women share the Plans for the Future one-pager with our friends, because Women Love Sharing.
Finally, the salutation is “Thank you,” because Women Appreciate Gratitude.
The Woman Letter (Page One)
The Woman Letter (Page Two)
In case women didn’t get the Togetherness concept in the body of the letter, it is repeated in bold type at the top of the friend-sharable platform primer page. Remember, Women Can Be Dense.
The Woman Platform Primer (Header)
The Woman Platform Primer (Body)
In stark contrast, the Man Letter is a four-page opus, because Men Have Stamina. It is heavy on strong words: The word “fight” appears over five times because Men Like Fighting. There are many other firm words and phrases to appeal to the Fighting Man: “smear” “stark choices” “rip away our progress” “drag us backward” “attack” “surrender” “resist the forces” “destroy the barriers,” etc. Men Are Tough; they can handle this language.
Unlike the Woman Letter, the Man Letter references The Enemy multiple times. The Enemy includes a primary opponent with fundraising savvy and Republicans with political savvy. Men Want to Know the Enemy. There is a detailed discussion of the Republican Enemy’s tactics and analysis of how a Clinton win will affect Congressional and State-level politics as well. Men Like Details. Men Can Handle the Big Picture so much so that putting the platform primer right in the body of the letter won’t overwhelm them.
The donation Clinton requests isn’t really a donation. It is “an investment.” Men Are Good With Money. Along these same lines, there are many references to work, from the things Clinton has “worked toward” to how she can “get the job done.” Men Know From Work.
Men Also Want to Feel Important. Clinton’s letter acknowledges this by giving Men their very own place (on the reply/donation card) to “let me know what’s on your mind,” assuring them, “You’re a big part of [the] conversation…”
Because Men Like a Damsel in Need of Help, Clinton comes right out and asks for it: “So right now I need your help…” She also uses the closer, “Warmest Regards,” because Men Like Warm Women.
The Man Letter (Page One)
The Man Letter (Page Two)
The Man Letter (Page Three)
The Man Letter (Page Four)
3. Bonus Material
The Woman Letter comes with a full-color postcard of Clinton wearing pink. Women Like Pink. She is shown in profile, gazing at an unseen audience, or perhaps into the pleasantly vague distance. Women Don’t Like to Be Confronted Directly.
The Woman Postcard (Front)
On the back, there is a “handwritten” note from Clinton asking women for “your friendship and support!” Women Like a Personal Touch. Women Are Loyal to Their Friends.
The Woman Postcard (Back)
The Man Letter contains no such bonus material. That would be unprofessional. Men Are Not Unprofessional.
4. The Reply Card
The Woman Reply Card is not just a generic reply. It is a “Personal Reply to Hillary” back in that Fancy engraved typeface. Women Like Fancy Things. And it makes sure to drive home the Togetherness theme, and draws on Women’s Gift of Gab by asking them to “talk with [your] friends and neighbors.” Don’t worry. We will.
The Woman Reply Card (Front)
The Woman Reply Card (Back)
The Man Reply Card Features a color photo of Clinton wearing black, because black means business. Men Like Business. She is looking directly at the Man. Men Can Handle Directness.
Behind her are those “bloody” stripes again, and the tough talk continues: “Defeat,” “fight,” “got your back,” “battles.” Men Are Tough. And there is a challenge for the Man to make his “most generous contribution” possible because Men Are Good Providers and Men Like a Challenge.
The bottom of the Man Reply Card offers ample room for the Man to express himself. Men Have Important Thoughts.
The Man is also asked to volunteer. Men Take Action. The Women are asked no such thing. Women Are Too Busy/Lazy/Uninformed/Other.
The Man Reply Card (Front)
The back of the Man Reply Card features the QR code again, in two-color printing because remember, Men Love Technology.
The Man Reply Card (Back)
I hope that the next President of the United States will realize that words and images have power.
I hope that person also understands that women and men deserve the same treatment in all areas, even in Direct Mail Campaign Solicitations.
Women Love Equality.
Mythgard Institute is offering 2 FREE online lectures in their Guest Speaker Series, but virtual seating in the webinars is limited, so be sure to visit the Mythgard Institute Guest Speaker series site to register.
Here’s more info (from the Mythgard site):
David Brin will give his talk “Can Science Fiction Change the World?” for the Mythgard Academy Guest Lecture Series on Saturday, July 25, 2015 at 6:00 pm ET.
Amy H. Sturgis will give her talk “The Jedi, the Cowboy, and… Thomas Edison?: Pulp Science Fiction and Star Wars” for the Mythgard Academy Guest Lecture Series on Saturday, Aug. 15 at 3:00 pm ET.
Synopsis: What images come to mind when you think of Star Wars? Luke Skywalker watching the twin suns set on Tatooine? Princess Leia with a blaster in her hand and buns on her head? The glow of a lightsaber in the darkness? These visuals convey volumes, and they spring in part from a common origin.
One of the keys to the worldwide success of Star Wars is that the saga draws from a variety of global sources, both classical and contemporary. Join Dr. Amy H. Sturgis as she discusses one particular tradition that has left its indelible imprint on the Star Wars franchise. How did pulp science fiction evolve? What is the relationship between this genre and the Western? And how can tracing the pulp ancestry of Star Wars give us new insights on key moments and messages across the Star Wars canon — and quite possibly shed light on the forthcoming film The Force Awakens? Star Wars fans and newbies alike are welcome!
// From King0987654/windows2000: private/net/sockets/winsock2/dll/ws2_rp/msrlsp/dcatitem.h
/*++
Copyright (c) 1996 Intel Corporation
All Rights Reserved
Permission is granted to use, copy and distribute this software and
its documentation for any purpose and without fee, provided, that
the above copyright notice and this statement appear in all copies.
Intel makes no representations about the suitability of this
software for any purpose. This software is provided "AS IS."
Intel specifically disclaims all warranties, express or implied,
and all liability, including consequential and other indirect
damages, for the use of this software, including liability for
infringement of any proprietary rights, and including the
warranties of merchantability and fitness for a particular purpose.
Intel does not assume any responsibility for any errors which may
appear in this software nor any responsibility to update it.
Module Name:
dcatitem.h
Abstract:
This file contains the class definition for the PROTO_CATALOG_ITEM class.
This class defines the interface to the entries that can be installed and
retrieved in the protocol catalog.
Author:
<EMAIL>
Notes:
$Revision: 1.4 $
$Modtime: 15 Jul 1996 15:41:08 $
Revision History:
most-recent-revision-date email-name
description
--*/
#ifndef _DCATITEM_
#define _DCATITEM_
#include <windows.h>
#include "llist.h"
#include "fwdref.h"
class PROTO_CATALOG_ITEM {
public:
PROTO_CATALOG_ITEM();
INT
Initialize(
LPWSAPROTOCOL_INFOW ProtoInfo
);
~PROTO_CATALOG_ITEM();
LPWSAPROTOCOL_INFOW
GetProtocolInfo();
PWCHAR
GetLibraryPath();
VOID
SetProvider(
IN PRPROVIDER Provider
);
PRPROVIDER
GetProvider();
LIST_ENTRY m_CatalogLinkage;
// Used to link items in catalog. Note that this particular member
// variable is in the public section to make it available for manipulation
// by the catalog object.
private:
WCHAR m_LibraryPath[MAX_PATH];
// Fully qualified path to the provider's DLL image.
WSAPROTOCOL_INFOW m_ProtoInfo;
// The cataloged WSAPROTOCOL_INFOA structure. This is typically used for
// comparison when selecting a provider by address family, socket
// type, etc.
PRPROVIDER m_Provider;
// Pointer to the RPROVIDER object attached to this catalog entry.
}; // class PROTO_CATALOG_ITEM
#endif // _DCATITEM_
/**
 * This method removes all the assertions where the item specified is
 * a subject or an object.
 *
 * @param item the item related to the statements to remove.
 * @return all the removed statements.
 */
protected Set<Statement> removeAllAssertions(EARMARKItem item) {
    Set<Statement> result = new HashSet<Statement>();
    result.addAll(getAssertionsAsSubject(item));
    result.addAll(getAssertionsAsObject(item));
    for (Statement statement : result) {
        rdf.remove(statement);
    }
    return result;
}
def crop_vardict_to_period(vardict, sdate, edate):
    # boolean mask selecting the entries whose datetime lies in [sdate, edate]
    dt = np.array(vardict['datetime'])
    mask = (dt >= sdate) & (dt <= edate)
    for key in vardict:
        # metadata keys carry no time series and are left untouched;
        # 'datetime' itself is cropped last so the mask stays valid
        if key not in ('time_unit', 'meta', 'datetime'):
            vardict[key] = list(np.array(vardict[key])[mask])
    vardict['datetime'] = list(dt[mask])
    return vardict
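
# Usage sketch (names are illustrative, not from the original module):
#
#   from datetime import datetime
#   vardict = {'time_unit': 'seconds since 1970-01-01',
#              'datetime': dts, 'Hs': hs_values}
#   vardict = crop_vardict_to_period(vardict,
#                                    datetime(2020, 1, 1),
#                                    datetime(2020, 1, 31))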
package ovh
import (
"context"
"fmt"
"io/ioutil"
"path/filepath"
"strings"
"time"
"github.com/minectl/pkg/update"
ovhsdk "github.com/dirien/ovh-go-sdk/pkg/sdk"
"github.com/minectl/pkg/automation"
"github.com/minectl/pkg/common"
minctlTemplate "github.com/minectl/pkg/template"
)
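// OVHcloud bundles the OVH Public Cloud SDK client with the template
// renderer used to generate the provisioning user data for new servers.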
type OVHcloud struct {
client *ovhsdk.OVHcloud
tmpl *minctlTemplate.Template
}
func NewOVHcloud(endpoint, appKey, appSecret, consumerKey, serviceName, region string) (*OVHcloud, error) {
client, err := ovhsdk.NewOVHClient(endpoint, appKey, appSecret, consumerKey, region, serviceName)
if err != nil {
return nil, err
}
tmpl, err := minctlTemplate.NewTemplateBash()
if err != nil {
return nil, err
}
return &OVHcloud{
client: client,
tmpl: tmpl,
}, nil
}
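// createOVHID packs the instance name and its labels into a single
// pipe-separated string stored as the OVH instance name;
// getOVHFieldsFromID is its inverse.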
func createOVHID(instanceName, label string) (id string) {
return fmt.Sprintf("%s|%s", instanceName, label)
}
func getOVHFieldsFromID(id string) (instanceName, label string, err error) {
fields := strings.Split(id, "|")
err = nil
if len(fields) == 3 {
instanceName = fields[0]
label = strings.Join([]string{fields[1], fields[2]}, ",")
} else {
err = fmt.Errorf("could not get fields from custom ID: fields: %v", fields)
return "", "", err
}
return instanceName, label, nil
}
func (o *OVHcloud) CreateServer(args automation.ServerArgs) (*automation.RessourceResults, error) {
pubKeyFile, err := ioutil.ReadFile(fmt.Sprintf("%s.pub", args.MinecraftResource.GetSSH()))
if err != nil {
return nil, err
}
key, err := o.client.CreateSSHKey(context.Background(), ovhsdk.SSHKeyCreateOptions{
Name: fmt.Sprintf("%s-ssh", args.MinecraftResource.GetName()),
PublicKey: string(pubKeyFile),
})
if err != nil {
return nil, err
}
image, err := o.client.GetImage(context.Background(), "Ubuntu 20.04", args.MinecraftResource.GetRegion())
if err != nil {
return nil, err
}
flavor, err := o.client.GetFlavor(context.Background(), args.MinecraftResource.GetSize(), args.MinecraftResource.GetRegion())
if err != nil {
return nil, err
}
var mount string
if args.MinecraftResource.GetVolumeSize() > 0 {
mount = "sdb"
}
userData, err := o.tmpl.GetTemplate(args.MinecraftResource, mount, minctlTemplate.GetTemplateBashName(args.MinecraftResource.IsProxyServer()))
if err != nil {
return nil, err
}
instance, err := o.client.CreateInstance(context.Background(), ovhsdk.InstanceCreateOptions{
Name: createOVHID(args.MinecraftResource.GetName(), strings.Join([]string{common.InstanceTag, args.MinecraftResource.GetEdition()}, "|")),
Region: args.MinecraftResource.GetRegion(),
SSHKeyID: key.ID,
FlavorID: flavor.ID,
ImageID: image.ID,
MonthlyBilling: false,
UserData: userData,
})
if err != nil {
return nil, err
}
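// poll the API until the freshly created instance reaches the ACTIVE state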
stillCreating := true
for stillCreating {
instance, err = o.client.GetInstance(context.Background(), instance.ID)
if err != nil {
return nil, err
}
if instance.Status == ovhsdk.InstanceActive {
stillCreating = false
time.Sleep(2 * time.Second)
} else {
time.Sleep(2 * time.Second)
}
}
if args.MinecraftResource.GetVolumeSize() > 0 {
volume, err := o.client.CreateVolume(context.Background(), ovhsdk.VolumeCreateOptions{
Name: fmt.Sprintf("%s-vol", args.MinecraftResource.GetName()),
Size: args.MinecraftResource.GetVolumeSize(),
Region: args.MinecraftResource.GetRegion(),
Type: ovhsdk.VolumeClassic,
})
if err != nil {
return nil, err
}
stillCreating = true
for stillCreating {
volume, err = o.client.GetVolume(context.Background(), volume.ID)
if err != nil {
return nil, err
}
if volume.Status == ovhsdk.VolumeAvailable {
stillCreating = false
} else {
time.Sleep(2 * time.Second)
}
}
_, err = o.client.AttachVolume(context.Background(), volume.ID, &ovhsdk.VolumeAttachOptions{
InstanceID: instance.ID,
})
if err != nil {
return nil, err
}
stillAttaching := true
for stillAttaching {
volume, err = o.client.GetVolume(context.Background(), volume.ID)
if err != nil {
return nil, err
}
if volume.Status == ovhsdk.VolumeInUse {
stillAttaching = false
} else {
time.Sleep(2 * time.Second)
}
}
}
_, labels, err := getOVHFieldsFromID(instance.Name)
if err != nil {
return nil, err
}
ip4, err := ovhsdk.IPv4(instance)
if err != nil {
return nil, err
}
return &automation.RessourceResults{
ID: instance.ID,
Name: instance.Name,
Region: instance.Region,
PublicIP: ip4,
Tags: labels,
}, err
}
func (o *OVHcloud) DeleteServer(id string, args automation.ServerArgs) error {
keys, err := o.client.ListSSHKeys(context.Background())
if err != nil {
return err
}
for _, key := range keys {
if key.Name == fmt.Sprintf("%s-ssh", args.MinecraftResource.GetName()) {
err := o.client.DeleteSSHKey(context.Background(), key.ID)
if err != nil {
return err
}
}
}
volumes, err := o.client.ListVolumes(context.Background())
if err != nil {
return err
}
for _, volume := range volumes {
for _, attached := range volume.AttachedTo {
if attached == id {
detachVolume, err := o.client.DetachVolume(context.Background(), volume.ID, &ovhsdk.VolumeDetachOptions{
InstanceID: id,
})
if err != nil {
return err
}
stillDetaching := true
for stillDetaching {
detachedVolume, err := o.client.GetVolume(context.Background(), detachVolume.ID)
if err != nil {
return err
}
if detachedVolume.Status == ovhsdk.VolumeAvailable {
stillDetaching = false
} else {
time.Sleep(2 * time.Second)
}
}
err = o.client.DeleteVolume(context.Background(), volume.ID)
if err != nil {
return err
}
}
}
}
err = o.client.DeleteInstance(context.Background(), id)
if err != nil {
return err
}
return nil
}
func (o *OVHcloud) ListServer() ([]automation.RessourceResults, error) {
instances, err := o.client.ListInstance(context.Background())
if err != nil {
return nil, err
}
var result []automation.RessourceResults
for _, instance := range instances {
// no error checking: there may be servers in the region that don't belong to minectl
_, labels, _ := getOVHFieldsFromID(instance.Name)
if strings.Contains(labels, common.InstanceTag) {
ip4, err := ovhsdk.IPv4(&instance)
if err != nil {
return nil, err
}
result = append(result, automation.RessourceResults{
ID: instance.ID,
Name: instance.Name,
Region: instance.Region,
PublicIP: ip4,
Tags: labels,
})
}
}
return result, nil
}
func (o *OVHcloud) UpdateServer(id string, args automation.ServerArgs) error {
instance, err := o.client.GetInstance(context.Background(), id)
if err != nil {
return err
}
ip4, err := ovhsdk.IPv4(instance)
if err != nil {
return err
}
remoteCommand := update.NewRemoteServer(args.MinecraftResource.GetSSH(), ip4, "ubuntu")
err = remoteCommand.UpdateServer(args.MinecraftResource)
if err != nil {
return err
}
return nil
}
func (o *OVHcloud) UploadPlugin(id string, args automation.ServerArgs, plugin, destination string) error {
instance, err := o.client.GetInstance(context.Background(), id)
if err != nil {
return err
}
ip4, err := ovhsdk.IPv4(instance)
if err != nil {
return err
}
remoteCommand := update.NewRemoteServer(args.MinecraftResource.GetSSH(), ip4, "ubuntu")
// we are not allowed to log in as root, so the move and service restart below run via sudo
source := filepath.Join("/tmp", filepath.Base(plugin))
err = remoteCommand.TransferFile(plugin, source)
if err != nil {
return err
}
_, err = remoteCommand.ExecuteCommand(fmt.Sprintf("sudo mv %s %s\nsudo systemctl restart minecraft.service", source, destination))
if err != nil {
return err
}
return nil
}
func (o *OVHcloud) GetServer(id string, args automation.ServerArgs) (*automation.RessourceResults, error) {
instance, err := o.client.GetInstance(context.Background(), id)
if err != nil {
return nil, err
}
ip4, err := ovhsdk.IPv4(instance)
if err != nil {
return nil, err
}
_, labels, err := getOVHFieldsFromID(instance.Name)
if err != nil {
return nil, err
}
return &automation.RessourceResults{
ID: instance.ID,
Name: instance.Name,
Region: instance.Region,
PublicIP: ip4,
Tags: labels,
}, err
}