Dataset columns:
  code       string, lengths 66 to 870k
  docstring  string, lengths 19 to 26.7k
  func_name  string, lengths 1 to 138
  language   string, 1 class
  repo       string, lengths 7 to 68
  path       string, lengths 5 to 324
  url        string, lengths 46 to 389
  license    string, 7 classes
def leftordered(M):
    """ Returns the given matrix in left-ordered form. """
    l = list(M.T)
    l.sort(key=tuple)
    return array(l)[::-1].T
Returns the given matrix in left-ordered form.
leftordered
python
pybrain/pybrain
pybrain/tools/ibp.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/ibp.py
BSD-3-Clause
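A minimal usage sketch (hypothetical input; assumes numpy's array is imported at module level, as in ibp.py):

    from numpy import array
    M = array([[0, 1, 1],
               [1, 0, 1]])
    # columns are sorted lexicographically, largest first
    print(leftordered(M))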
def testIBP():
    """ Plot matrices generated by an IBP, for a few different settings. """
    from pybrain.tools.plotting.colormaps import ColorMap
    import pylab
    # always 50 customers
    n = 50
    # define parameter settings
    ps = [(10, 0.1), (10,), (50,), (50, 0.5), ]
    # generate a few matrices, one for each parameter setting
    ms = []
    for p in ps:
        if len(p) > 1:
            m = generateIBP(n, p[0], p[1])
        else:
            m = generateIBP(n, p[0])
        ms.append(leftordered(m))
    # plot the matrices
    for m in ms:
        ColorMap(m, pixelspervalue=3)
    pylab.show()
Plot matrices generated by an IBP, for a few different settings.
testIBP
python
pybrain/pybrain
pybrain/tools/ibp.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/ibp.py
BSD-3-Clause
def __init__(self, DS, **kwargs):
    """ Initialize with the training data set DS. All keywords given are set as member
    variables. The following are particularly important:

    :key hidden: number of hidden units
    :key TDS: test data set for checking convergence
    :key VDS: validation data set for final performance evaluation
    :key epoinc: number of epochs to train for, before checking convergence (default: 5)
    """
    self.DS = DS
    self.hidden = 10
    self.maxepochs = 1000
    self.Graph = None
    self.TDS = None
    self.VDS = None
    self.epoinc = 5
    setAllArgs(self, kwargs)
    self.trainCurve = None
Initialize with the training data set DS. All keywords given are set as member variables. The following are particularly important: :key hidden: number of hidden units :key TDS: test data set for checking convergence :key VDS: validation data set for final performance evaluation :key epoinc: number of epochs to train for, before checking convergence (default: 5)
__init__
python
pybrain/pybrain
pybrain/tools/neuralnets.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/neuralnets.py
BSD-3-Clause
def initGraphics(self, ymax=10, xmax=-1):
    """ Initialize the interactive graphics output window, and return a handle to
    the plot. """
    if xmax < 0:
        xmax = self.maxepochs
    figure(figsize=[12, 8])
    ion()
    draw()
    self.Graph = MultilinePlotter(xlim=[0, xmax], ylim=[0, ymax])
    self.Graph.setLineStyle([0, 1], linewidth=2)
    return self.Graph
initialize the interactive graphics output window, and return a handle to the plot
initGraphics
python
pybrain/pybrain
pybrain/tools/neuralnets.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/neuralnets.py
BSD-3-Clause
def saveTrainingCurve(self, learnfname):
    """ Save the training curves into a file with the given name (CSV format). """
    logging.info('Saving training curves into ' + learnfname)
    if self.trainCurve is None:
        logging.error('No training curve available for saving!')
        return
    learnf = open(learnfname, "wb")
    writer = csv.writer(learnf, dialect='excel')
    nDataSets = len(self.trainCurve)
    for i in range(1, len(self.trainCurve[0]) - 1):
        writer.writerow([self.trainCurve[k][i] for k in range(nDataSets)])
    learnf.close()
save the training curves into a file with the given name (CSV format)
saveTrainingCurve
python
pybrain/pybrain
pybrain/tools/neuralnets.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/neuralnets.py
BSD-3-Clause
def saveNetwork(self, fname):
    """ Save the trained network to a file. """
    NetworkWriter.writeToFile(self.Trainer.module, fname)
    logging.info("Network saved to: " + fname)
save the trained network to a file
saveNetwork
python
pybrain/pybrain
pybrain/tools/neuralnets.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/neuralnets.py
BSD-3-Clause
def setupNN(self, trainer=RPropMinusTrainer, hidden=None, **trnargs):
    """ Constructs a 3-layer FNN for regression. Optional arguments are passed on to
    the Trainer class. """
    if hidden is not None:
        self.hidden = hidden
    logging.info("Constructing FNN with following config:")
    FNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim)
    logging.info(str(FNN) + "\n Hidden units:\n " + str(self.hidden))
    logging.info("Training FNN with following special arguments:")
    logging.info(str(trnargs))
    self.Trainer = trainer(FNN, dataset=self.DS, **trnargs)
Constructs a 3-layer FNN for regression. Optional arguments are passed on to the Trainer class.
setupNN
python
pybrain/pybrain
pybrain/tools/neuralnets.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/neuralnets.py
BSD-3-Clause
def runTraining(self, convergence=0, **kwargs):
    """ Trains the network on the stored dataset. If convergence is >0, check after
    that many epoch increments whether test error is going down again, and stop
    training accordingly.
    CAVEAT: No support for Sequential datasets! """
    assert isinstance(self.Trainer, Trainer)
    if self.Graph is not None:
        self.Graph.setLabels(x='epoch', y='normalized regression error')
        self.Graph.setLegend(['training', 'test'], loc='upper right')
    epoch = 0
    inc = self.epoinc
    best_error = Infinity
    best_epoch = 0
    learncurve_x = [0]
    learncurve_y = [0.0]
    valcurve_y = [0.0]
    converged = False
    convtest = 0
    if convergence > 0:
        logging.info("Convergence criterion: %d batches of %d epochs w/o improvement" % (convergence, inc))
    while epoch <= self.maxepochs and not converged:
        self.Trainer.trainEpochs(inc)
        epoch += inc
        learncurve_x.append(epoch)
        # calculate errors on TRAINING data
        err_trn = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.DS)
        learncurve_y.append(err_trn)
        if self.TDS is None:
            logging.info("epoch: %6d, err_trn: %10g" % (epoch, err_trn))
        else:
            # calculate same errors on TEST data
            err_tst = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.TDS)
            valcurve_y.append(err_tst)
            if err_tst < best_error:
                # store best error and parameters
                best_epoch = epoch
                best_error = err_tst
                bestweights = self.Trainer.module.params.copy()
                convtest = 0
            else:
                convtest += 1
            logging.info("epoch: %6d, err_trn: %10g, err_tst: %10g, best_tst: %10g" % (epoch, err_trn, err_tst, best_error))
            if self.Graph is not None:
                self.Graph.addData(1, epoch, err_tst)
            # check if convergence criterion is fulfilled (no improvement after N epoincs)
            if convtest >= convergence:
                converged = True
        if self.Graph is not None:
            self.Graph.addData(0, epoch, err_trn)
            self.Graph.update()
    # training finished!
    logging.info("Best epoch: %6d, with error: %10g" % (best_epoch, best_error))
    if self.VDS is not None:
        # calculate same errors on VALIDATION data
        self.Trainer.module.params[:] = bestweights.copy()
        err_val = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.VDS)
        logging.info("Result on evaluation data: %10g" % err_val)
    # store training curve for saving into file
    self.trainCurve = (learncurve_x, learncurve_y, valcurve_y)
Trains the network on the stored dataset. If convergence is >0, check after that many epoch increments whether test error is going down again, and stop training accordingly. CAVEAT: No support for Sequential datasets!
runTraining
python
pybrain/pybrain
pybrain/tools/neuralnets.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/neuralnets.py
BSD-3-Clause
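A hedged sketch of how these methods chain together in practice (toy data; assumes the surrounding class is NNregression, as in pybrain/tools/neuralnets.py):

    from pybrain.datasets import SupervisedDataSet
    from pybrain.tools.neuralnets import NNregression
    # illustrative 1-in/1-out regression set; real data would go here
    ds = SupervisedDataSet(1, 1)
    for x in range(10):
        ds.addSample([x / 10.0], [x / 10.0])
    nn = NNregression(ds, hidden=5, maxepochs=50)
    nn.setupNN()
    nn.runTraining()
    nn.saveTrainingCurve('curve.csv')  # hypothetical output filename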
def __init__(self, DS, **kwargs):
    """ Initialize the classifier: the least we need is the dataset to be classified.
    All keywords given are set as member variables. """
    if not isinstance(DS, ClassificationDataSet):
        raise TypeError('Need a ClassificationDataSet to do classification!')
    NNtools.__init__(self, DS, **kwargs)
    self.nClasses = self.DS.nClasses  # need this because targets may be altered later
    self.clsnames = None
    self.targetsAreOneOfMany = False
Initialize the classifier: the least we need is the dataset to be classified. All keywords given are set as member variables.
__init__
python
pybrain/pybrain
pybrain/tools/neuralnets.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/neuralnets.py
BSD-3-Clause
def _convertAllDataToOneOfMany(self, values=[0, 1]):
    """ Converts all datasets associated with self into 1-out-of-many representations,
    e.g. with original classes 0 to 4, the new target for class 1 would be [0,1,0,0,0],
    or accordingly with other upper and lower bounds, as given by the values keyword. """
    if self.targetsAreOneOfMany:
        return
    else:
        # convert all datasets to one-of-many ("winner takes all") representation
        for dsname in ["DS", "TDS", "VDS"]:
            d = getattr(self, dsname)
            if d is not None:
                if d.outdim < d.nClasses:
                    d._convertToOneOfMany(values)
        self.targetsAreOneOfMany = True
converts all datasets associated with self into 1-out-of-many representations, e.g. with original classes 0 to 4, the new target for class 1 would be [0,1,0,0,0], or accordingly with other upper and lower bounds, as given by the values keyword
_convertAllDataToOneOfMany
python
pybrain/pybrain
pybrain/tools/neuralnets.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/neuralnets.py
BSD-3-Clause
def setupNN(self, trainer=RPropMinusTrainer, hidden=None, **trnargs):
    """ Setup FNN and trainer for classification. """
    self._convertAllDataToOneOfMany()
    if hidden is not None:
        self.hidden = hidden
    FNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim, outclass=SoftmaxLayer)
    logging.info("Constructing classification FNN with following config:")
    logging.info(str(FNN) + "\n Hidden units:\n " + str(self.hidden))
    logging.info("Trainer received the following special arguments:")
    logging.info(str(trnargs))
    self.Trainer = trainer(FNN, dataset=self.DS, **trnargs)
Setup FNN and trainer for classification.
setupNN
python
pybrain/pybrain
pybrain/tools/neuralnets.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/neuralnets.py
BSD-3-Clause
def setupRNN(self, trainer=BackpropTrainer, hidden=None, **trnargs):
    """ Setup an LSTM RNN and trainer for sequence classification. """
    if hidden is not None:
        self.hidden = hidden
    self._convertAllDataToOneOfMany()
    RNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim,
                       hiddenclass=LSTMLayer, recurrent=True, outclass=SoftmaxLayer)
    logging.info("Constructing classification RNN with following config:")
    logging.info(str(RNN) + "\n Hidden units:\n " + str(self.hidden))
    logging.info("Trainer received the following special arguments:")
    logging.info(str(trnargs))
    self.Trainer = trainer(RNN, dataset=self.DS, **trnargs)
Setup an LSTM RNN and trainer for sequence classification.
setupRNN
python
pybrain/pybrain
pybrain/tools/neuralnets.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/neuralnets.py
BSD-3-Clause
def runTraining(self, convergence=0, **kwargs):
    """ Trains the network on the stored dataset. If convergence is >0, check after
    that many epoch increments whether test error is going down again, and stop
    training accordingly. """
    assert isinstance(self.Trainer, Trainer)
    if self.Graph is not None:
        self.Graph.setLabels(x='epoch', y='% classification error')
        self.Graph.setLegend(['training', 'test'], loc='lower right')
    epoch = 0
    inc = self.epoinc
    best_error = 100.0
    best_epoch = 0
    learncurve_x = [0]
    learncurve_y = [0.0]
    valcurve_y = [0.0]
    converged = False
    convtest = 0
    if convergence > 0:
        logging.info("Convergence criterion: %d batches of %d epochs w/o improvement" % (convergence, inc))
    while epoch <= self.maxepochs and not converged:
        self.Trainer.trainEpochs(inc)
        epoch += inc
        learncurve_x.append(epoch)
        # calculate errors on TRAINING data
        if isinstance(self.DS, SequentialDataSet):
            r_trn = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.DS))
        else:
            # FIXME: messy - validation does not belong into the Trainer...
            out, trueclass = self.Trainer.testOnClassData(return_targets=True)
            r_trn = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
        learncurve_y.append(r_trn)
        if self.TDS is None:
            logging.info("epoch: %6d, err_trn: %5.2f%%" % (epoch, r_trn))
        else:
            # calculate errors on TEST data
            if isinstance(self.DS, SequentialDataSet):
                r_tst = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.TDS))
            else:
                # FIXME: messy - validation does not belong into the Trainer...
                out, trueclass = self.Trainer.testOnClassData(return_targets=True, dataset=self.TDS)
                r_tst = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
            valcurve_y.append(r_tst)
            if r_tst < best_error:
                best_epoch = epoch
                best_error = r_tst
                bestweights = self.Trainer.module.params.copy()
                convtest = 0
            else:
                convtest += 1
            logging.info("epoch: %6d, err_trn: %5.2f%%, err_tst: %5.2f%%, best_tst: %5.2f%%" % (epoch, r_trn, r_tst, best_error))
            if self.Graph is not None:
                self.Graph.addData(1, epoch, r_tst)
            # check if convergence criterion is fulfilled (no improvement after N epoincs)
            if convtest >= convergence:
                converged = True
        if self.Graph is not None:
            self.Graph.addData(0, epoch, r_trn)
            self.Graph.update()
    logging.info("Best epoch: %6d, with error: %5.2f%%" % (best_epoch, best_error))
    if self.VDS is not None:
        # calculate errors on VALIDATION data
        self.Trainer.module.params[:] = bestweights.copy()
        if isinstance(self.DS, SequentialDataSet):
            r_val = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.VDS))
        else:
            out, trueclass = self.Trainer.testOnClassData(return_targets=True, dataset=self.VDS)
            r_val = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
        logging.info("Result on evaluation data: %5.2f%%" % r_val)
    self.trainCurve = (learncurve_x, learncurve_y, valcurve_y)
Trains the network on the stored dataset. If convergence is >0, check after that many epoch increments whether test error is going down again, and stop training accordingly.
runTraining
python
pybrain/pybrain
pybrain/tools/neuralnets.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/neuralnets.py
BSD-3-Clause
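For the classification variant, a minimal sketch along the same lines (toy dataset; assumes the surrounding NNclassifier class from this module):

    from pybrain.datasets import ClassificationDataSet
    from pybrain.tools.neuralnets import NNclassifier
    ds = ClassificationDataSet(2, nb_classes=2)
    ds.addSample([0., 0.], [0])
    ds.addSample([1., 1.], [1])
    clf = NNclassifier(ds, hidden=3, maxepochs=20)
    clf.setupNN()        # FNN; use setupRNN() for sequence data
    clf.runTraining()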
def crowding_distance(individuals, fitnesses):
    """ Crowding distance-measure for multiple objectives. """
    distances = collections.defaultdict(lambda: 0)
    individuals = list(individuals)
    # Infer the number of objectives by looking at the fitness of the first.
    n_obj = len(fitnesses[individuals[0]])
    for i in range(n_obj):
        individuals.sort(key=lambda x: fitnesses[x][i])
        # normalization between 0 and 1.
        normalization = float(fitnesses[individuals[0]][i] - fitnesses[individuals[-1]][i])
        # Make sure the boundary points are always selected.
        distances[individuals[0]] = 1e100
        distances[individuals[-1]] = 1e100
        tripled = list(zip(individuals, individuals[1:-1], individuals[2:]))
        for pre, ind, post in tripled:
            distances[ind] += (fitnesses[pre][i] - fitnesses[post][i]) / normalization
    return distances
Crowding distance-measure for multiple objectives.
crowding_distance
python
pybrain/pybrain
pybrain/tools/nondominated.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/nondominated.py
BSD-3-Clause
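A small worked example (hypothetical two-objective fitnesses, keyed by individual):

    fits = {'a': (0.0, 1.0), 'b': (0.4, 0.5), 'c': (1.0, 0.0)}
    d = crowding_distance(['a', 'b', 'c'], fits)
    # boundary individuals 'a' and 'c' get 1e100, so they are always kept;
    # 'b' accumulates a finite, normalized distance over both objectives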
def _non_dominated_front_old(iterable, key=lambda x: x, allowequality=True):
    """ Return a subset of items from iterable which are not dominated by any other
    item in iterable. """
    items = list(iterable)
    keys = dict((i, key(i)) for i in items)
    dim = len(list(keys.values())[0])
    if any(dim != len(k) for k in list(keys.values())):
        raise ValueError("Wrong tuple size.")
    # Make a dictionary that holds the items another item dominates.
    dominations = collections.defaultdict(lambda: [])
    for i in items:
        for j in items:
            if allowequality:
                if all(keys[i][k] < keys[j][k] for k in range(dim)):
                    dominations[i].append(j)
            else:
                if all(keys[i][k] <= keys[j][k] for k in range(dim)):
                    dominations[i].append(j)
    dominates = lambda i, j: j in dominations[i]
    res = set()
    items = set(items)
    for i in items:
        res.add(i)
        for j in list(res):
            if i is j:
                continue
            if dominates(j, i):
                res.remove(i)
                break
            elif dominates(i, j):
                res.remove(j)
    return res
Return a subset of items from iterable which are not dominated by any other item in iterable.
_non_dominated_front_old
python
pybrain/pybrain
pybrain/tools/nondominated.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/nondominated.py
BSD-3-Clause
def _non_dominated_front_fast(iterable, key=lambda x: x, allowequality=True):
    """ Return a subset of items from iterable which are not dominated by any other
    item in iterable. Faster version. """
    items = list(iterable)
    keys = dict((i, key(i)) for i in items)
    dim = len(list(keys.values())[0])
    dominations = {}
    for i in items:
        for j in items:
            good = True
            if allowequality:
                for k in range(dim):
                    if keys[i][k] >= keys[j][k]:
                        good = False
                        break
            else:
                for k in range(dim):
                    if keys[i][k] > keys[j][k]:
                        good = False
                        break
            if good:
                dominations[(i, j)] = None
    res = set()
    items = set(items)
    for i in items:
        res.add(i)
        for j in list(res):
            if i is j:
                continue
            if (j, i) in dominations:
                res.remove(i)
                break
            elif (i, j) in dominations:
                res.remove(j)
    return res
Return a subset of items from iterable which are not dominated by any other item in iterable. Faster version.
_non_dominated_front_fast
python
pybrain/pybrain
pybrain/tools/nondominated.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/nondominated.py
BSD-3-Clause
def _non_dominated_front_arr(iterable, key=lambda x: x, allowequality=True):
    """ Return a subset of items from iterable which are not dominated by any other
    item in iterable. Faster version, based on boolean matrix manipulations. """
    items = list(iterable)
    fits = list(map(key, items))
    l = len(items)
    x = array(fits)
    a = tile(x, (l, 1, 1))
    b = a.transpose((1, 0, 2))
    if allowequality:
        ndom = sum(a <= b, axis=2)
    else:
        ndom = sum(a < b, axis=2)
    ndom = array(ndom, dtype=bool)
    res = set()
    for ii in range(l):
        res.add(ii)
        for ij in list(res):
            if ii == ij:
                continue
            if not ndom[ij, ii]:
                res.remove(ii)
                break
            elif not ndom[ii, ij]:
                res.remove(ij)
    return set([items[i] for i in res])
Return a subset of items from iterable which are not dominated by any other item in iterable. Faster version, based on boolean matrix manipulations.
_non_dominated_front_arr
python
pybrain/pybrain
pybrain/tools/nondominated.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/nondominated.py
BSD-3-Clause
def non_dominated_sort(iterable, key=lambda x: x, allowequality=True):
    """ Return a list that is sorted in a non-dominating fashion.
    Keys have to be n-tuples. """
    items = set(iterable)
    fronts = []
    while items:
        front = non_dominated_front(items, key, allowequality)
        items -= front
        fronts.append(front)
    return fronts
Return a list that is sorted in a non-dominating fashion. Keys have to be n-tuples.
non_dominated_sort
python
pybrain/pybrain
pybrain/tools/nondominated.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/nondominated.py
BSD-3-Clause
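Illustrative use on plain tuples, assuming non_dominated_front is bound to one of the front implementations above (as in nondominated.py) and objectives are minimized:

    points = [(1, 1), (1, 2), (2, 1), (3, 3)]
    fronts = non_dominated_sort(points)
    # first front: {(1, 1), (1, 2), (2, 1)} - nothing is strictly better
    # in all objectives; second front: {(3, 3)}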
def _const_non_dominated_front_arr(iterable, key=lambda x: x, allowequality=True):
    """ Return a subset of items from iterable which are not dominated by any other
    item in iterable, taking feasibility and constraint violations into account.
    Faster version, based on boolean matrix manipulations. """
    items = list(iterable)  # population
    fits = list(map(key, items))  # fitness
    x = array([fits[i][0] for i in range(len(fits))])  # objective values
    v = array([fits[i][1] for i in range(len(fits))])  # feasibility flags
    c = array([fits[i][2] for i in range(len(fits))])  # constraint violations
    l = len(items)
    a = tile(x, (l, 1, 1))
    b = a.transpose((1, 0, 2))
    if allowequality:
        ndom = sum(a <= b, axis=2)
    else:
        ndom = sum(a < b, axis=2)
    ndom = array(ndom, dtype=bool)
    res = set()
    for ii in range(l):
        res.add(ii)
        for ij in list(res):
            if ii == ij:
                continue
            if not ndom[ij, ii] and v[ij] and v[ii]:
                res.remove(ii)
                break
            elif not ndom[ii, ij] and v[ij] and v[ii]:
                res.remove(ij)
            elif v[ij] and not v[ii]:
                res.remove(ii)
                break
            elif v[ii] and not v[ij]:
                res.remove(ij)
            elif not v[ii] and not v[ij]:
                cii = abs(sum(c[ii]))
                cij = abs(sum(c[ij]))
                if cii < cij:
                    res.remove(ij)
                else:
                    res.remove(ii)
                    break
    return set([items[i] for i in res])
Return a subset of items from iterable which are not dominated by any other item in iterable, taking feasibility and constraint violations into account. Faster version, based on boolean matrix manipulations.
_const_non_dominated_front_arr
python
pybrain/pybrain
pybrain/tools/nondominated.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/nondominated.py
BSD-3-Clause
def const_non_dominated_sort(iterable, key=lambda x: x, allowequality=True):
    """ Return a list that is sorted in a non-dominating fashion.
    Keys have to be n-tuples. """
    items = set(iterable)
    fronts = []
    while items:
        front = const_non_dominated_front(items, key, allowequality)
        items -= front
        fronts.append(front)
    return fronts
Return a list that is sorted in a non-dominating fashion. Keys have to be n-tuples.
const_non_dominated_sort
python
pybrain/pybrain
pybrain/tools/nondominated.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/nondominated.py
BSD-3-Clause
def const_crowding_distance(individuals, fitnesses):
    """ Crowding distance-measure for multiple objectives. """
    distances = collections.defaultdict(lambda: 0)
    individuals = list(individuals)
    # Infer the number of objectives by looking at the fitness of the first.
    n_obj = len(fitnesses[individuals[0]][0])
    for i in range(n_obj):
        individuals.sort(key=lambda x: fitnesses[x][0][i])
        # normalization between 0 and 1.
        normalization = float(fitnesses[individuals[0]][0][i] - fitnesses[individuals[-1]][0][i])
        # Make sure the boundary points are always selected.
        distances[individuals[0]] = 1e100
        distances[individuals[-1]] = 1e100
        tripled = list(zip(individuals, individuals[1:-1], individuals[2:]))
        for pre, ind, post in tripled:
            distances[ind] += (fitnesses[pre][0][i] - fitnesses[post][0][i]) / normalization
    return distances
Crowding distance-measure for multiple objectives.
const_crowding_distance
python
pybrain/pybrain
pybrain/tools/nondominated.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/nondominated.py
BSD-3-Clause
def const_number_of_feasible_pop(iterable, key=lambda x: x, allowequality=True):
    """ Return the number of feasible individuals in iterable, i.e. those whose
    key's feasibility flag (second entry) is True. """
    items = list(iterable)  # population
    fits = list(map(key, items))  # fitness
    v = list([fits[i][1] for i in range(len(fits))])
    n = v.count(True)
    return n
Return the number of feasible individuals in iterable, i.e. those whose key's feasibility flag (second entry) is True.
const_number_of_feasible_pop
python
pybrain/pybrain
pybrain/tools/nondominated.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/nondominated.py
BSD-3-Clause
def rankedFitness(R):
    """ Produce a linear ranking of the fitnesses in R.
    (The highest rank is the best fitness.) """
    res = zeros_like(R)
    l = list(zip(R, list(range(len(R)))))
    l.sort()
    for i, (_, j) in enumerate(l):
        res[j] = i
    return res
produce a linear ranking of the fitnesses in R. (The highest rank is the best fitness)
rankedFitness
python
pybrain/pybrain
pybrain/tools/rankingfunctions.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/rankingfunctions.py
BSD-3-Clause
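A quick check of the ranking behavior (hypothetical fitness array; zeros_like comes from numpy):

    from numpy import array
    R = array([0.3, 0.9, 0.1])
    print(rankedFitness(R))   # -> [1 2 0]: the best fitness (0.9) gets the highest rank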
def __call__(self, R):
    """ :key R: one-dimensional array containing fitnesses. """
    res = rankedFitness(R)
    return res / float(max(res))
:key R: one-dimensional array containing fitnesses.
__call__
python
pybrain/pybrain
pybrain/tools/rankingfunctions.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/rankingfunctions.py
BSD-3-Clause
def adaptAgent(agent_klass):
    """ Return a factory function that instantiates a pybrain agent and adapts it to
    the rlglue framework interface.

    :type agent_klass: subclass of some pybrain agent
    :key agent_klass: Some class that is to be adapted to the rlglue framework
    """
    # TODO: return a real class instead of a function, so docstrings and such
    # are not lost.
    def inner(*args, **kwargs):
        return RlglueAgentAdapter(agent_klass, *args, **kwargs)
    return inner
Return a factory function that instantiates a pybrain agent and adapts it to the rlglue framework interface. :type agent_klass: subclass of some pybrain agent :key agent_klass: Some class that is to be adapted to the rlglue framework
adaptAgent
python
pybrain/pybrain
pybrain/tools/rlgluebridge.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/rlgluebridge.py
BSD-3-Clause
def adaptAgentObject(agent_object):
    """ Return an object that adapts a pybrain agent to the rlglue interface. """
    # This is pretty hacky: We first take a bogus agent with a bogus module for our
    # function adaptAgent to work, then substitute the bogus agent with our actual
    # agent.
    agent = adaptAgent(LearningAgent)(Module(1, 1))
    agent.agent = agent_object
    return agent
Return an object that adapts a pybrain agent to the rlglue interface.
adaptAgentObject
python
pybrain/pybrain
pybrain/tools/rlgluebridge.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/rlgluebridge.py
BSD-3-Clause
def __init__(self, klass, *args, **kwargs):
    """ Create an object that adapts an object of class klass to the protocol of
    rlglue agents.

    :type klass: subclass of some pybrain agent
    :key klass: Some class that is to be adapted to the rlglue framework
    """
    if not issubclass(klass, LearningAgent):
        raise ValueError("Supply a LearningAgent as first argument")
    self.agent = klass(*args, **kwargs)
    # TODO: At the moment, learning is done after a certain amount of steps - this is
    # somehow logic of the agent, and not of the wrapper. Maybe there are some changes
    # in the agent API needed.
    self.learnCycle = 1
    self.episodeCount = 1
Create an object that adapts an object of class klass to the protocol of rlglue agents. :type klass: subclass of some pybrain agent :key klass: Some class that is to be adapted to the rlglue framework
__init__
python
pybrain/pybrain
pybrain/tools/rlgluebridge.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/rlgluebridge.py
BSD-3-Clause
def agent_init(self, task_specification=None):
    """ Give the agent a specification of the action and state space.

    Since pybrain agents do not use task specifications (they are already set up for
    the problem domain), the task_specification parameter is only there for API
    consistency; it will be ignored.

    The format of task specifications is described here:
    http://rlai.cs.ualberta.ca/RLBB/TaskSpecification.html

    :type task_specification: string
    """
    # This is (for now) actually a dummy method to satisfy the RLGlue interface.
    # It is the programmer's job to check whether an experiment fits the agent object.
    self.agent.reset()
Give the agent a specification of the action and state space. Since pybrain agents do not use task specifications (they are already set up for the problem domain), the task_specification parameter is only there for API consistency; it will be ignored. The format of task specifications is described here: http://rlai.cs.ualberta.ca/RLBB/TaskSpecification.html :type task_specification: string
agent_init
python
pybrain/pybrain
pybrain/tools/rlgluebridge.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/rlgluebridge.py
BSD-3-Clause
def agent_step(self, reward, observation):
    """ Return an action depending on an observation and a reward.

    :type reward: number
    :type observation: Observation
    """
    self._giveReward(reward)
    self._integrateObservation(observation)
    return self._getAction()
Return an action depending on an observation and a reward. :type reward: number :type observation: Observation
agent_step
python
pybrain/pybrain
pybrain/tools/rlgluebridge.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/rlgluebridge.py
BSD-3-Clause
def agent_end(self, reward):
    """ Give the last reward to the agent.

    :type reward: number
    """
    self._giveReward(reward)
    self.agent.newEpisode()
    self.episodeCount += 1
    if self.episodeCount % self.learnCycle == 0:
        self.agent.learn()
        self.agent.reset()
Give the last reward to the agent. :type reward: number
agent_end
python
pybrain/pybrain
pybrain/tools/rlgluebridge.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/rlgluebridge.py
BSD-3-Clause
def agent_cleanup(self):
    """ This is called when an episode ends. Calls should pair up one-to-one with
    agent_init. """
This is called when an episode ends. Calls should pair up one-to-one with agent_init.
agent_cleanup
python
pybrain/pybrain
pybrain/tools/rlgluebridge.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/rlgluebridge.py
BSD-3-Clause
def _getAction(self):
    """ Return an RLGlue action built from the numpy array yielded by the wrapped
    pybrain agent. """
    action = RLGlueAction()
    action.doubleArray = self.agent.getAction().tolist()
    action.intArray = []
    return action
Return an RLGlue action built from the numpy array yielded by the wrapped pybrain agent.
_getAction
python
pybrain/pybrain
pybrain/tools/rlgluebridge.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/rlgluebridge.py
BSD-3-Clause
def __init__(self, path, port=None, autoreconnect=None):
    """ Instantiate an object with the given variables. """
    if os.name not in ('posix', 'mac'):
        raise NotImplementedError(
            "Killing processes under win32 not supported")
    self.path = path
    self.port = port
    self.autoreconnect = autoreconnect
    self.running = False
Instantiate an object with the given variables.
__init__
python
pybrain/pybrain
pybrain/tools/rlgluebridge.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/rlgluebridge.py
BSD-3-Clause
def run(self):
    """ Run the benchmark. All agents are tested loop times against the environment
    and statistics for each run are saved into the benchmark directory benchmarkDir. """
    # Create benchmark directory: the desired name plus the current date and time.
    try:
        os.makedirs(self.benchmarkDir)
    except OSError as e:
        if not "File exists" in str(e):
            raise e
    for name, agent_klass in self.agents:
        todo = range(self.loops)
        if not self.overwrite:
            # If overwrite is set to false, we will only do the experiments that have
            # not been done.
            # index gets the index of a benchmark file out of the filename.
            index = lambda x: int(x[x.rfind("-") + 1:])
            done = set(index(i) for i in os.listdir(self.benchmarkDir)
                       if i.startswith("%s-" % name))
            todo = (i for i in todo if i not in done)
        for j in todo:
            logging.info("Starting agent %s's loop #%i" % (name, j + 1))
            # Make a clean copy of the agent for every run
            # Start subprocess that gives us the experiment
            agent = agent_klass()
            stats = self.testAgent(agent)
            # Dump stats to the given directory
            self.saveStats(name + "-%i" % j, stats)
Run the benchmark. All agents are tested loop times against the environment and statistics for each run are saved into the benchmark directory benchmarkDir.
run
python
pybrain/pybrain
pybrain/tools/rlgluebridge.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/rlgluebridge.py
BSD-3-Clause
def testAgent(path, agent, port=DEFAULT_PORT):
    """ Test an agent once on a rlcompetition experiment.

    Path specifies the executable file of the rl competition. """
    agent = adaptAgentObject(BenchmarkingAgent(agent))
    experiment = RLCExperiment(path, str(port))
    experiment.start()
    # This method is provided by rlglue and makes a client runnable over the network.
    clientAgent = ClientAgent(agent)
    clientAgent.connect(DEFAULT_HOST, port, CLIENT_TIMEOUT)
    logging.info("Agent connected")
    clientAgent.runAgentEventLoop()
    clientAgent.close()
    logging.info("Agent finished")
    experiment.stop()
    return agent.agent.benchmark
Test an agent once on a rlcompetition experiment. Path specifies the executable file of the rl competition.
testAgent
python
pybrain/pybrain
pybrain/tools/rlgluebridge.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/rlgluebridge.py
BSD-3-Clause
def __init__(self, agent):
    """ Return a wrapper around the given agent. """
    if hasattr(agent, 'benchmark') or hasattr(agent, 'agent'):
        raise ValueError("Wrapped agent must not define a benchmark or "
                         "an agent attribute.")
    self.agent = agent
    self.benchmark = BenchmarkDataSet()
    # For episodewide statistics
    self.__rewards = []
Return a wrapper around the given agent.
__init__
python
pybrain/pybrain
pybrain/tools/rlgluebridge.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/rlgluebridge.py
BSD-3-Clause
def buildNetwork(*layers, **options):
    """ Build arbitrarily deep networks.

    `layers` should be a list or tuple of integers, that indicate how many neurons the
    layers should have. `bias` and `outputbias` are flags to indicate whether the
    network should have the corresponding biases; both default to True.

    To adjust the classes for the layers use the `hiddenclass` and `outclass`
    parameters, which expect a subclass of :class:`NeuronLayer`.

    If the `recurrent` flag is set, a :class:`RecurrentNetwork` will be created,
    otherwise a :class:`FeedForwardNetwork`.

    If the `fast` flag is set, faster arac networks will be used instead of the
    pybrain implementations. """
    # options
    opt = {'bias': True,
           'hiddenclass': SigmoidLayer,
           'outclass': LinearLayer,
           'outputbias': True,
           'peepholes': False,
           'recurrent': False,
           'fast': False,
    }
    for key in options:
        if key not in list(opt.keys()):
            raise NetworkError('buildNetwork unknown option: %s' % key)
        opt[key] = options[key]

    if len(layers) < 2:
        raise NetworkError('buildNetwork needs 2 arguments for input and output layers at least.')

    # Bind the right class to the Network name
    network_map = {(False, False): FeedForwardNetwork,
                   (True, False): RecurrentNetwork,
    }
    try:
        network_map[(False, True)] = _FeedForwardNetwork
        network_map[(True, True)] = _RecurrentNetwork
    except NameError:
        if opt['fast']:
            raise NetworkError("No fast networks available.")
    if opt['hiddenclass'].sequential or opt['outclass'].sequential:
        if not opt['recurrent']:
            # CHECKME: a warning here?
            opt['recurrent'] = True
    Network = network_map[opt['recurrent'], opt['fast']]
    n = Network()
    # linear input layer
    n.addInputModule(LinearLayer(layers[0], name='in'))
    # output layer of type 'outclass'
    n.addOutputModule(opt['outclass'](layers[-1], name='out'))
    if opt['bias']:
        # add bias module and connection to out module, if desired
        n.addModule(BiasUnit(name='bias'))
        if opt['outputbias']:
            n.addConnection(FullConnection(n['bias'], n['out']))
    # arbitrary number of hidden layers of type 'hiddenclass'
    for i, num in enumerate(layers[1:-1]):
        layername = 'hidden%i' % i
        if issubclass(opt['hiddenclass'], LSTMLayer):
            n.addModule(opt['hiddenclass'](num, peepholes=opt['peepholes'], name=layername))
        else:
            n.addModule(opt['hiddenclass'](num, name=layername))
        if opt['bias']:
            # also connect all the layers with the bias
            n.addConnection(FullConnection(n['bias'], n[layername]))
    # connections between hidden layers
    for i in range(len(layers) - 3):
        n.addConnection(FullConnection(n['hidden%i' % i], n['hidden%i' % (i + 1)]))
    # other connections
    if len(layers) == 2:
        # flat network, connection from in to out
        n.addConnection(FullConnection(n['in'], n['out']))
    else:
        # network with hidden layer(s), connections from in to first hidden and
        # last hidden to out
        n.addConnection(FullConnection(n['in'], n['hidden0']))
        n.addConnection(FullConnection(n['hidden%i' % (len(layers) - 3)], n['out']))
    # recurrent connections
    if issubclass(opt['hiddenclass'], LSTMLayer):
        if len(layers) > 3:
            errorexit("LSTM networks with > 1 hidden layers are not supported!")
        n.addRecurrentConnection(FullConnection(n['hidden0'], n['hidden0']))
    n.sortModules()
    return n
Build arbitrarily deep networks. `layers` should be a list or tuple of integers, that indicate how many neurons the layers should have. `bias` and `outputbias` are flags to indicate whether the network should have the corresponding biases; both default to True. To adjust the classes for the layers use the `hiddenclass` and `outclass` parameters, which expect a subclass of :class:`NeuronLayer`. If the `recurrent` flag is set, a :class:`RecurrentNetwork` will be created, otherwise a :class:`FeedForwardNetwork`. If the `fast` flag is set, faster arac networks will be used instead of the pybrain implementations.
buildNetwork
python
pybrain/pybrain
pybrain/tools/shortcuts.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/shortcuts.py
BSD-3-Clause
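Typical invocations of the shortcut (standard pybrain API; layer sizes here are arbitrary):

    from pybrain.tools.shortcuts import buildNetwork
    from pybrain.structure import TanhLayer, LSTMLayer
    net = buildNetwork(2, 3, 1)                      # 2-3-1 FNN, sigmoid hidden layer
    net2 = buildNetwork(2, 4, 1, hiddenclass=TanhLayer)
    rnn = buildNetwork(2, 5, 1, hiddenclass=LSTMLayer, recurrent=True)
    print(net.activate([0.5, -0.2]))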
def _buildNetwork(*layers, **options):
    """ This is a helper function to create different kinds of networks.

    `layers` is a list of tuples. Each tuple can contain an arbitrary number of
    layers, each being connected to the next one with IdentityConnections. Due to
    this, all layers have to have the same dimension. We call these tuples 'parts.'

    Afterwards, the last layer of one tuple is connected to the first layer of the
    following tuple by a FullConnection.

    If the keyword argument bias is given, BiasUnits are added additionally with
    every FullConnection.

    Example:

        _buildNetwork(
            (LinearLayer(3),),
            (SigmoidLayer(4), GaussianLayer(4)),
            (SigmoidLayer(3),),
        )
    """
    bias = options['bias'] if 'bias' in options else False
    net = FeedForwardNetwork()
    layerParts = iter(layers)
    firstPart = iter(next(layerParts))
    firstLayer = next(firstPart)
    net.addInputModule(firstLayer)
    prevLayer = firstLayer
    for part in chain(firstPart, layerParts):
        new_part = True
        for layer in part:
            net.addModule(layer)
            # Pick class depending on whether we entered a new part
            if new_part:
                ConnectionClass = FullConnection
                if bias:
                    biasUnit = BiasUnit('BiasUnit for %s' % layer.name)
                    net.addModule(biasUnit)
                    net.addConnection(FullConnection(biasUnit, layer))
            else:
                ConnectionClass = IdentityConnection
            new_part = False
            conn = ConnectionClass(prevLayer, layer)
            net.addConnection(conn)
            prevLayer = layer
    net.addOutputModule(layer)
    net.sortModules()
    return net
This is a helper function to create different kinds of networks. `layers` is a list of tuples. Each tuple can contain an arbitrary number of layers, each being connected to the next one with IdentityConnections. Due to this, all layers have to have the same dimension. We call these tuples 'parts.' Afterwards, the last layer of one tuple is connected to the first layer of the following tuple by a FullConnection. If the keyword argument bias is given, BiasUnits are added additionally with every FullConnection. Example: _buildNetwork( (LinearLayer(3),), (SigmoidLayer(4), GaussianLayer(4)), (SigmoidLayer(3),), )
_buildNetwork
python
pybrain/pybrain
pybrain/tools/shortcuts.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/shortcuts.py
BSD-3-Clause
def loadData(self, fname):
    """ Decide which format the data is in. """
    self.filename = fname
    if fname.find('.mat') >= 0:
        self.loadMATdata(fname)
    elif fname.find('.svm') >= 0:
        self.loadSVMdata(fname)
    else:
        # dataset consists of raw ascii columns
        self.loadRawData(fname)
decide which format the data is in
loadData
python
pybrain/pybrain
pybrain/tools/svmdata.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/svmdata.py
BSD-3-Clause
def loadMATdata(self, fname):
    """ Read Matlab file containing one variable called 'data' which is an array
    nSamples x nFeatures+1 and contains the class in the first column. """
    from mlabwrap import mlab #@UnresolvedImport
    # builtin float suffices here; 'from numpy import float' is deprecated
    d = mlab.load(fname)
    self.nSamples = d.data.shape[0]
    x = []
    y = []
    for i in range(self.nSamples):
        label = int(d.data[i, 0])
        x.append(d.data[i, 1:].astype(float).tolist())
        y.append([float(label)])
    self._setDataFields(x, y)
read Matlab file containing one variable called 'data' which is an array nSamples x nFeatures+1 and contains the class in the first column
loadMATdata
python
pybrain/pybrain
pybrain/tools/svmdata.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/svmdata.py
BSD-3-Clause
def loadSVMdata(self, fname):
    """ Read svm sparse format from file 'fname' (with labels only).
    output: [attributes[], labels[]] """
    x = []
    y = []
    nFeatMax = 0
    for line in open(fname, 'r').readlines():
        # format is:
        # <class> <featnr>:<featval> <featnr>:<featval> ...
        # (whereby featnr starts at 1)
        if not line:
            break
        line = line.split()
        label = float(line[0])
        feat = []
        nextidx = 1
        for r in line[1:]:
            # construct list of features, taking care of sparsity
            (idx, val) = r.split(':')
            idx = int(idx)
            for _ in range(nextidx, idx):
                feat.append(0)  # was there a bug here?
            feat.append(float(val))
            nextidx = idx + 1
        nFeat = len(feat)
        if nFeatMax < nFeat:
            nFeatMax = nFeat
        x.append(feat)
        y.append([label])
        self.nSamples += 1
    for xi in x:
        while len(xi) < nFeatMax:
            xi.append(0.)
    self._setDataFields(x, y)
read svm sparse format from file 'fname' (with labels only) output: [attributes[], labels[]]
loadSVMdata
python
pybrain/pybrain
pybrain/tools/svmdata.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/svmdata.py
BSD-3-Clause
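To make the sparse parsing concrete, a hypothetical input file in the format loadSVMdata expects (one '<class> <featnr>:<featval> ...' record per line, feature numbers starting at 1; gaps are filled with zeros):

    1 1:0.5 3:1.2
    -1 2:0.7

This loads as x = [[0.5, 0, 1.2], [0, 0.7, 0.0]] (after zero-padding every row to the widest one) and y = [[1.0], [-1.0]].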
def loadRawData(self, fname):
    """ Read raw ascii columns from file 'fname', with the targets taken from the
    corresponding 'targets' file.
    output: [attributes[], labels[]] """
    targetfile = open(fname.replace('data', 'targets'), 'r')
    x = []
    y = []
    for line in open(fname, 'r').readlines():
        if not line:
            break
        targline = targetfile.readline()
        targline = list(map(int, targline.split()))
        for i, v in enumerate(targline):
            if v:
                label = i
                break
        feat = list(map(float, line.split()))
        x.append(feat)
        y.append([float(label)])
        self.nSamples += 1
    self.nCls = len(targline)
    targetfile.close()
    self._setDataFields(x, y)
read raw ascii columns from file 'fname', with the targets taken from the corresponding 'targets' file output: [attributes[], labels[]]
loadRawData
python
pybrain/pybrain
pybrain/tools/svmdata.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/svmdata.py
BSD-3-Clause
def getTargets(self):
    """ Return the targets of the dataset, preserving the current sample pointer. """
    self.storePointer()
    self.reset()
    targets = []
    while not self.endOfSequences():
        input, target, dummy = self.getSample()
        targets.append(target)
    self.recallPointer()
    return targets
return the targets of the dataset, preserving the current sample pointer
getTargets
python
pybrain/pybrain
pybrain/tools/svmdata.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/svmdata.py
BSD-3-Clause
def classificationPerformance(cls, output, target):
    """ Returns the hit rate of the outputs compared to the targets.

    :arg output: array of output values
    :arg target: array of target values
    """
    output = array(output)
    target = array(target)
    assert len(output) == len(target)
    n_correct = sum(output == target)
    return float(n_correct) / float(len(output))
Returns the hit rate of the outputs compared to the targets. :arg output: array of output values :arg target: array of target values
classificationPerformance
python
pybrain/pybrain
pybrain/tools/validation.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/validation.py
BSD-3-Clause
def MSE(cls, output, target, importance=None):
    """ Returns the mean squared error. The multidimensional arrays will get flattened
    in order to compare them.

    :arg output: array of output values
    :arg target: array of target values
    :key importance: each squared error will be multiplied with its corresponding
        importance value. After summing up these values, the result will be divided
        by the sum of all importance values for normalization purposes.
    """
    # assert equal shapes
    output = array(output)
    target = array(target)
    assert output.shape == target.shape
    if importance is not None:
        assert importance.shape == target.shape
        importance = importance.flatten()
    # flatten structures
    output = output.flatten()
    target = target.flatten()
    if importance is None:
        importance = ones(len(output))
    # calculate mse
    squared_error = (output - target) ** 2
    mse = dot(squared_error, importance) / sum(importance)
    return mse
Returns the mean squared error. The multidimensional arrays will get flattened in order to compare them. :arg output: array of output values :arg target: array of target values :key importance: each squared error will be multiplied with its corresponding importance value. After summing up these values, the result will be divided by the sum of all importance values for normalization purposes.
MSE
python
pybrain/pybrain
pybrain/tools/validation.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/validation.py
BSD-3-Clause
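A short sketch of the validator on raw arrays (toy values; the importance weighting zeroes out the second error term):

    from numpy import array
    out = array([0.0, 1.0])
    tgt = array([0.5, 0.0])
    print(Validator.MSE(out, tgt))                               # (0.25 + 1.0) / 2 = 0.625
    print(Validator.MSE(out, tgt, importance=array([1., 0.])))  # 0.25 / 1.0 = 0.25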
def getSequenceEnds(cls, dataset):
    """ Returns the indices of the last elements of the sequences stored inside
    dataset.

    :arg dataset: Must implement :class:`SequentialDataSet`
    """
    sequence_ends = delete(dataset.getField('sequence_index') - 1, 0)
    sequence_ends = append(sequence_ends, dataset.getLength() - 1)
    sequence_ends = array(sequence_ends)
    return sequence_ends
Returns the indices of the last elements of the sequences stored inside dataset. :arg dataset: Must implement :class:`SequentialDataSet`
getSequenceEnds
python
pybrain/pybrain
pybrain/tools/validation.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/validation.py
BSD-3-Clause
def getSequenceEndsImportance(cls, dataset):
    """ Returns the importance values of the last elements of the sequences stored
    inside dataset.

    :arg dataset: Must implement :class:`ImportanceDataSet`
    """
    importance = zeros(dataset.getLength())
    importance[cls.getSequenceEnds(dataset)] = 1.
    return importance
Returns the importance values of the last elements of the sequences stored inside dataset. :arg dataset: Must implement :class:`ImportanceDataSet`
getSequenceEndsImportance
python
pybrain/pybrain
pybrain/tools/validation.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/validation.py
BSD-3-Clause
def classificationPerformance(cls, module, dataset):
    """ Returns the hit rate of the module's output compared to the targets stored
    inside dataset.

    :arg module: Object of any subclass of pybrain's Module type
    :arg dataset: Dataset object at least containing the fields 'input' and 'target'
        (for example SupervisedDataSet)
    """
    return ModuleValidator.validate(
        Validator.classificationPerformance,
        module,
        dataset)
Returns the hit rate of the module's output compared to the targets stored inside dataset. :arg module: Object of any subclass of pybrain's Module type :arg dataset: Dataset object at least containing the fields 'input' and 'target' (for example SupervisedDataSet)
classificationPerformance
python
pybrain/pybrain
pybrain/tools/validation.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/validation.py
BSD-3-Clause
def MSE(cls, module, dataset):
    """ Returns the mean squared error.

    :arg module: Object of any subclass of pybrain's Module type
    :arg dataset: Dataset object at least containing the fields 'input' and 'target'
        (for example SupervisedDataSet)
    """
    return ModuleValidator.validate(
        Validator.MSE,
        module,
        dataset)
Returns the mean squared error. :arg module: Object of any subclass of pybrain's Module type :arg dataset: Dataset object at least containing the fields 'input' and 'target' (for example SupervisedDataSet)
MSE
python
pybrain/pybrain
pybrain/tools/validation.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/validation.py
BSD-3-Clause
def validate(cls, valfunc, module, dataset):
    """ Abstract validate function, that is heavily used by this class. First, it
    calculates the module's output on the dataset. Then it compares the output to the
    target values of the dataset through the valfunc function and returns the result.

    :arg valfunc: A function expecting arrays for output, target and importance
        (optional). See Validator.MSE for an example.
    :arg module: Object of any subclass of pybrain's Module type
    :arg dataset: Dataset object at least containing the fields 'input' and 'target'
        (for example SupervisedDataSet)
    """
    target = dataset.getField('target')
    output = ModuleValidator.calculateModuleOutput(module, dataset)
    if isinstance(dataset, ImportanceDataSet):
        importance = dataset.getField('importance')
        return valfunc(output, target, importance)
    else:
        return valfunc(output, target)
Abstract validate function, that is heavily used by this class. First, it calculates the module's output on the dataset. Then it compares the output to the target values of the dataset through the valfunc function and returns the result. :arg valfunc: A function expecting arrays for output, target and importance (optional). See Validator.MSE for an example. :arg module: Object of any subclass of pybrain's Module type :arg dataset: Dataset object at least containing the fields 'input' and 'target' (for example SupervisedDataSet)
validate
python
pybrain/pybrain
pybrain/tools/validation.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/validation.py
BSD-3-Clause
def _calculateModuleOutputSequential(cls, module, dataset):
    """ Calculates the module's output on the dataset. Especially designed for
    datasets storing sequences. After a sequence is fed to the module, it has to be
    reset.

    :arg dataset: Dataset object of type SequentialDataSet or subclass.
    """
    outputs = []
    for seq in dataset._provideSequences():
        module.reset()
        for i in range(len(seq)):
            output = module.activate(seq[i][0])
            outputs.append(output.copy())
    outputs = array(outputs)
    return outputs
Calculates the module's output on the dataset. Especially designed for datasets storing sequences. After a sequence is fed to the module, it has to be reset. :arg dataset: Dataset object of type SequentialDataSet or subclass.
_calculateModuleOutputSequential
python
pybrain/pybrain
pybrain/tools/validation.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/validation.py
BSD-3-Clause
def calculateModuleOutput(cls, module, dataset):
    """ Calculates the module's output on the dataset. Can be called with any type of
    dataset.

    :arg dataset: Any Dataset object containing an 'input' field.
    """
    if isinstance(dataset, SequentialDataSet) or isinstance(dataset, ImportanceDataSet):
        return cls._calculateModuleOutputSequential(module, dataset)
    else:
        module.reset()
        input = dataset.getField('input')
        output = array([module.activate(inp) for inp in input])
        return output
Calculates the module's output on the dataset. Can be called with any type of dataset. :arg dataset: Any Dataset object containing an 'input' field.
calculateModuleOutput
python
pybrain/pybrain
pybrain/tools/validation.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/validation.py
BSD-3-Clause
def __init__(self, trainer, dataset, n_folds=5,
             valfunc=ModuleValidator.classificationPerformance, **kwargs):
    """ :arg trainer: Trainer containing a module to be trained
    :arg dataset: Dataset for training and testing
    :key n_folds: Number of pieces the dataset will be split into
    :key valfunc: Validation function. Should expect a module and a dataset.
        E.g. ModuleValidator.MSE()
    :key others: see setArgs() method
    """
    self._trainer = trainer
    self._dataset = dataset
    self._n_folds = n_folds
    self._calculatePerformance = valfunc
    self._max_epochs = None
    self.setArgs(**kwargs)
:arg trainer: Trainer containing a module to be trained :arg dataset: Dataset for training and testing :key n_folds: Number of pieces the dataset will be split into :key valfunc: Validation function. Should expect a module and a dataset. E.g. ModuleValidator.MSE() :key others: see setArgs() method
__init__
python
pybrain/pybrain
pybrain/tools/validation.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/validation.py
BSD-3-Clause
def setArgs(self, **kwargs):
    """ Set the specified member variables.

    :key max_epochs: maximum number of epochs the trainer should train the module for.
    :key verbosity: set verbosity level
    """
    for key, value in list(kwargs.items()):
        if key in ("verbose", "ver", "v"):
            self._verbosity = value
        elif key == "max_epochs":
            self._max_epochs = value
Set the specified member variables. :key max_epochs: maximum number of epochs the trainer should train the module for. :key verbosity: set verbosity level
setArgs
python
pybrain/pybrain
pybrain/tools/validation.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/validation.py
BSD-3-Clause
def validate(self):
    """ The main method of this class. It runs the crossvalidation process and
    returns the validation result (e.g. performance). """
    dataset = self._dataset
    trainer = self._trainer
    n_folds = self._n_folds
    l = dataset.getLength()
    inp = dataset.getField("input")
    tar = dataset.getField("target")
    indim = dataset.indim
    outdim = dataset.outdim
    assert l > n_folds
    perms = array_split(permutation(l), n_folds)
    perf = 0.
    for i in range(n_folds):
        # determine train indices
        train_perms_idxs = list(range(n_folds))
        train_perms_idxs.pop(i)
        temp_list = []
        for train_perms_idx in train_perms_idxs:
            temp_list.append(perms[train_perms_idx])
        train_idxs = concatenate(temp_list)
        # determine test indices
        test_idxs = perms[i]
        # train
        train_ds = SupervisedDataSet(indim, outdim)
        train_ds.setField("input", inp[train_idxs])
        train_ds.setField("target", tar[train_idxs])
        trainer = copy.deepcopy(self._trainer)
        trainer.setData(train_ds)
        if not self._max_epochs:
            trainer.train()
        else:
            trainer.trainEpochs(self._max_epochs)
        # test
        test_ds = SupervisedDataSet(indim, outdim)
        test_ds.setField("input", inp[test_idxs])
        test_ds.setField("target", tar[test_idxs])
        perf += self._calculatePerformance(trainer.module, test_ds)
    perf /= n_folds
    return perf
The main method of this class. It runs the crossvalidation process and returns the validation result (e.g. performance).
validate
python
pybrain/pybrain
pybrain/tools/validation.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/validation.py
BSD-3-Clause
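How these pieces fit together, as a hedged sketch (toy XOR-style data; ModuleValidator.MSE is used as valfunc since the targets are not class labels):

    from pybrain.datasets import SupervisedDataSet
    from pybrain.supervised.trainers import BackpropTrainer
    from pybrain.tools.shortcuts import buildNetwork
    from pybrain.tools.validation import CrossValidator, ModuleValidator
    ds = SupervisedDataSet(2, 1)
    for inp, tgt in [((0, 0), 0), ((0, 1), 1), ((1, 0), 1), ((1, 1), 0)] * 5:
        ds.addSample(inp, (tgt,))
    net = buildNetwork(2, 3, 1)
    trainer = BackpropTrainer(net, ds)
    cv = CrossValidator(trainer, ds, n_folds=5, valfunc=ModuleValidator.MSE, max_epochs=20)
    print(cv.validate())   # mean MSE over the 5 folds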
def testOnSequenceData(module, dataset):
    """ Fetch targets and calculate the module's output on dataset. Output and target
    are in one-of-many format. The class for each sequence is determined by first
    summing the probabilities for each individual sample over the sequence, and then
    finding its maximum. """
    target = dataset.getField("target")
    output = ModuleValidator.calculateModuleOutput(module, dataset)
    # determine last indices of the sequences inside dataset
    ends = SequenceHelper.getSequenceEnds(dataset)
    summed_output = zeros(dataset.outdim)
    # class_output and class_target will store class labels instead of
    # one-of-many values
    class_output = []
    class_target = []
    for j in range(len(output)):
        # sum up the output values of one sequence
        summed_output += output[j]
        # if we reached the end of the sequence
        if j in ends:
            # convert summed_output and target to class labels
            class_output.append(argmax(summed_output))
            class_target.append(argmax(target[j]))
            # reset the summed_output to zeros
            summed_output = zeros(dataset.outdim)
    class_output = array(class_output)
    class_target = array(class_target)
    return Validator.classificationPerformance(class_output, class_target)
Fetch targets and calculate the module's output on dataset. Output and target are in one-of-many format. The class for each sequence is determined by first summing the probabilities for each individual sample over the sequence, and then finding its maximum.
testOnSequenceData
python
pybrain/pybrain
pybrain/tools/validation.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/validation.py
BSD-3-Clause
def __init__(self, filename, newfile):
    """ :key newfile: is the file to be read or is it a new file? """
    self.filename = filename
    if not newfile:
        self.dom = parse(filename)
        if self.dom.firstChild.nodeName != 'PyBrain':
            raise Exception('Not a correct PyBrain XML file')
    else:
        domimpl = getDOMImplementation()
        self.dom = domimpl.createDocument(None, 'PyBrain', None)
    self.root = self.dom.documentElement
:key newfile: is the file to be read or is it a new file?
__init__
python
pybrain/pybrain
pybrain/tools/customxml/handling.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/customxml/handling.py
BSD-3-Clause
def readAttrDict(self, node, transform=None):
    """ Read a dictionary of attributes.

    :key transform: optional function transforming the attribute values on reading
    """
    args = {}
    for name, val in list(node.attributes.items()):
        name = str(name)
        if transform != None:
            args[name] = transform(val, name)
        else:
            args[name] = val
    return args
read a dictionary of attributes :key transform: optional function transforming the attribute values on reading
readAttrDict
python
pybrain/pybrain
pybrain/tools/customxml/handling.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/customxml/handling.py
BSD-3-Clause
def writeAttrDict(self, node, adict, transform=None):
    """ Write a dictionary of attributes.

    :key transform: optionally transform the attribute values on writing
    """
    for name, val in list(adict.items()):
        if val != None:
            if transform != None:
                node.setAttribute(name, transform(val, name))
            else:
                node.setAttribute(name, val)
write a dictionary of attributes :key transform: optionally transform the attribute values on writing
writeAttrDict
python
pybrain/pybrain
pybrain/tools/customxml/handling.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/customxml/handling.py
BSD-3-Clause
def newChild(self, node, name):
    """ Create a new child of node with the provided name. """
    elem = self.dom.createElement(name)
    node.appendChild(elem)
    return elem
create a new child of node with the provided name.
newChild
python
pybrain/pybrain
pybrain/tools/customxml/handling.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/customxml/handling.py
BSD-3-Clause
def getChild(self, node, name):
    """ get the child with the given name """
    for n in node.childNodes:
        if name and n.nodeName == name:
            return n
get the child with the given name
getChild
python
pybrain/pybrain
pybrain/tools/customxml/handling.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/customxml/handling.py
BSD-3-Clause
def findNode(self, name, index=0, root=None):
    """ return the toplevel node with the provided name (if there are more,
    choose the one corresponding to index). """
    if root is None:
        root = self.root
    for n in root.childNodes:
        if n.nodeName == name:
            if index == 0:
                return n
            index -= 1
    return None
return the toplevel node with the provided name (if there are more, choose the one corresponding to index).
findNode
python
pybrain/pybrain
pybrain/tools/customxml/handling.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/customxml/handling.py
BSD-3-Clause
def findNamedNode(self, name, nameattr, root=None):
    """ return the toplevel node with the provided name, and the fitting
    'name' attribute. """
    if root is None:
        root = self.root
    for n in root.childNodes:
        if n.nodeName == name:
            # modif JPQ
            # if 'name' in n.attributes:
            if n.attributes['name']:
                # modif JPQ
                # if n.attributes['name'] == nameattr:
                if n.attributes['name'].value == nameattr:
                    return n
    return None
return the toplevel node with the provided name, and the fitting 'name' attribute.
findNamedNode
python
pybrain/pybrain
pybrain/tools/customxml/handling.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/customxml/handling.py
BSD-3-Clause
def baseTransform(val):
    """ back-conversion: modules are encoded by their name
    and classes by the classname """
    from pybrain.structure.modules.module import Module
    from inspect import isclass

    if isinstance(val, Module):
        return val.name
    elif isclass(val):
        return val.__name__
    else:
        return str(val)
back-conversion: modules are encoded by their name and classes by the classname
baseTransform
python
pybrain/pybrain
pybrain/tools/customxml/handling.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/customxml/handling.py
BSD-3-Clause
def readFrom(filename, name=None, index=0):
    """ read a network from an existing XML file

    :key name: if this parameter is specified, read the network with this name
    :key index: which network in the file shall be read (if there is more than one) """
    r = NetworkReader(filename, newfile=False)
    if name:
        netroot = r.findNamedNode('Network', name)
    else:
        netroot = r.findNode('Network', index)
    return r.readNetwork(netroot)
read a network from an existing XML file :key name: if this parameter is specified, read the network with this name :key index: which network in the file shall be read (if there is more than one)
readFrom
python
pybrain/pybrain
pybrain/tools/customxml/networkreader.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/customxml/networkreader.py
BSD-3-Clause
def appendToFile(net, filename):
    """ append the network to an existing xml file """
    w = NetworkWriter(filename, newfile=False)
    netroot = w.newRootNode('Network')
    w.writeNetwork(net, netroot)
    w.save()
append the network to an existing xml file
appendToFile
python
pybrain/pybrain
pybrain/tools/customxml/networkwriter.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/customxml/networkwriter.py
BSD-3-Clause
def writeToFile(net, filename):
    """ write the network as a new xml file """
    w = NetworkWriter(filename, newfile=True)
    netroot = w.newRootNode('Network')
    w.writeNetwork(net, netroot)
    w.save()
write the network as a new xml file
writeToFile
python
pybrain/pybrain
pybrain/tools/customxml/networkwriter.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/customxml/networkwriter.py
BSD-3-Clause
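The reader/writer pair above supports a simple save/load round trip; a short sketch (the two-layer network is just an example):

from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader

net = buildNetwork(2, 3, 1)                # example feed-forward network
NetworkWriter.writeToFile(net, 'net.xml')  # serialize into a fresh XML file
restored = NetworkReader.readFrom('net.xml')
print(restored.activate([0.5, -0.2]))      # same parameters as `net`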
def writeNetwork(self, net, netroot):
    """ write a Network into a new XML node """
    netroot.setAttribute('name', net.name)
    netroot.setAttribute('class', canonicClassString(net))
    if net.argdict:
        self.writeArgs(netroot, net.argdict)

    # the modules
    mods = self.newChild(netroot, 'Modules')
    # first write the input modules (in order)
    for im in net.inmodules:
        self.writeModule(mods, im, True, im in net.outmodules)
    # now the output modules (in order)
    for om in net.outmodules:
        if om not in net.inmodules:
            self.writeModule(mods, om, False, True)
    # now the rest
    for m in net.modulesSorted:
        if m not in net.inmodules and m not in net.outmodules:
            self.writeModule(mods, m, False, False)

    # the motherconnections
    if len(net.motherconnections) > 0:
        mothers = self.newChild(netroot, 'MotherConnections')
        for m in net.motherconnections:
            self.writeBuildable(mothers, m)

    # the connections
    conns = self.newChild(netroot, 'Connections')
    for m in net.modulesSorted:
        for c in net.connections[m]:
            self.writeConnection(conns, c, False)
    if hasattr(net, "recurrentConns"):
        for c in net.recurrentConns:
            self.writeConnection(conns, c, True)
write a Network into a new XML node
writeNetwork
python
pybrain/pybrain
pybrain/tools/customxml/networkwriter.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/customxml/networkwriter.py
BSD-3-Clause
def writeBuildable(self, rootnode, m):
    """ store the class (with path) and name in a new child. """
    mname = m.__class__.__name__
    mnode = self.newChild(rootnode, mname)
    mnode.setAttribute('name', m.name)
    mnode.setAttribute('class', canonicClassString(m))
    if m.argdict:
        self.writeArgs(mnode, m.argdict)
    if m.paramdim > 0 and not isinstance(m, SharedConnection):
        self.writeParams(mnode, m.params)
    return mnode
store the class (with path) and name in a new child.
writeBuildable
python
pybrain/pybrain
pybrain/tools/customxml/networkwriter.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/customxml/networkwriter.py
BSD-3-Clause
def makeMnistDataSets(path):
    """Return a pair consisting of two datasets, the first being the training
    and the second being the test dataset."""
    test = SupervisedDataSet(28 * 28, 10)
    test_image_file = os.path.join(path, 't10k-images-idx3-ubyte')
    test_label_file = os.path.join(path, 't10k-labels-idx1-ubyte')
    test_images = images(test_image_file)
    test_labels = (flaggedArrayByIndex(l, 10) for l in labels(test_label_file))

    for image, label in zip(test_images, test_labels):
        test.addSample(image, label)

    train = SupervisedDataSet(28 * 28, 10)
    train_image_file = os.path.join(path, 'train-images-idx3-ubyte')
    train_label_file = os.path.join(path, 'train-labels-idx1-ubyte')
    train_images = images(train_image_file)
    train_labels = (flaggedArrayByIndex(l, 10) for l in labels(train_label_file))

    for image, label in zip(train_images, train_labels):
        train.addSample(image, label)

    return train, test
Return a pair consisting of two datasets, the first being the training and the second being the test dataset.
makeMnistDataSets
python
pybrain/pybrain
pybrain/tools/datasets/mnist.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/datasets/mnist.py
BSD-3-Clause
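Assuming the four standard MNIST IDX files have been downloaded and unpacked into one directory (the path below is hypothetical), the loader above can be used like this:

from pybrain.tools.datasets.mnist import makeMnistDataSets

train, test = makeMnistDataSets('/data/mnist')   # hypothetical directory
print(len(train), 'training and', len(test), 'test samples')
image, label = train.getSample(0)                # 784-vector and one-of-ten target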
def plot_module_classification_sequence_performance(module, dataset, sequence_index, bounds=(0, 1)):
    """Plot all outputs and fill the value of the output of the correct category.

    The graph of a good classifier should be almost all white, with all other
    values very low. A graph with a lot of black is a bad sign.

    :param module: The module/network to plot.
    :type module: pybrain.structure.modules.module.Module
    :param dataset: Training dataset used as inputs and expected outputs.
    :type dataset: SequenceClassificationDataSet
    :param sequence_index: Sequence index to plot in the dataset.
    :type sequence_index: int
    :param bounds: Outputs lower and upper bound.
    :type bounds: list
    """
    outputs = []
    valid_output = []
    module.reset()
    for sample in dataset.getSequenceIterator(sequence_index):
        out = module.activate(sample[0])
        outputs.append(out)
        valid_output.append(out[sample[1].argmax()])
    plt.fill_between(list(range(len(valid_output))), 1, valid_output, facecolor='k', alpha=0.8)
    plt.plot(outputs, linewidth=4, alpha=0.7)
    plt.yticks(bounds)
Plot all outputs and fill the value of the output of the correct category. The graph of a good classifier should be almost all white, with all other values very low. A graph with a lot of black is a bad sign. :param module: The module/network to plot. :type module: pybrain.structure.modules.module.Module :param dataset: Training dataset used as inputs and expected outputs. :type dataset: SequenceClassificationDataSet :param sequence_index: Sequence index to plot in the dataset. :type sequence_index: int :param bounds: Outputs lower and upper bound. :type bounds: list
plot_module_classification_sequence_performance
python
pybrain/pybrain
pybrain/tools/plotting/classification.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/classification.py
BSD-3-Clause
def plot_module_classification_dataset_performance(module, dataset, cols=4, bounds=(0, 1)):
    """Do a plot_module_classification_sequence_performance() for all sequences
    in the dataset.

    :param module: The module/network to plot.
    :type module: pybrain.structure.modules.module.Module
    :param dataset: Training dataset used as inputs and expected outputs.
    :type dataset: SequenceClassificationDataSet
    :param bounds: Outputs lower and upper bound.
    :type bounds: list
    """
    # Outputs and detected category error for each sequence.
    for i in range(dataset.getNumSequences()):
        # matplotlib subplot indices are 1-based, hence i + 1
        plt.subplot(ceil(dataset.getNumSequences() / float(cols)), cols, i + 1)
        ClassificationDataSetPlot.plot_module_classification_sequence_performance(module, dataset, i, bounds)
Do a plot_module_classification_sequence_performance() for all sequences in the dataset. :param module: The module/network to plot. :type module: pybrain.structure.modules.module.Module :param dataset: Training dataset used as inputs and expected outputs. :type dataset: SequenceClassificationDataSet :param bounds: Outputs lower and upper bound. :type bounds: list
plot_module_classification_dataset_performance
python
pybrain/pybrain
pybrain/tools/plotting/classification.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/classification.py
BSD-3-Clause
def punchcard_module_classification_performance(module, dataset, s=800):
    """Punchcard-like classification performance: actual dataset target vs.
    estimated target by the module.

    The graph of a good classifier module should have no red dots visible:

    - Red dots: target (only visible if the black dot doesn't cover it).
    - Green dots: estimated class confidences (size = output means).
    - Black dots: single winner-takes-all estimated target.

    :param module: An object that has at least reset() and activate() methods.
    :param dataset: A classification dataset. It should, for any given
        sequence, have a constant target.
    :type dataset: ClassificationDataSet
    """
    # TODO: Could also show the variation for each dot
    #       (e.g., vertical errorbar of 2*stddev).
    # TODO: Could keep together all sequences of a given class and somehow
    #       arrange them closer together. Could then aggregate them and
    #       include horizontal errorbar.
    def calculate_module_output_mean(module, inputs):
        """Returns the mean of the module's outputs for a given input list."""
        outputs = np.zeros(module.outdim)
        module.reset()
        for inpt in inputs:
            outputs += module.activate(inpt)
        return outputs / len(inputs)

    num_sequences = dataset.getNumSequences()
    actual = []
    expected = []
    confidence_x = []
    confidence_s = []
    correct = 0
    for seq_i in range(num_sequences):
        seq = dataset.getSequence(seq_i)
        outputs_mean = calculate_module_output_mean(module, seq[0])
        actual.append(np.argmax(outputs_mean))
        confidence_s.append(np.array(outputs_mean))
        confidence_x.append(np.ones(module.outdim) * seq_i)
        # FIXME: np.argmax(seq[1]) == dataset.getSequenceClass(seq_i) is bugged
        #        for split SequenceClassificationDataSet.
        expected.append(np.argmax(seq[1]))
        if actual[-1] == expected[-1]:
            correct += 1
    # integer percentage of correctly classified sequences
    plt.title('{}% Correct Classification (red dots mean bad classification)'.format(correct * 100 // num_sequences))
    plt.xlabel('Sequence')
    plt.ylabel('Class')
    plt.scatter(list(range(num_sequences)), expected, s=s, c='r', linewidths=0)
    plt.scatter(list(range(num_sequences)), actual, s=s, c='k')
    plt.scatter(confidence_x, list(range(module.outdim)) * num_sequences,
                s=s * np.array(confidence_s), c='g', linewidths=0, alpha=0.66)
    plt.yticks(list(range(dataset.nClasses)), dataset.class_labels)
Punchcard-like classification performance: actual dataset target vs. estimated target by the module. The graph of a good classifier module should have no red dots visible: - Red dots: target (only visible if the black dot doesn't cover it). - Green dots: estimated class confidences (size = output means). - Black dots: single winner-takes-all estimated target. :param module: An object that has at least reset() and activate() methods. :param dataset: A classification dataset. It should, for any given sequence, have a constant target. :type dataset: ClassificationDataSet
punchcard_module_classification_performance
python
pybrain/pybrain
pybrain/tools/plotting/classification.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/classification.py
BSD-3-Clause
def calculate_module_output_mean(module, inputs):
    """Returns the mean of the module's outputs for a given input list."""
    outputs = np.zeros(module.outdim)
    module.reset()
    for inpt in inputs:
        outputs += module.activate(inpt)
    return outputs / len(inputs)
Returns the mean of the module's outputs for a given input list.
calculate_module_output_mean
python
pybrain/pybrain
pybrain/tools/plotting/classification.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/classification.py
BSD-3-Clause
def __init__(self, mat, cmap=None, pixelspervalue=20, minvalue=None, maxvalue=None):
    """ Make a colormap image of a matrix

    :key mat: the matrix to be used for the colormap.
    """
    if minvalue is None:
        minvalue = amin(mat)
    if maxvalue is None:
        maxvalue = amax(mat)
    if not cmap:
        cmap = cm.hot

    figsize = (array(mat.shape) / 100. * pixelspervalue)[::-1]
    self.fig = figure(figsize=figsize)
    axes([0, 0, 1, 1])  # make the plot occupy the whole canvas
    axis('off')
    self.fig.set_size_inches(figsize)
    imshow(mat, cmap=cmap, clim=(minvalue, maxvalue), interpolation='nearest')
Make a colormap image of a matrix :key mat: the matrix to be used for the colormap.
__init__
python
pybrain/pybrain
pybrain/tools/plotting/colormaps.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/colormaps.py
BSD-3-Clause
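A quick sketch of how the class above can be used to inspect a matrix, mirroring its usage elsewhere in pybrain:

from numpy import random
import pylab
from pybrain.tools.plotting.colormaps import ColorMap

ColorMap(random.rand(20, 30), pixelspervalue=10)  # opens one figure per matrix
pylab.show()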
def __init__(self, f, xmin=-1, xmax=1, ymin=-1, ymax=1, precision=50,
             newfig=True, colorbar=False, cblabel=None):
    """ :key precision: how many steps along every dimension """
    if isinstance(f, FunctionEnvironment):
        assert f.xdim == 2
        self.f = lambda x, y: f(array([x, y]))
    elif isclass(f) and issubclass(f, FunctionEnvironment):
        tmp = f(2)
        self.f = lambda x, y: tmp(array([x, y]))
    else:
        self.f = f
    self.precision = precision
    self.colorbar = colorbar
    self.cblabel = cblabel
    self.xs = r_[xmin:xmax:self.precision * 1j]
    self.ys = r_[ymin:ymax:self.precision * 1j]
    self.zs = self._generateValMap()
    if newfig:
        self.fig = figure()
:key precision: how many steps along every dimension
__init__
python
pybrain/pybrain
pybrain/tools/plotting/fitnesslandscapes.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/fitnesslandscapes.py
BSD-3-Clause
def _generateValMap(self):
    """ generate the function fitness values for the current grid of x and y """
    vals = zeros((len(self.xs), len(self.ys)))
    for i, x in enumerate(self.xs):
        for j, y in enumerate(self.ys):
            vals[j, i] = self.f(x, y)
    return vals
generate the function fitness values for the current grid of x and y
_generateValMap
python
pybrain/pybrain
pybrain/tools/plotting/fitnesslandscapes.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/fitnesslandscapes.py
BSD-3-Clause
def plotAll(self, levels=50, popup=True):
    """ :key levels: how many fitness levels should be drawn. """
    tmp = contour(self.xs, self.ys, self.zs, levels)
    if self.colorbar:
        cb = colorbar(tmp)
        if self.cblabel is not None:
            cb.set_label(self.cblabel)
    if popup:
        show()
:key levels: how many fitness levels should be drawn.
plotAll
python
pybrain/pybrain
pybrain/tools/plotting/fitnesslandscapes.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/fitnesslandscapes.py
BSD-3-Clause
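A minimal sketch of the landscape plotter, assuming the enclosing class is exported as FitnessPlotter (the quadratic bowl is just an example function):

from pybrain.tools.plotting.fitnesslandscapes import FitnessPlotter

p = FitnessPlotter(lambda x, y: x ** 2 + y ** 2, xmin=-2, xmax=2, ymin=-2, ymax=2)
p.plotAll(levels=30, popup=True)  # contour plot of the fitness landscape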
def addSamples(self, samples, rescale=True, color=''):
    """ plot some sample points on the fitness landscape.

    :key rescale: should the plotting ranges be adjusted? """
    # split the samples into x and y
    sx = zeros(len(samples))
    sy = zeros(len(samples))
    for i, s in enumerate(samples):
        sx[i] = s[0]
        sy[i] = s[1]
    if rescale:
        self._rescale(min(sx), max(sx), min(sy), max(sy))
    plot(sx, sy, color + '+')
plot some sample points on the fitness landscape. :key rescale: should the plotting ranges be adjusted?
addSamples
python
pybrain/pybrain
pybrain/tools/plotting/fitnesslandscapes.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/fitnesslandscapes.py
BSD-3-Clause
def plotFitnessProgession(fitdict, batchsize=1, semilog=True, targetcutoff=1e-10,
                          minimize=True, title=None, verbose=True,
                          varyplotsymbols=False, averageOverEvaluations=True,
                          onlysuccessful=False, useMedian=False, resolution=1000):
    """ Plot multiple fitness curves on a single figure, with the following
    customizations:

    :arg fitdict: a dictionary mapping a name to a list of fitness-arrays
    :key batchsize: the number of evaluations between two points in the
        fitness-arrays; specific batch sizes can also be given in fitdict
    :key targetcutoff: this gives the cutoff point at the best fitness
    :key averageOverEvaluations: averaging is done over fitnesses (for a given
        number of evaluations) or over evaluations required to reach a certain
        fitness.
    :key resolution: resolution when averaging over evaluations
    :key onlysuccessful: consider only successful runs
    :key title: specify a title.
    :key varyplotsymbols: use different line types for each curve.
    """
    def isSuccessful(l):
        """ criterion for a successful run """
        if targetcutoff is None:
            return True
        elif minimize:
            return min(l) <= targetcutoff
        else:
            return max(l) >= targetcutoff

    def paddedClipped(l, maxLen):
        assert len(l) <= maxLen
        res = zeros(maxLen)
        if targetcutoff is None:
            res[:len(l)] += l
        elif minimize:
            res[:len(l)] += l.clip(min=targetcutoff, max=1e100)
        else:
            res[:len(l)] += l.clip(max=targetcutoff, min=-1e100)
        return res

    def relevantPart(l):
        """ the part of the vector that's above the cutoff. """
        if targetcutoff is not None:
            for i, val in enumerate(l):
                if minimize and val <= targetcutoff:
                    return l[:i + 1]
                elif not minimize and val >= targetcutoff:
                    return l[:i + 1]
        return l

    i = 0
    for name, flist in sorted(fitdict.items()):
        if isinstance(flist, tuple):
            batchsize = flist[1]
            flist = flist[0]
        i += 1
        nbRuns = len(flist)
        print((name, nbRuns, 'runs',))
        if targetcutoff is not None:
            if onlysuccessful:
                # filter out unsuccessful runs
                flist = list(filter(isSuccessful, flist))
                print((',', len(flist), 'of which were successful.'))
            else:
                print()
            # cut off the irrelevant part
            flist = list(map(relevantPart, flist))
        if len(flist) == 0:
            continue

        if averageOverEvaluations:
            worstPerf = max(list(map(max, flist)))
            if semilog:
                yPlot = list(reversed(power(10, ((array(list(range(resolution + 1))) / float(resolution))
                                                 * (log10(worstPerf) - log10(targetcutoff))
                                                 + log10(targetcutoff)))))
            else:
                yPlot = list(reversed((array(list(range(resolution + 1))) / float(resolution))
                                      * (worstPerf - targetcutoff) + targetcutoff))
            xPlot = avgFoundAfter(yPlot, flist, batchsize, useMedian=useMedian)
        else:
            longestRun = max(list(map(len, flist)))
            xPlot = array(list(range(longestRun))) * batchsize
            summed = zeros(longestRun)
            for l in flist:
                summed += paddedClipped(l, longestRun)
            yPlot = paddedClipped(summed / len(flist), longestRun)

        if semilog:
            semilogy()
        if varyplotsymbols:
            psymbol = plotsymbols[i % len(plotsymbols)]
        else:
            psymbol = '-'
        plot(xPlot, yPlot, psymbol, label=name)

    ylabel('-fitness')
    xlabel('number of evaluations')
    pylab.title(title)
    legend()
Plot multiple fitness curves on a single figure, with the following customizations: :arg fitdict: a dictionary mapping a name to a list of fitness-arrays :key batchsize: the number of evaluations between two points in the fitness-arrays; specific batch sizes can also be given in fitdict :key targetcutoff: this gives the cutoff point at the best fitness :key averageOverEvaluations: averaging is done over fitnesses (for a given number of evaluations) or over evaluations required to reach a certain fitness. :key resolution: resolution when averaging over evaluations :key onlysuccessful: consider only successful runs :key title: specify a title. :key varyplotsymbols: use different line types for each curve.
plotFitnessProgession
python
pybrain/pybrain
pybrain/tools/plotting/fitnessprogression.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/fitnessprogression.py
BSD-3-Clause
def relevantPart(l):
    """ the part of the vector that's above the cutoff. """
    if targetcutoff is not None:
        for i, val in enumerate(l):
            if minimize and val <= targetcutoff:
                return l[:i + 1]
            elif not minimize and val >= targetcutoff:
                return l[:i + 1]
    return l
the part of the vector that's above the cutoff.
relevantPart
python
pybrain/pybrain
pybrain/tools/plotting/fitnessprogression.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/fitnessprogression.py
BSD-3-Clause
def __init__(self, maxLines=1, autoscale=0.0, **kwargs):
    """
    :key maxLines: number of plots to draw, and thus the maximum line ID
    :key autoscale: if set to a factor > 1, axes are automatically expanded
        whenever out-of-range data points are added
    :var indexList: the x-component of the data points
    :var dataList: the y-component of the data points
    """
    self.indexList = []
    self.dataList = []
    self.Lines = []
    self.autoscale = autoscale
    clf()
    self.Axes = axes(**kwargs)
    self.nbLines = 0
    self.defaultLineStyle = {}
    self._checkMaxId(maxLines - 1)
    self.replot = True       # is the plot still current?
    self.currentID = None
    self.offset = 0          # external references to IDs are modified by this
:key maxLines: number of plots to draw, and thus the maximum line ID :key autoscale: if set to a factor > 1, axes are automatically expanded whenever out-of-range data points are added :var indexList: the x-component of the data points :var dataList: the y-component of the data points
__init__
python
pybrain/pybrain
pybrain/tools/plotting/multiline.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/multiline.py
BSD-3-Clause
def _checkMaxId(self, id):
    """ Appends additional lines as necessary

    :key id: Lines up to this id are added automatically """
    if id >= self.nbLines:
        for i in range(self.nbLines, id + 1):
            # create a new line with corresponding x/y data, and attach it to the plot
            l = Line2D([], [], color=self.graphColor[i % 9], **self.defaultLineStyle)
            self.Lines.append(l)
            self.Axes.add_line(l)
            self.indexList.append([])
            self.dataList.append([])
        self.nbLines = id + 1
Appends additional lines as necessary :key id: Lines up to this id are added automatically
_checkMaxId
python
pybrain/pybrain
pybrain/tools/plotting/multiline.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/multiline.py
BSD-3-Clause
def addData(self, id0, x, y):
    """ The given data point or points are appended to the given line.

    :key id0: The plot ID (counted from 0) the data point(s) belong to.
    :key x: The x-component of the data point(s)
    :key y: The y-component of the data point(s) """
    id = id0 + self.offset
    if not isinstance(x, (list, tuple)):
        self._checkMaxId(id)
        self.indexList[id].append(x)
        self.dataList[id].append(y)
        self.currentID = id
    else:
        for i, xi in enumerate(x):
            self.addData(id0, xi, y[i])
    self.replot = True
The given data point or points are appended to the given line. :key id0: The plot ID (counted from 0) the data point(s) belong to. :key x: The x-component of the data point(s) :key y: The y-component of the data point(s)
addData
python
pybrain/pybrain
pybrain/tools/plotting/multiline.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/multiline.py
BSD-3-Clause
def setData(self, id0, x, y):
    """ Data series id0 is replaced by the given lists

    :key id0: The plot ID (counted from 0) the data point(s) belong to.
    :key x: The x-component of the data points
    :key y: The y-component of the data points """
    id = id0 + self.offset
    self._checkMaxId(id)
    self.indexList[id] = x
    self.dataList[id] = y
    self.replot = True
Data series id0 is replaced by the given lists :key id0: The plot ID (counted from 0) the data point(s) belong to. :key x: The x-component of the data points :key y: The y-component of the data points
setData
python
pybrain/pybrain
pybrain/tools/plotting/multiline.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/multiline.py
BSD-3-Clause
def saveData(self, filename):
    """ Writes the data series for all points to a file

    :key filename: The name of the output file """
    with open(filename, "w") as f:
        for i in range(self.nbLines):
            datLen = len(self.indexList[i])
            for j in range(datLen):
                f.write(repr(self.indexList[i][j]) + "\n")
                f.write(repr(self.dataList[i][j]) + "\n")
Writes the data series for all points to a file :key filename: The name of the output file
saveData
python
pybrain/pybrain
pybrain/tools/plotting/multiline.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/multiline.py
BSD-3-Clause
def setLineStyle(self, id=None, **kwargs):
    """ hand parameters to the specified line(s), and set them as default
    for new lines

    :key id: The line or lines (list!) to be modified - defaults to last one added """
    if id is None:
        id = self.currentID

    if isinstance(id, (list, tuple)):
        # apply to the specified list of lines
        self._checkMaxId(max(id) + self.offset)
        for i in id:
            self.Lines[i + self.offset].set(**kwargs)
    elif id >= 0:
        # apply to the selected line
        self._checkMaxId(id + self.offset)
        self.Lines[id + self.offset].set(**kwargs)
    else:
        # apply to all lines
        for l in self.Lines:
            l.set(**kwargs)

    # set as the new default line style; dict.popitem() takes no argument,
    # so the color key has to be removed with pop()
    if 'color' in kwargs:
        kwargs.pop('color')
    self.defaultLineStyle = kwargs
hand parameters to the specified line(s), and set them as default for new lines :key id: The line or lines (list!) to be modified - defaults to last one added
setLineStyle
python
pybrain/pybrain
pybrain/tools/plotting/multiline.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/multiline.py
BSD-3-Clause
def update(self):
    """ Updates the current plot, if necessary """
    if not self.replot:
        return
    xr = list(self.Axes.get_xlim())
    yr = list(self.Axes.get_ylim())
    for i in range(self.nbLines):
        self.Lines[i].set_data(self.indexList[i], self.dataList[i])
        if self.autoscale > 1.0:
            if self.indexList[i][0] < xr[0]:
                xr[0] = self.indexList[i][0]
            ymn = min(self.dataList[i])
            if ymn < yr[0]:
                yr[0] = ymn
            while self.indexList[i][-1] > xr[1]:
                xr[1] = (xr[1] - xr[0]) * self.autoscale + xr[0]
            ymx = max(self.dataList[i])
            while ymx > yr[1]:
                yr[1] = (yr[1] - yr[0]) * self.autoscale + yr[0]
    if self.autoscale > 1.0:
        self.Axes.set_xlim(tuple(xr))
        self.Axes.set_ylim(tuple(yr))
    draw_if_interactive()
    self.replot = False
Updates the current plot, if necessary
update
python
pybrain/pybrain
pybrain/tools/plotting/multiline.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/multiline.py
BSD-3-Clause
def show(self, xLabel='', yLabel='', Title='', popup=False, imgfile=None):
    """ Plots the data internally and saves an image of it to the plotting directory.

    :key Title: The title of the plot.
    :key xLabel: The label for the x-axis
    :key yLabel: The label for the y-axis
    :key popup: also produce a popup window with the image? """
    clf()
    for i in range(self.nbLines):
        plot(self.indexList[i], self.dataList[i])
    xlabel(xLabel)
    ylabel(yLabel)
    title(Title)
    if imgfile is None:
        imgfile = imp.find_module('pybrain')[1] + "/tools/plotting/plot.png"
    savefig(imgfile)
    if popup:
        ioff()
        show()
Plots the data internally and saves an image of it to the plotting directory. :key Title: The title of the plot. :key xLabel: The label for the x-axis :key yLabel: The label for the y-axis :key popup: also produce a popup window with the image?
show
python
pybrain/pybrain
pybrain/tools/plotting/multiline.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/multiline.py
BSD-3-Clause
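Putting the plotter methods above together, a typical incremental-update loop looks like this (assuming an interactive matplotlib backend and that MultilinePlotter is exported by the pybrain.tools.plotting package):

import pylab
from pybrain.tools.plotting import MultilinePlotter

pylab.ion()
p = MultilinePlotter(xlim=[0, 10], ylim=[0, 1])
for epoch in range(10):
    p.addData(0, epoch, 1.0 / (epoch + 1))  # line 0: a decaying error curve
    p.update()                              # redraws only if new data arrived
p.show(xLabel='epoch', yLabel='error', popup=True)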
def plotVariations(datalist, titles, genFun, varyperplot=None,
                   prePlotFun=None, postPlotFun=None, _differentiator=0.0,
                   **optionlists):
    """ A tool for quickly generating a lot of variations of a plot.

    Generates a number of figures from a list of data (and titles). For each
    data item it produces one or more figures, each with one or more plots,
    while varying all options in optionlists (trying all combinations).

    :arg genFun: is the function that generates the curve to be plotted,
        for each set of options.
    :key varyperplot: determines which options are varied within a figure.
    :key prePlotFun: is called before the plots of a figure
    :key postPlotFun: is called after the plots of a figure (e.g. for realigning axes).
    """
    odl = subDict(optionlists, varyperplot, False)
    fdl = subDict(optionlists, varyperplot, True)
    # the title contains the file and the non-varying parameters
    titadd1 = ''.join([k + '=' + str(vs[0])[:min(5, len(str(vs[0])))] + ' '
                       for k, vs in list(odl.items()) if len(vs) == 1])
    for x, tit in zip(datalist, titles):
        for figdict in sorted(dictCombinations(fdl.copy())):
            pylab.figure()
            # it also contains the parameters that don't vary per figure
            titadd2 = ''.join([k + '=' + str(v)[:min(5, len(str(v)))] + ' '
                               for k, v in list(figdict.items())])
            pylab.title(tit + '\n' + titadd1 + titadd2)
            # code initializing the plot
            if prePlotFun is not None:
                prePlotFun(x)
            for i, odict in enumerate(sorted(dictCombinations(odl.copy()))):
                # concise labels
                lab = ''.join([k[:3] + '=' + str(v)[:min(5, len(str(v)))] + '-'
                               for k, v in list(odict.items()) if len(odl[k]) > 1])
                if len(lab) > 0:
                    lab = lab[:-1]  # remove the trailing '-'
                else:
                    lab = None
                generated = genFun(x, **dict(odict, **figdict))
                if generated is not None:
                    if len(generated) == 2:
                        xs, ys = generated
                    else:
                        ys = generated
                        xs = list(range(len(ys)))
                    # the differentiator can slightly shift the curves, to make
                    # them distinguishable if they overlap
                    if _differentiator != 0.0:
                        ys = generated + _differentiator * i
                    pylab.plot(xs, ys, label=lab)
            if postPlotFun is not None:
                postPlotFun(tit)
            # a legend is only necessary if there are multiple plots
            if lab is not None:
                pylab.legend()
A tool for quickly generating a lot of variations of a plot. Generates a number of figures from a list of data (and titles). For each data item it produces one or more figures, each with one or more plots, while varying all options in optionlists (trying all combinations). :arg genFun: is the function that generates the curve to be plotted, for each set of options. :key varyperplot: determines which options are varied within a figure. :key prePlotFun: is called before the plots of a figure :key postPlotFun: is called after the plots of a figure (e.g. for realigning axes).
plotVariations
python
pybrain/pybrain
pybrain/tools/plotting/quickvariations.py
https://github.com/pybrain/pybrain/blob/master/pybrain/tools/plotting/quickvariations.py
BSD-3-Clause
def iterRbms(self):
    """Yield every two layers as an rbm."""
    layers = [i for i in self.net.modulesSorted
              if isinstance(i, NeuronLayer) and not isinstance(i, BiasUnit)]
    # There will be a single bias.
    bias = [i for i in self.net.modulesSorted if isinstance(i, BiasUnit)][0]
    layercons = (self.net.connections[i][0] for i in layers)
    # The biascons will not be sorted; we have to sort them to zip nicely
    # with the corresponding layers.
    biascons = self.net.connections[bias]
    biascons.sort(key=lambda c: layers.index(c.outmod))
    modules = list(zip(layers, layers[1:], layercons, biascons))
    for visible, hidden, layercon, biascon in modules:
        rbm = Rbm.fromModules(visible, hidden, bias, layercon, biascon)
        yield rbm
Yield every two layers as an rbm.
iterRbms
python
pybrain/pybrain
pybrain/unsupervised/trainers/deepbelief.py
https://github.com/pybrain/pybrain/blob/master/pybrain/unsupervised/trainers/deepbelief.py
BSD-3-Clause
def trainOnDataset(self, dataset):
    """This function trains the RBM using the same algorithm and implementation
    presented in: http://www.cs.toronto.edu/~hinton/MatlabForSciencePaper.html"""
    cfg = self.cfg
    for rows in dataset.randomBatches(self.datasetField, cfg.batchSize):
        olduw = zeros((self.rbm.visibleDim, self.rbm.hiddenDim))
        olduhb = zeros(self.rbm.hiddenDim)
        olduvb = zeros(self.rbm.visibleDim)

        for t in range(cfg.maxIter):
            params = self.rbm.params
            params = params.reshape((self.rbm.visibleDim, self.rbm.hiddenDim))
            biasParams = self.rbm.biasParams

            mm = cfg.iniMm if t < cfg.mmSwitchIter else cfg.finMm

            w, hb, vb = self.calcUpdateByRows(rows)

            # momentum-smoothed updates, with weight decay on the weights
            olduw = uw = olduw * mm + cfg.rWeights * (w - cfg.weightCost * params)
            olduhb = uhb = olduhb * mm + cfg.rHidBias * hb
            olduvb = uvb = olduvb * mm + cfg.rVisBias * vb

            # update the parameters of the original rbm
            params += uw
            biasParams += uhb

            # create a new inverted rbm with the correct parameters
            invBiasParams = self.invRbm.biasParams
            invBiasParams += uvb
            self.invRbm = self.rbm.invert()
            self.invRbm.biasParams[:] = invBiasParams
This function trains the RBM using the same algorithm and implementation presented in: http://www.cs.toronto.edu/~hinton/MatlabForSciencePaper.html
trainOnDataset
python
pybrain/pybrain
pybrain/unsupervised/trainers/rbm.py
https://github.com/pybrain/pybrain/blob/master/pybrain/unsupervised/trainers/rbm.py
BSD-3-Clause
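In formulas, each inner-loop step above applies a momentum-smoothed, weight-decayed update, with m the momentum (cfg.iniMm or cfg.finMm), the etas the learning rates (cfg.rWeights, cfg.rHidBias, cfg.rVisBias), lambda the weight cost (cfg.weightCost) and G the contrastive-divergence gradient estimate returned by calcUpdateByRows:

\Delta W_t = m \, \Delta W_{t-1} + \eta_W \left( G_W - \lambda W \right), \qquad \Delta b_t = m \, \Delta b_{t-1} + \eta_b \, G_b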
def calcUpdateByRow(self, row):
    """This function trains the RBM using only one data row.
    Return a 3-tuple consisting of updates for (weightmatrix,
    hidden bias weights, visible bias weights)."""
    # a) the positive phase
    poshp = self.rbm.activate(row)   # compute the posterior probability
    pos = outer(row, poshp)          # fraction from the positive phase
    poshb = poshp
    posvb = row

    # b) the sampling & reconstruction
    sampled = self.sampler(poshp)
    recon = self.invRbm.activate(sampled)   # the reconstruction of the data

    # c) the negative phase
    neghp = self.rbm.activate(recon)
    neg = outer(recon, neghp)
    neghb = neghp
    negvb = recon

    # compute the raw delta
    # !!! note that this delta is only the 'theoretical' delta
    return self.updater(pos, neg, poshb, neghb, posvb, negvb)
This function trains the RBM using only one data row. Return a 3-tuple consisting of updates for (weightmatrix, hidden bias weights, visible bias weights).
calcUpdateByRow
python
pybrain/pybrain
pybrain/unsupervised/trainers/rbm.py
https://github.com/pybrain/pybrain/blob/master/pybrain/unsupervised/trainers/rbm.py
BSD-3-Clause
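The positive and negative statistics collected above are the standard CD-1 estimate of the log-likelihood gradient, where v is the data row, h the hidden activations, and the 'recon' averages are taken after one sampling/reconstruction step:

\Delta W \propto \langle v h^{\top} \rangle_{\mathrm{data}} - \langle v h^{\top} \rangle_{\mathrm{recon}}, \qquad \Delta b_h \propto \langle h \rangle_{\mathrm{data}} - \langle h \rangle_{\mathrm{recon}}, \qquad \Delta b_v \propto \langle v \rangle_{\mathrm{data}} - \langle v \rangle_{\mathrm{recon}}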
def calcUpdateByRows(self, rows):
    """Return a 3-tuple consisting of updates for (weightmatrix,
    hidden bias weights, visible bias weights)."""
    delta_w = zeros((self.rbm.visibleDim, self.rbm.hiddenDim))
    delta_hb = zeros(self.rbm.hiddenDim)
    delta_vb = zeros(self.rbm.visibleDim)

    for row in rows:
        dw, dhb, dvb = self.calcUpdateByRow(row)
        delta_w += dw
        delta_hb += dhb
        delta_vb += dvb

    delta_w /= len(rows)
    delta_hb /= len(rows)
    delta_vb /= len(rows)

    # !!! note that this delta is only the 'theoretical' delta
    return delta_w, delta_hb, delta_vb
Return a 3-tuple consisting of updates for (weightmatrix, hidden bias weights, visible bias weights).
calcUpdateByRows
python
pybrain/pybrain
pybrain/unsupervised/trainers/rbm.py
https://github.com/pybrain/pybrain/blob/master/pybrain/unsupervised/trainers/rbm.py
BSD-3-Clause
def __init__(self, layer, num_layers, encoder_type=None):
    '''
    @param layer: one encoder layer, i.e., self-attention layer
    @param num_layers: number of self-attention layers
    @param encoder_type: type differentiation
    '''
    super(Encoder, self).__init__()
    self.encoder_type = encoder_type
    self.layers = make_clones(layer, num_layers)
    if 'AllRank' == encoder_type:
        self.norm = LayerNorm(layer.hid_dim)
@param layer: one encoder layer, i.e., self-attention layer @param num_layers: number of self-attention layers @param encoder_type: type differentiation
__init__
python
wildltr/ptranking
ptranking/base/list_ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/list_ranker.py
MIT
def forward(self, x):
    '''
    Forward pass through the encoder block.
    @param x: input with a shape of [batch_size, ranking_size, num_features]
    @return:
    '''
    for layer in self.layers:
        x = layer(x)

    if 'AllRank' == self.encoder_type:
        return self.norm(x)
    elif self.encoder_type in ['AttnDIN', 'DASALC']:
        return x
    else:
        raise NotImplementedError
Forward pass through the encoder block. @param x: input with a shape of [batch_size, ranking_size, num_features] @return:
forward
python
wildltr/ptranking
ptranking/base/list_ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/list_ranker.py
MIT
def __init__(self, hid_dim, encoder_type=None, dropout=None):
    '''
    @param hid_dim: number of input/output features
    @param dropout: dropout probability
    '''
    super(SublayerConnection, self).__init__()
    self.encoder_type = encoder_type
    self.norm = LayerNorm(hid_dim=hid_dim)
    if 'AllRank' == encoder_type:
        self.dropout = nn.Dropout(dropout)
@param hid_dim: number of input/output features @param dropout: dropout probability
__init__
python
wildltr/ptranking
ptranking/base/list_ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/list_ranker.py
MIT
def forward(self, x, sublayer):
    '''
    Forward pass through the sublayer connection module, applying the residual
    connection to any sublayer with the same size.
    @param x: input with a shape of [batch_size, ranking_size, num_features]
    @param sublayer: the layer through which to pass the input prior to applying the sum
    @return: output with a shape of [batch_size, ranking_size, num_features]
    '''
    if 'AllRank' == self.encoder_type:
        return x + self.dropout(sublayer(self.norm(x)))
    elif 'DASALC' == self.encoder_type:
        # the residual is not clearly mentioned, and is also not specified
        # in Figure 1 of the paper
        return self.norm(sublayer(x))
    elif 'AttnDIN' == self.encoder_type:
        return self.norm(x + sublayer(x))
    else:
        raise NotImplementedError
Forward pass through the sublayer connection module, applying the residual connection to any sublayer with the same size. @param x: input with a shape of [batch_size, ranking_size, num_features] @param sublayer: the layer through which to pass the input prior to applying the sum @return: output with a shape of [batch_size, ranking_size, num_features]
forward
python
wildltr/ptranking
ptranking/base/list_ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/list_ranker.py
MIT
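A toy sketch of the three connection variants above, using plain torch tensors and a linear layer standing in for the attention sublayer (torch's built-in nn.LayerNorm replaces the custom LayerNorm, so the statistics differ slightly):

import torch
import torch.nn as nn

x = torch.randn(2, 4, 8)               # [batch_size, ranking_size, num_features]
sublayer = nn.Linear(8, 8)
norm = nn.LayerNorm(8)
drop = nn.Dropout(0.1)

allrank = x + drop(sublayer(norm(x)))  # pre-norm residual ('AllRank')
dasalc = norm(sublayer(x))             # norm only, no explicit residual ('DASALC')
attndin = norm(x + sublayer(x))        # post-norm residual ('AttnDIN')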
def __init__(self, hid_dim, eps=1e-6):
    '''
    @param hid_dim: shape of normalised features
    @param eps: epsilon for standard deviation
    '''
    super(LayerNorm, self).__init__()
    self.a_2 = nn.Parameter(torch.ones(hid_dim))
    self.b_2 = nn.Parameter(torch.zeros(hid_dim))
    self.eps = eps
@param hid_dim: shape of normalised features @param eps: epsilon for standard deviation
__init__
python
wildltr/ptranking
ptranking/base/list_ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/list_ranker.py
MIT
def forward(self, x):
    '''
    Forward pass through the layer normalization.
    @param x: input with a shape of [batch_size, ranking_size, num_features]
    @return: normalized input with a shape of [batch_size, ranking_size, num_features]
    '''
    mean = x.mean(-1, keepdim=True)
    std = x.std(-1, keepdim=True)
    return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
Forward pass through the layer normalization @param x: input shape, i.e., [batch_size, ranking_size, num_features] @return: normalized input with a shape of [batch_size, ranking_size, num_features]
forward
python
wildltr/ptranking
ptranking/base/list_ranker.py
https://github.com/wildltr/ptranking/blob/master/ptranking/base/list_ranker.py
MIT
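At initialization (a_2 = 1, b_2 = 0) the module above reduces to plain per-vector standardization; a quick numeric check with raw tensor ops:

import torch

x = torch.randn(2, 5, 8)         # [batch_size, ranking_size, num_features]
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
y = (x - mean) / (std + 1e-6)
print(y.mean(-1).abs().max())    # close to 0: every feature vector is centered
print(y.std(-1))                 # close to 1: ... and rescaled to unit std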