code
stringlengths
66
870k
docstring
stringlengths
19
26.7k
func_name
stringlengths
1
138
language
stringclasses
1 value
repo
stringlengths
7
68
path
stringlengths
5
324
url
stringlengths
46
389
license
stringclasses
7 values
def step(self):
    """ integrate state using simple rectangle rule """
    def clamp(value, lo, hi):
        # restrict value to the closed interval [lo, hi]
        return min(max(value, lo), hi)

    # clip actuator commands to their physical ranges
    thrust = clamp(float(self.action[0]), -1, +2)
    rudder = clamp(float(self.action[1]), -90, +90)
    h, hdot, v = self.sensors
    noise = random.normal(0, 1.0, size=3)
    # hydrodynamic drag grows with heading and rudder deflection
    drag = 5 * h + (rudder ** 2 + noise[0])
    force = 30.0 * thrust - 2.0 * v - 0.02 * v * drag + noise[1] * 3.0
    v = clamp(v + self.dt * force / self.mass, -10, +40)
    torque = -v * (rudder + h + 1.0 * hdot + noise[2] * 10.)
    prev_hdot = hdot
    hdot = clamp(hdot + torque / self.I, -180, 180)
    h += (hdot + prev_hdot) / 2.0
    # wrap heading back into (-180, 180]
    if h > 180.:
        h -= 360.
    elif h < -180.:
        h += 360.
    self.sensors = (h, hdot, v)
integrate state using simple rectangle rule
step
python
pybrain/pybrain
pybrain/rl/environments/shipsteer/shipsteer.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/shipsteer/shipsteer.py
BSD-3-Clause
def reset(self):
    """ re-initializes the environment, setting the ship to rest at a random orientation. """
    # state vector: [heading, heading rate, velocity]
    self.sensors = [random.uniform(-30., 30.), 0.0, 0.0]
    if self.render and self.server.clients > 0:
        # If there are clients send them reset signal
        self.server.send(["r", "r", "r"])
re-initializes the environment, setting the ship to rest at a random orientation.
reset
python
pybrain/pybrain
pybrain/rl/environments/shipsteer/shipsteer.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/shipsteer/shipsteer.py
BSD-3-Clause
def performAction(self, action):
    """ stores the desired action for the next time step. """
    self.action = action
    self.step()
    if not self.render:
        return
    if self.updateDone:
        self.updateRenderer()
        if self.server.clients > 0:
            # throttle the loop so remote viewers can keep up
            sleep(0.2)
stores the desired action for the next time step.
performAction
python
pybrain/pybrain
pybrain/rl/environments/shipsteer/shipsteer.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/shipsteer/shipsteer.py
BSD-3-Clause
def drawScene(self):
    ''' This methode describes the complete scene.'''
    # zoom the camera smoothly during the first frames
    if self.zDis < 10: self.zDis += 0.25
    if self.lastz > 100: self.lastz -= self.zDis
    # clear the buffer
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()
    # Point of view
    glRotatef(self.lastx, 0.0, 1.0, 0.0)
    glRotatef(self.lasty, 1.0, 0.0, 0.0)
    #glRotatef(15, 0.0, 0.0, 1.0)
    # direction of view is aimed to the center of gravity of the cube
    glTranslatef(-self.centerOfGrav[0], -self.centerOfGrav[1] - 50.0,
                 -self.centerOfGrav[2] - self.lastz)
    # Objects
    # Ruler ("Massstab"): tall ticks at multiples of 10, medium at multiples
    # of 5, short otherwise.
    # BUGFIX: the original Python 2 code relied on integer division
    # ((lk - 20) / 10) to test divisibility; under Python 3 that is true
    # division, so the comparison was always true and every tick drew tall.
    # Restored with floor division (//), which matches py2 int semantics.
    for lk in range(41):
        if float(lk - 20) / 10.0 == (lk - 20) // 10:
            glColor3f(0.75, 0.75, 0.75)
            glPushMatrix()
            glRotatef(90, 1, 0, 0)
            glTranslate(self.worldRadius / 40.0 * float(lk) - self.worldRadius / 2.0, -40.0, -30)
            quad = gluNewQuadric()
            gluCylinder(quad, 2, 2, 60, 4, 1)
            glPopMatrix()
        else:
            if float(lk - 20) / 5.0 == (lk - 20) // 5:
                glColor3f(0.75, 0.75, 0.75)
                glPushMatrix()
                glRotatef(90, 1, 0, 0)
                glTranslate(self.worldRadius / 40.0 * float(lk) - self.worldRadius / 2.0, -40.0, -15.0)
                quad = gluNewQuadric()
                gluCylinder(quad, 1, 1, 30, 4, 1)
                glPopMatrix()
            else:
                glColor3f(0.75, 0.75, 0.75)
                glPushMatrix()
                glRotatef(90, 1, 0, 0)
                glTranslate(self.worldRadius / 40.0 * float(lk) - self.worldRadius / 2.0, -40.0, -7.5)
                quad = gluNewQuadric()
                gluCylinder(quad, 0.5, 0.5, 15, 4, 1)
                glPopMatrix()
    # Floor
    tile = self.worldRadius / 40.0
    glEnable(GL_BLEND)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    glColor3f(0.8, 0.8, 0.5)
    glPushMatrix()
    glTranslatef(0.0, -3.0, 0.0)
    glBegin(GL_QUADS)
    glNormal(0.0, 1.0, 0.0)
    glVertex3f(-self.worldRadius, 0.0, -self.worldRadius)
    glVertex3f(-self.worldRadius, 0.0, self.worldRadius)
    glVertex3f(self.worldRadius, 0.0, self.worldRadius)
    glVertex3f(self.worldRadius, 0.0, -self.worldRadius)
    glEnd()
    glPopMatrix()
    # Water: animated checkerboard of quads (same py2 division fix as above,
    # otherwise the checkerboard rendered as a single color)
    for xF in range(40):
        for yF in range(40):
            if float(xF + yF) / 2.0 == (xF + yF) // 2:
                glColor4f(0.7, 0.7, 1.0, 0.5)
            else:
                glColor4f(0.9, 0.9, 1.0, 0.5)
            glPushMatrix()
            glTranslatef(0.0, -0.03, 0.0)
            glBegin(GL_QUADS)
            # NOTE(review): self.step is used as a numeric frame counter in
            # this viewer class — confirm it does not collide with a method
            glNormal(0.5 + sin(float(xF) + float(self.step) / 4.0) * 0.5,
                     0.5 + cos(float(xF) + float(self.step) / 4.0) * 0.5, 0.0)
            for i in range(2):
                for k in range(2):
                    glVertex3f((i + xF - 20) * tile,
                               sin(float(xF + i) + float(self.step) / 4.0) * 3.0,
                               ((k ^ i) + yF - 20) * tile)
            glEnd()
            glPopMatrix()
    self.ship()
    # swap the buffer
    glutSwapBuffers()
This methode describes the complete scene.
drawScene
python
pybrain/pybrain
pybrain/rl/environments/shipsteer/viewer.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/shipsteer/viewer.py
BSD-3-Clause
def __init__(self, size, suicideenabled=True):
    """ the size of the board is generally between 3 and 19. """
    # remember the rule variant before building the initial position
    self.suicideenabled = suicideenabled
    self.size = size
    self.reset()
the size of the board is generally between 3 and 19.
__init__
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/capturegame.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/capturegame.py
BSD-3-Clause
def _iterPos(self):
    """ an iterator over all the positions of the board. """
    # row-major scan of the square board
    n = self.size
    for row in range(n):
        for col in range(n):
            yield (row, col)
an iterator over all the positions of the board.
_iterPos
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/capturegame.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/capturegame.py
BSD-3-Clause
def getBoardArray(self):
    """ an array with two boolean values per position, indicating 'white stone present' and 'black stone present' respectively. """
    board = zeros(self.outdim)
    for idx, p in enumerate(self._iterPos()):
        stone = self.b[p]
        # two channels per point: even index = white, odd index = black
        if stone == self.WHITE:
            board[2 * idx] = 1
        elif stone == self.BLACK:
            board[2 * idx + 1] = 1
    return board
an array with two boolean values per position, indicating 'white stone present' and 'black stone present' respectively.
getBoardArray
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/capturegame.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/capturegame.py
BSD-3-Clause
def doMove(self, c, pos):
    """ the action is a (color, position) tuple, for the next stone to move. returns True if the move was legal. """
    self.movesDone += 1
    if pos == 'resign':
        # resignation: the opponent wins immediately
        self.winner = -c
        return True
    if not self.isLegal(c, pos):
        return False
    if self._suicide(c, pos):
        # suicide ends the game in favour of the opponent
        assert self.suicideenabled
        self.b[pos] = 'y'
        self.winner = -c
        return True
    if self._capture(c, pos):
        # capturing move wins the game; mark the winning stone
        self.winner = c
        self.b[pos] = 'x'
        return True
    self._setStone(c, pos)
    return True
the action is a (color, position) tuple, for the next stone to move. returns True if the move was legal.
doMove
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/capturegame.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/capturegame.py
BSD-3-Clause
def _setStone(self, c, pos):
    """ set stone, and update liberties and groups. """
    self.b[pos] = c
    merge = False
    # a fresh stone starts out as its own group, keyed by its board index
    self.groups[pos] = self.size * pos[0] + pos[1]
    freen = [n for n in self._neighbors(pos) if self.b[n] == self.EMPTY]
    self.liberties[self.groups[pos]] = set(freen)
    for n in self._neighbors(pos):
        if self.b[n] == -c:
            # the adjacent enemy group loses this point as a liberty
            self.liberties[self.groups[n]].difference_update([pos])
        elif self.b[n] == c:
            if merge:
                # already connected to one friendly group; this neighbor may
                # belong to the same group or force a second merge
                newg = self.groups[pos]
                oldg = self.groups[n]
                if newg == oldg:
                    self.liberties[newg].difference_update([pos])
                else:
                    # merging 2 groups
                    for p in list(self.groups.keys()):
                        if self.groups[p] == oldg:
                            self.groups[p] = newg
                    self.liberties[newg].update(self.liberties[oldg])
                    self.liberties[newg].difference_update([pos])
                    del self.liberties[oldg]
            else:
                # connect to this group
                del self.liberties[self.groups[pos]]
                self.groups[pos] = self.groups[n]
                self.liberties[self.groups[n]].update(freen)
                self.liberties[self.groups[n]].difference_update([pos])
                merge = True
set stone, and update liberties and groups.
_setStone
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/capturegame.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/capturegame.py
BSD-3-Clause
def _suicide(self, c, pos):
    """ would putting a stone here be suicide for c? """
    neighbors = list(self._neighbors(pos))
    # an adjacent empty point means the new stone has a liberty
    for n in neighbors:
        if self.b[n] == self.EMPTY:
            return False
    # a friendly neighboring group with a spare liberty keeps it alive
    for n in neighbors:
        if self.b[n] == c and len(self.liberties[self.groups[n]]) > 1:
            return False
    # capturing a surrounding enemy group also saves the stone
    return not self._capture(c, pos)
would putting a stone here be suicide for c?
_suicide
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/capturegame.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/capturegame.py
BSD-3-Clause
def _capture(self, c, pos):
    """ would putting a stone here lead to a capture? """
    # an adjacent enemy group down to a single liberty would be captured
    return any(self.b[n] == -c and len(self.liberties[self.groups[n]]) == 1
               for n in self._neighbors(pos))
would putting a stone here lead to a capture?
_capture
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/capturegame.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/capturegame.py
BSD-3-Clause
def getLiberties(self, pos):
    """ how many liberties does the stone at pos have? """
    if self.b[pos] != self.EMPTY:
        return len(self.liberties[self.groups[pos]])
    # empty points have no owning group
    return None
how many liberties does the stone at pos have?
getLiberties
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/capturegame.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/capturegame.py
BSD-3-Clause
def getGroupSize(self, pos):
    """ what size is the worm that this stone is part of? """
    if self.b[pos] == self.EMPTY:
        return None
    gid = self.groups[pos]
    # count how many board points map to the same group id
    return sum(1 for g in self.groups.values() if g == gid)
what size is the worm that this stone is part of?
getGroupSize
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/capturegame.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/capturegame.py
BSD-3-Clause
def randomBoard(self, nbmoves):
    """ produce a random, undecided and legal capture-game board, after at most nbmoves.
    :return: the number of moves actually done. """
    color = self.BLACK
    self.reset()
    for done in range(nbmoves):
        candidates = set(self.getAcceptable(color))
        # never play a move that would immediately decide the game
        candidates.difference_update(self.getKilling(color))
        if not candidates:
            return done
        self._setStone(color, choice(list(candidates)))
        color = -color
    return nbmoves
produce a random, undecided and legal capture-game board, after at most nbmoves. :return: the number of moves actually done.
randomBoard
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/capturegame.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/capturegame.py
BSD-3-Clause
def playToTheEnd(self, p1, p2):
    """ alternate playing moves between players until the game is over. """
    assert p1.color == -p2.color
    p1.game = self
    p2.game = self
    players = (p1, p2)
    turn = 0
    while not self.gameOver():
        self.performAction(players[turn].getAction())
        # alternate turns
        turn = 1 - turn
alternate playing moves between players until the game is over.
playToTheEnd
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/capturegame.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/capturegame.py
BSD-3-Clause
def __init__(self, size):
    """ the size of the board is a tuple, where each dimension must be minimum 5. """
    # a five-in-a-row game cannot fit on a smaller board
    assert size[0] >= 5
    assert size[1] >= 5
    self.size = size
    self.reset()
the size of the board is a tuple, where each dimension must be minimum 5.
__init__
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/gomoku.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/gomoku.py
BSD-3-Clause
def _iterPos(self):
    """ an iterator over all the positions of the board. """
    # row-major scan of the rectangular board
    rows, cols = self.size
    for r in range(rows):
        for c in range(cols):
            yield (r, c)
an iterator over all the positions of the board.
_iterPos
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/gomoku.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/gomoku.py
BSD-3-Clause
def _fiveRow(self, color, pos):
    """ Is this placement the 5th in a row? """
    # TODO: more efficient...
    rows, cols = self.size
    # check the four line orientations through pos
    for dx, dy in [(0, 1), (1, 0), (1, 1), (1, -1)]:
        count = 1  # the stone being placed
        for sign in (-1, 1):
            # walk outwards in both directions while same-colored stones last
            for dist in range(1, 5):
                cell = (pos[0] + dx * dist * sign, pos[1] + dy * dist * sign)
                if not (0 <= cell[0] < rows and 0 <= cell[1] < cols):
                    break
                if self.b[cell] != color:
                    break
                count += 1
        if count >= 5:
            return True
    return False
Is this placement the 5th in a row?
_fiveRow
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/gomoku.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/gomoku.py
BSD-3-Clause
def getBoardArray(self):
    """ an array with two boolean values per position, indicating 'white stone present' and 'black stone present' respectively. """
    out = zeros(self.outdim)
    # channel offset within each position's pair of slots
    slot = {self.WHITE: 0, self.BLACK: 1}
    for i, pos in enumerate(self._iterPos()):
        k = slot.get(self.b[pos])
        if k is not None:
            out[2 * i + k] = 1
    return out
an array with two boolean values per position, indicating 'white stone present' and 'black stone present' respectively.
getBoardArray
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/gomoku.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/gomoku.py
BSD-3-Clause
def doMove(self, c, pos):
    """ the action is a (color, position) tuple, for the next stone to move. returns True if the move was legal. """
    self.movesDone += 1
    if not self.isLegal(c, pos):
        return False
    if self._fiveRow(c, pos):
        # winning move: record the winner and mark the winning stone
        self.winner = c
        self.b[pos] = 'x'
        return True
    self._setStone(c, pos)
    if self.movesDone == self.size[0] * self.size[1]:
        # board is full: DRAW
        self.winner = self.DRAW
    return True
the action is a (color, position) tuple, for the next stone to move. returns True if the move was legal.
doMove
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/gomoku.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/gomoku.py
BSD-3-Clause
def playToTheEnd(self, p1, p2):
    """ alternate playing moves between players until the game is over. """
    assert p1.color == -p2.color
    for p in (p1, p2):
        p.game = self
    current, waiting = p1, p2
    while not self.gameOver():
        self.performAction(current.getAction())
        # hand the turn to the other player
        current, waiting = waiting, current
alternate playing moves between players until the game is over.
playToTheEnd
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/gomoku.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/gomoku.py
BSD-3-Clause
def getKilling(self, c):
    """ return all legal positions for a color that immediately kill the opponent. """
    # positions that win by five-in-a-row (inherited Gomoku rule)
    res = GomokuGame.getKilling(self, c)
    for p in self.getLegals(c):
        k = self._killsWhich(c, p)
        # captured stones always come in pairs; use floor division for the
        # pair count — consistent with _setStone, and avoids the float that
        # Python 3's true division would produce here
        if self.pairsTaken[c] + len(k) // 2 >= 5:
            res.append(p)
    return res
return all legal positions for a color that immediately kill the opponent.
getKilling
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/pente.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/pente.py
BSD-3-Clause
def _killsWhich(self, c, pos):
    """ placing a stone of color c at pos would kill which enemy stones? """
    rows, cols = self.size
    killed = []
    # look in every direction for the flanking pattern: c, enemy, enemy, c
    for step_r, step_c in [(0, 1), (1, 0), (1, 1), (1, -1)]:
        for sign in (-1, 1):
            pair = []
            for dist in (1, 2, 3):
                cell = (pos[0] + step_r * dist * sign, pos[1] + step_c * dist * sign)
                if not (0 <= cell[0] < rows and 0 <= cell[1] < cols):
                    break
                if dist == 3:
                    if self.b[cell] == c:
                        # our own stone closes the flank: the pair is captured
                        killed += pair
                    break
                if self.b[cell] != -c:
                    break
                pair.append(cell)
    return killed
placing a stone of color c at pos would kill which enemy stones?
_killsWhich
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/pente.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/pente.py
BSD-3-Clause
def doMove(self, c, pos):
    """ the action is a (color, position) tuple, for the next stone to move. returns True if the move was legal. """
    self.movesDone += 1
    if not self.isLegal(c, pos):
        return False
    if self._fiveRow(c, pos):
        # five in a row wins immediately
        self.winner = c
        self.b[pos] = 'x'
        return True
    tokill = self._killsWhich(c, pos)
    # captures come in pairs; reaching five captured pairs wins.
    # Floor division keeps the count an int, consistent with _setStone
    # (previously `/ 2`, a float under Python 3).
    if self.pairsTaken[c] + len(tokill) // 2 >= 5:
        self.winner = c
        self.b[pos] = 'x'
        return True
    self._setStone(c, pos, tokill)
    if self.movesDone == (self.size[0] * self.size[1]
                          + 2 * (self.pairsTaken[self.BLACK] + self.pairsTaken[self.WHITE])):
        # every playable point used up (accounting for removed stones): DRAW
        self.winner = self.DRAW
    return True
the action is a (color, position) tuple, for the next stone to move. returns True if the move was legal.
doMove
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/pente.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/pente.py
BSD-3-Clause
def _setStone(self, c, pos, tokill=None):
    """ set stone, and potentially kill stones. """
    if tokill is None:  # `is None` rather than `== None` (PEP 8)
        tokill = self._killsWhich(c, pos)
    GomokuGame._setStone(self, c, pos)
    # remove captured stones from the board
    for p in tokill:
        self.b[p] = self.EMPTY
    # stones are captured in pairs
    self.pairsTaken[c] += len(tokill) // 2
set stone, and potentially kill stones.
_setStone
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/pente.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/pente.py
BSD-3-Clause
def getAction(self):
    """ get suggested action, return them if they are legal, otherwise choose randomly. """
    ba = self.game.getBoardArray()
    # the network expects self/other channels, not black/white: swap the
    # interleaved channel pairs whenever we are not playing black
    if self.color != CaptureGame.BLACK:
        swapped = zeros(len(ba))
        swapped[:len(ba) - 1:2] = ba[1:len(ba):2]
        swapped[1:len(ba):2] = ba[:len(ba) - 1:2]
        ba = swapped
    self.module.reset()
    return [self.color, self._legalizeIt(self.module.activate(ba))]
get suggested action, return them if they are legal, otherwise choose randomly.
getAction
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/capturegameplayers/moduledecision.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/capturegameplayers/moduledecision.py
BSD-3-Clause
def _legalizeIt(self, a):
    """ draw index from an array of values, filtering out illegal moves. """
    if not min(a) >= 0:
        # dump the full module state before failing, to help debug where the
        # non-positive activation came from
        print(a)
        print((min(a)))
        print((self.module.params))
        print((self.module.inputbuffer))
        print((self.module.outputbuffer))
        raise Exception('Non-positive value in array?')
    legals = self.game.getLegals(self.color)
    # illegal moves get a value so low they are never drawn by Gibbs sampling
    vals = ones(len(a))*(-100)*(1+self.temperature)
    for i in map(self._convertPosToIndex, legals):
        vals[i] = a[i]
    drawn = self._convertIndexToPos(drawGibbs(vals, self.temperature))
    assert drawn in legals, (drawn, legals)
    return drawn
draw index from an array of values, filtering out illegal moves.
_legalizeIt
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/capturegameplayers/moduledecision.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/capturegameplayers/moduledecision.py
BSD-3-Clause
def getAction(self):
    """ get suggested action, return them if they are legal, otherwise choose randomly. """
    ba = self.game.getBoardArray()
    # the network expects self/other channels, not black/white: swap the
    # interleaved channel pairs whenever we are not playing black
    if self.color != GomokuGame.BLACK:
        swapped = zeros(len(ba))
        swapped[:len(ba) - 1:2] = ba[1:len(ba):2]
        swapped[1:len(ba):2] = ba[:len(ba) - 1:2]
        ba = swapped
    self.module.reset()
    return [self.color, self._legalizeIt(self.module.activate(ba))]
get suggested action, return them if they are legal, otherwise choose randomly.
getAction
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/gomokuplayers/moduledecision.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/gomokuplayers/moduledecision.py
BSD-3-Clause
def _legalizeIt(self, a):
    """ draw index from an array of values, filtering out illegal moves. """
    if not min(a) >= 0:
        # dump the full module state before failing, to help debug where the
        # non-positive activation came from
        print(a)
        print((min(a)))
        print((self.module.params))
        print((self.module.inputbuffer))
        print((self.module.outputbuffer))
        # message fixed ('No positve value' typo) and aligned with the
        # capture-game twin of this method
        raise Exception('Non-positive value in array?')
    legals = self.game.getLegals(self.color)
    # illegal moves get a value so low they are never drawn by Gibbs sampling
    vals = ones(len(a)) * (-100) * (1 + self.temperature)
    for i in map(self._convertPosToIndex, legals):
        vals[i] = a[i]
    drawn = self._convertIndexToPos(drawGibbs(vals, self.temperature))
    # include diagnostics in the assert, like the capture-game version does
    assert drawn in legals, (drawn, legals)
    return drawn
draw index from an array of values, filtering out illegal moves.
_legalizeIt
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/gomokuplayers/moduledecision.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/gomokuplayers/moduledecision.py
BSD-3-Clause
def getReward(self):
    """ Final positive reward for winner, negative for loser. """
    if not self.isFinished():
        # no intermediate rewards
        return 0
    # scale the reward down the longer the game took
    span = self.maxmoves - self.minmoves
    res = self.winnerReward - self.numMovesCoeff * (self.env.movesDone - self.minmoves) / span
    if self.env.winner == self.opponent.color:
        # our player lost
        res *= -1
    if self.alternateStarting and self.switched:
        # opponent color has been inverted after the game!
        res *= -1
    return res
Final positive reward for winner, negative for loser.
getReward
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/tasks/capturetask.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/tasks/capturetask.py
BSD-3-Clause
def f(self, x):
    """ If a module is given, wrap it into a ModuleDecidingAgent before evaluating it.
    Also, if applicable, average the result over multiple games. """
    if isinstance(x, Module):
        agent = ModuleDecidingPlayer(x, self.env, greedySelection = True)
    elif isinstance(x, CapturePlayer):
        agent = x
    else:
        raise NotImplementedError('Missing implementation for '+x.__class__.__name__+' evaluation')
    agent.game = self.env
    self.opponent.game = self.env
    total = 0
    for _ in range(self.averageOverGames):
        # the agent always takes the color opposite the opponent's
        agent.color = -self.opponent.color
        total += EpisodicTask.f(self, agent)
    return total / float(self.averageOverGames)
If a module is given, wrap it into a ModuleDecidingAgent before evaluating it. Also, if applicable, average the result over multiple games.
f
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/tasks/capturetask.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/tasks/capturetask.py
BSD-3-Clause
def getReward(self):
    """ Final positive reward for winner, negative for loser. """
    if not self.isFinished():
        # no intermediate rewards
        return 0
    if self.env.winner == self.env.DRAW:
        # a drawn game is worth nothing to either side
        return 0
    # scale the reward down the longer the game took
    span = self.maxmoves - self.minmoves
    res = self.winnerReward - self.numMovesCoeff * (self.env.movesDone - self.minmoves) / span
    if self.env.winner == self.opponent.color:
        # our player lost
        res *= -1
    if self.alternateStarting and self.switched:
        # opponent color has been inverted after the game!
        res *= -1
    return res
Final positive reward for winner, negative for loser.
getReward
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/tasks/gomokutask.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/tasks/gomokutask.py
BSD-3-Clause
def f(self, x):
    """ If a module is given, wrap it into a ModuleDecidingAgent before evaluating it.
    Also, if applicable, average the result over multiple games. """
    if isinstance(x, Module):
        agent = ModuleDecidingPlayer(x, self.env, greedySelection = True)
    elif isinstance(x, GomokuPlayer):
        agent = x
    else:
        raise NotImplementedError('Missing implementation for '+x.__class__.__name__+' evaluation')
    agent.game = self.env
    self.opponent.game = self.env
    total = 0
    for _ in range(self.averageOverGames):
        # the agent always takes the color opposite the opponent's
        agent.color = -self.opponent.color
        total += EpisodicTask.f(self, agent)
    return total / float(self.averageOverGames)
If a module is given, wrap it into a ModuleDecidingAgent before evaluating it. Also, if applicable, average the result over multiple games.
f
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/tasks/gomokutask.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/tasks/gomokutask.py
BSD-3-Clause
def goUp(self, h):
    """ ready to go up one handicap? """
    # require a minimum number of evaluations before trusting the win rate
    if self.results[h][1] < self.minEvals:
        return False
    return self.winProp(h) > 0.6
ready to go up one handicap?
goUp
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/tasks/handicaptask.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/tasks/handicaptask.py
BSD-3-Clause
def goDown(self, h):
    """ have to go down one handicap? """
    # require a minimum number of evaluations before trusting the win rate
    if self.results[h][1] < self.minEvals:
        return False
    return self.winProp(h) < -0.6
have to go down one handicap?
goDown
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/tasks/handicaptask.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/tasks/handicaptask.py
BSD-3-Clause
def _oneGame(self, preset = None):
    """ a single black stone can be set as the first move. """
    self.env.reset()
    if preset is not None:  # `is not None` instead of `!= None` (PEP 8)
        # pre-place a black stone, then the opponent gets the first real move
        self.env._setStone(GomokuGame.BLACK, preset)
        self.env.movesDone += 1
        self.env.playToTheEnd(self.opponent, self.player)
    else:
        self.env.playToTheEnd(self.player, self.opponent)
    moves = self.env.movesDone
    win = self.env.winner == self.player.color
    if self.verbose:
        print(('Preset:', preset, 'T:', self.temp, 'Win:', win, 'after', moves, 'moves.'))
    # scale the score by game length: quick wins (and long losses) score higher
    res = 1 - self.numMovesCoeff * (moves - self.minmoves) / (self.maxmoves - self.minmoves)
    if win:
        return res
    else:
        return -res
a single black stone can be set as the first move.
_oneGame
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/tasks/relativegomokutask.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/tasks/relativegomokutask.py
BSD-3-Clause
def _fixedStartingPos(self):
    """ a list of starting positions, not along the border, and respecting symmetry. """
    if self.size < 3:
        # too small to have any interior point
        return []
    half = (self.size + 1) // 2
    # only interior positions with x <= y, respecting the board's symmetry
    return [(x, y) for x in range(1, half) for y in range(x, half)]
a list of starting positions, not along the border, and respecting symmetry.
_fixedStartingPos
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/tasks/relativetask.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/tasks/relativetask.py
BSD-3-Clause
def _oneGame(self, preset=None):
    """ a single black stone can be set as the first move. """
    self.env.reset()
    if preset is not None:  # `is not None` instead of `!= None` (PEP 8)
        # pre-place a black stone, then the opponent gets the first real move
        self.env._setStone(CaptureGame.BLACK, preset)
        self.env.movesDone += 1
        self.env.playToTheEnd(self.opponent, self.player)
    else:
        self.env.playToTheEnd(self.player, self.opponent)
    moves = self.env.movesDone
    win = self.env.winner == self.player.color
    if self.verbose:
        print(('Preset:', preset, 'T:', self.temp, 'Win:', win, 'after', moves, 'moves.'))
    # scale the score by game length: quick wins (and long losses) score higher
    res = 1 - self.numMovesCoeff * (moves - self.minmoves) / (self.maxmoves - self.minmoves)
    if win:
        return res
    else:
        return - res
a single black stone can be set as the first move.
_oneGame
python
pybrain/pybrain
pybrain/rl/environments/twoplayergames/tasks/relativetask.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/environments/twoplayergames/tasks/relativetask.py
BSD-3-Clause
def doInteractionsAndLearn(self, number = 1):
    """ Execute a number of steps while learning continuously.
    no reset is performed, such that consecutive calls to this function can be made. """
    remaining = number
    while remaining > 0:
        self._oneInteraction()
        # learn after every single interaction
        self.agent.learn()
        remaining -= 1
    return self.stepid
Execute a number of steps while learning continuously. no reset is performed, such that consecutive calls to this function can be made.
doInteractionsAndLearn
python
pybrain/pybrain
pybrain/rl/experiments/continuous.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/experiments/continuous.py
BSD-3-Clause
def _oneInteraction(self):
    """ Do an interaction between the Task and the Agent. """
    if self.doOptimization:
        raise Exception('When using a black-box learning algorithm, only full episodes can be done.')
    # otherwise fall back to the generic experiment behaviour
    return Experiment._oneInteraction(self)
Do an interaction between the Task and the Agent.
_oneInteraction
python
pybrain/pybrain
pybrain/rl/experiments/episodic.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/experiments/episodic.py
BSD-3-Clause
def doEpisodes(self, number = 1):
    """ Do one episode, and return the rewards of each step as a list. """
    if self.doOptimization:
        # black-box optimization: just grant more evaluations and learn
        self.optimizer.maxEvaluations += number
        self.optimizer.learn()
        return
    all_rewards = []
    for _ in range(number):
        self.agent.newEpisode()
        episode = []
        self.stepid = 0
        self.task.reset()
        while not self.task.isFinished():
            episode.append(self._oneInteraction())
        all_rewards.append(episode)
    return all_rewards
Do one episode, and return the rewards of each step as a list.
doEpisodes
python
pybrain/pybrain
pybrain/rl/experiments/episodic.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/experiments/episodic.py
BSD-3-Clause
def doInteractions(self, number = 1):
    """ The default implementation directly maps the methods of the agent and the task.
    Returns the number of interactions done. """
    count = 0
    while count < number:
        self._oneInteraction()
        count += 1
    return self.stepid
The default implementation directly maps the methods of the agent and the task. Returns the number of interactions done.
doInteractions
python
pybrain/pybrain
pybrain/rl/experiments/experiment.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/experiments/experiment.py
BSD-3-Clause
def _oneInteraction(self):
    """ Give the observation to the agent, takes its resulting action and returns it to the task.
    Then gives the reward to the agent again and returns it. """
    self.stepid += 1
    # observation -> agent -> action -> task
    obs = self.task.getObservation()
    self.agent.integrateObservation(obs)
    self.task.performAction(self.agent.getAction())
    # task -> reward -> agent
    reward = self.task.getReward()
    self.agent.giveReward(reward)
    return reward
Give the observation to the agent, takes its resulting action and returns it to the task. Then gives the reward to the agent again and returns it.
_oneInteraction
python
pybrain/pybrain
pybrain/rl/experiments/experiment.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/experiments/experiment.py
BSD-3-Clause
def _produceAllPairs(self):
    """ produce a list of all pairs of agents (assuming ab <> ba)"""
    # ordered pairs: both (a, b) and (b, a) are produced
    return [(a, b) for a in self.agents for b in self.agents if a != b]
produce a list of all pairs of agents (assuming ab <> ba)
_produceAllPairs
python
pybrain/pybrain
pybrain/rl/experiments/tournament.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/experiments/tournament.py
BSD-3-Clause
def _oneGame(self, p1, p2):
    """ play one game between two agents p1 and p2."""
    self.numGames += 1
    self.env.reset()
    players = (p1, p2)
    # p1 always plays the tournament's configured starting color
    p1.color = self.startcolor
    p2.color = -p1.color
    p1.newEpisode()
    p2.newEpisode()
    i = 0
    while not self.env.gameOver():
        p = players[i]
        i = (i + 1) % 2 # alternate
        act = p.getAction()
        if self.forcedLegality:
            # keep re-asking the player, up to 50 retries, until the move is legal
            tries = 0
            while not self.env.isLegal(*act):
                tries += 1
                # CHECKME: maybe the legality check is too specific?
                act = p.getAction()
                if tries > 50:
                    raise Exception('No legal move produced!')
        self.env.performAction(act)
    if players not in self.results:
        self.results[players] = []
    wincolor = self.env.getWinner()
    if wincolor == p1.color:
        winner = p1
    else:
        winner = p2
    # record the winner under the ordered (p1, p2) pairing
    self.results[players].append(winner)
play one game between two agents p1 and p2.
_oneGame
python
pybrain/pybrain
pybrain/rl/experiments/tournament.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/experiments/tournament.py
BSD-3-Clause
def organize(self, repeat=1):
    """ have all agents play all others in all orders, and repeat. """
    for _ in range(repeat):
        self.rounds += 1
        # every ordered pairing plays one game per round
        for first, second in self._produceAllPairs():
            self._oneGame(first, second)
    return self.results
have all agents play all others in all orders, and repeat.
organize
python
pybrain/pybrain
pybrain/rl/experiments/tournament.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/experiments/tournament.py
BSD-3-Clause
def eloScore(self, startingscore=1500, k=32): """ compute the elo score of all the agents, given the games played in the tournament. Also checking for potentially initial scores among the agents ('elo' variable). """ # initialize elos = {} for a in self.agents: if 'elo' in a.__dict__: elos[a] = a.elo else: elos[a] = startingscore # adjust ratings for i, a1 in enumerate(self.agents[:-1]): for a2 in self.agents[i + 1:]: # compute score (in favor of a1) s = 0 outcomes = self.results[(a1, a2)] + self.results[(a2, a1)] for r in outcomes: if r == a1: s += 1. elif r == self.env.DRAW: s += 0.5 # what score would have been estimated? est = len(outcomes) / (1. + 10 ** ((elos[a2] - elos[a1]) / 400.)) delta = k * (s - est) elos[a1] += delta elos[a2] -= delta for a, e in list(elos.items()): a.elo = e return elos
compute the elo score of all the agents, given the games played in the tournament. Also checking for potentially initial scores among the agents ('elo' variable).
eloScore
python
pybrain/pybrain
pybrain/rl/experiments/tournament.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/experiments/tournament.py
BSD-3-Clause
def _setSigma(self, sigma): """ Wrapper method to set the sigmas (the parameters of the module) to a certain value. """ assert len(sigma) == self.dim self._params *= 0 self._params += sigma
Wrapper method to set the sigmas (the parameters of the module) to a certain value.
_setSigma
python
pybrain/pybrain
pybrain/rl/explorers/continuous/normal.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/explorers/continuous/normal.py
BSD-3-Clause
def _setSigma(self, sigma): """ Wrapper method to set the sigmas (the parameters of the module) to a certain value. """ assert len(sigma) == self.actiondim self._params *= 0 self._params += sigma
Wrapper method to set the sigmas (the parameters of the module) to a certain value.
_setSigma
python
pybrain/pybrain
pybrain/rl/explorers/continuous/sde.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/explorers/continuous/sde.py
BSD-3-Clause
def activate(self, state, action): """ The super class commonly ignores the state and simply passes the action through the module. implement _forwardImplementation() in subclasses. """ self.state = state return Module.activate(self, action)
The super class commonly ignores the state and simply passes the action through the module. implement _forwardImplementation() in subclasses.
activate
python
pybrain/pybrain
pybrain/rl/explorers/continuous/sde.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/explorers/continuous/sde.py
BSD-3-Clause
def activate(self, state, action): """ The super class ignores the state and simply passes the action through the module. implement _forwardImplementation() in subclasses. """ self._state = state return DiscreteExplorer.activate(self, state, action)
The super class ignores the state and simply passes the action through the module. implement _forwardImplementation() in subclasses.
activate
python
pybrain/pybrain
pybrain/rl/explorers/discrete/boltzmann.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/explorers/discrete/boltzmann.py
BSD-3-Clause
def _forwardImplementation(self, inbuf, outbuf): """ Draws a random number between 0 and 1. If the number is less than epsilon, a random action is chosen. If it is equal or larger than epsilon, the greedy action is returned. """ assert self.module values = self.module.getActionValues(self._state) action = drawGibbs(values, self.tau) self.tau *= self.decay outbuf[:] = array([action])
Draws a random number between 0 and 1. If the number is less than epsilon, a random action is chosen. If it is equal or larger than epsilon, the greedy action is returned.
_forwardImplementation
python
pybrain/pybrain
pybrain/rl/explorers/discrete/boltzmann.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/explorers/discrete/boltzmann.py
BSD-3-Clause
def _setModule(self, module): """ Tells the explorer the module (which has to be ActionValueTable). """ # removed: cause for circular import # assert isinstance(module, ActionValueInterface) self._module = module
Tells the explorer the module (which has to be ActionValueTable).
_setModule
python
pybrain/pybrain
pybrain/rl/explorers/discrete/discrete.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/explorers/discrete/discrete.py
BSD-3-Clause
def __init__(self, epsilon = 0.2, decay = 0.9998): """ TODO: the epsilon and decay parameters are currently not implemented. """ DiscreteExplorer.__init__(self) self.state = None
TODO: the epsilon and decay parameters are currently not implemented.
__init__
python
pybrain/pybrain
pybrain/rl/explorers/discrete/discretesde.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/explorers/discrete/discretesde.py
BSD-3-Clause
def activate(self, state, action): """ Save the current state for state-dependent exploration. """ self.state = state return DiscreteExplorer.activate(self, state, action)
Save the current state for state-dependent exploration.
activate
python
pybrain/pybrain
pybrain/rl/explorers/discrete/discretesde.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/explorers/discrete/discretesde.py
BSD-3-Clause
def _forwardImplementation(self, inbuf, outbuf): """ Activate the copied module instead of the original and feed it with the current state. """ if random.random() < 0.001: outbuf[:] = array([random.randint(self.module.numActions)]) else: outbuf[:] = self.explorerModule.activate(self.state)
Activate the copied module instead of the original and feed it with the current state.
_forwardImplementation
python
pybrain/pybrain
pybrain/rl/explorers/discrete/discretesde.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/explorers/discrete/discretesde.py
BSD-3-Clause
def newEpisode(self): """ Inform the explorer about the start of a new episode. """ self.explorerModule = deepcopy(self.module) if isinstance(self.explorerModule, ActionValueNetwork): self.explorerModule.network.mutationStd = 0.01 self.explorerModule.network.mutate() elif isinstance(self.explorerModule, ActionValueTable): self.explorerModule.mutationStd = 0.01 self.explorerModule.mutate()
Inform the explorer about the start of a new episode.
newEpisode
python
pybrain/pybrain
pybrain/rl/explorers/discrete/discretesde.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/explorers/discrete/discretesde.py
BSD-3-Clause
def _forwardImplementation(self, inbuf, outbuf): """ Draws a random number between 0 and 1. If the number is less than epsilon, a random action is chosen. If it is equal or larger than epsilon, the greedy action is returned. """ assert self.module if random.random() < self.epsilon: outbuf[:] = array([random.randint(self.module.numActions)]) else: outbuf[:] = inbuf self.epsilon *= self.decay
Draws a random number between 0 and 1. If the number is less than epsilon, a random action is chosen. If it is equal or larger than epsilon, the greedy action is returned.
_forwardImplementation
python
pybrain/pybrain
pybrain/rl/explorers/discrete/egreedy.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/explorers/discrete/egreedy.py
BSD-3-Clause
def _setModule(self, module): """ initialize gradient descender with module parameters and the loglh dataset with the outdim of the module. """ self._module = module # initialize explorer self._explorer = NormalExplorer(module.outdim) # build network self._initializeNetwork()
initialize gradient descender with module parameters and the loglh dataset with the outdim of the module.
_setModule
python
pybrain/pybrain
pybrain/rl/learners/directsearch/policygradient.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/directsearch/policygradient.py
BSD-3-Clause
def _setExplorer(self, explorer): """ assign non-standard explorer to the policy gradient learner. requires the module to be set beforehand. """ assert self._module self._explorer = explorer # build network self._initializeNetwork()
assign non-standard explorer to the policy gradient learner. requires the module to be set beforehand.
_setExplorer
python
pybrain/pybrain
pybrain/rl/learners/directsearch/policygradient.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/directsearch/policygradient.py
BSD-3-Clause
def _initializeNetwork(self): """ build the combined network consisting of the module and the explorer and initializing the log likelihoods dataset. """ self.network = FeedForwardNetwork() self.network.addInputModule(self._module) self.network.addOutputModule(self._explorer) self.network.addConnection(IdentityConnection(self._module, self._explorer)) self.network.sortModules() # initialize gradient descender self.gd.init(self.network.params) # initialize loglh dataset self.loglh = LoglhDataSet(self.network.paramdim)
build the combined network consisting of the module and the explorer and initializing the log likelihoods dataset.
_initializeNetwork
python
pybrain/pybrain
pybrain/rl/learners/directsearch/policygradient.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/directsearch/policygradient.py
BSD-3-Clause
def learn(self): """ calls the gradient calculation function and executes a step in direction of the gradient, scaled with a small learning rate alpha. """ assert self.dataset != None assert self.module != None # calculate the gradient with the specific function from subclass gradient = self.calculateGradient() # scale gradient if it has too large values if max(gradient) > 1000: gradient = gradient / max(gradient) * 1000 # update the parameters of the module p = self.gd(gradient.flatten()) self.network._setParameters(p) self.network.reset()
calls the gradient calculation function and executes a step in direction of the gradient, scaled with a small learning rate alpha.
learn
python
pybrain/pybrain
pybrain/rl/learners/directsearch/policygradient.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/directsearch/policygradient.py
BSD-3-Clause
def greedyEpisode(self): """ run one episode with greedy decisions, return the list of rewards recieved.""" rewards = [] self.task.reset() self.net.reset() while not self.task.isFinished(): obs = self.task.getObservation() act = self.net.activate(obs) chosen = argmax(act) self.task.performAction(chosen) reward = self.task.getReward() rewards.append(reward) return rewards
run one episode with greedy decisions, return the list of rewards recieved.
greedyEpisode
python
pybrain/pybrain
pybrain/rl/learners/directsearch/rwr.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/directsearch/rwr.py
BSD-3-Clause
def trueFeatureStats(T, R, fMap, discountFactor, stateProp=1, MAT_LIMIT=1e8): """ Gather the statistics needed for LSTD, assuming infinite data (true probabilities). Option: if stateProp is < 1, then only a proportion of all states will be seen as starting state for transitions """ dim = len(fMap) numStates = len(T) statMatrix = zeros((dim, dim)) statResidual = zeros(dim) ss = list(range(numStates)) repVersion = False if stateProp < 1: ss = random.sample(ss, int(numStates * stateProp)) elif dim * numStates**2 < MAT_LIMIT: repVersion = True # two variants, depending on how large we can afford our matrices to become. if repVersion: tmp1 = tile(fMap, (numStates,1,1)) tmp2 = transpose(tmp1, (2,1,0)) tmp3 = tmp2 - discountFactor * tmp1 tmp4 = tile(T, (dim,1,1)) tmp4 *= transpose(tmp1, (1,2,0)) statMatrix = tensordot(tmp3, tmp4, axes=[[0,2], [1,2]]).T statResidual = dot(R, dot(fMap, T).T) else: for sto in ss: tmp = fMap - discountFactor * repmat(fMap[:, sto], numStates, 1).T tmp2 = fMap * repmat(T[:, sto], dim, 1) statMatrix += dot(tmp2, tmp.T) statResidual += R[sto] * dot(fMap, T[:, sto]) return statMatrix, statResidual
Gather the statistics needed for LSTD, assuming infinite data (true probabilities). Option: if stateProp is < 1, then only a proportion of all states will be seen as starting state for transitions
trueFeatureStats
python
pybrain/pybrain
pybrain/rl/learners/modelbased/leastsquares.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/modelbased/leastsquares.py
BSD-3-Clause
def LSTD_Qvalues(Ts, policy, R, fMap, discountFactor): """ LSTDQ is like LSTD, but with features replicated once for each possible action. Returns Q-values in a 2D array. """ numA = len(Ts) dim = len(Ts[0]) numF = len(fMap) fMapRep = zeros((numF * numA, dim * numA)) for a in range(numA): fMapRep[numF * a:numF * (a + 1), dim * a:dim * (a + 1)] = fMap statMatrix = zeros((numF * numA, numF * numA)) statResidual = zeros(numF * numA) for sto in range(dim): r = R[sto] fto = zeros(numF * numA) for nextA in range(numA): fto += fMapRep[:, sto + nextA * dim] * policy[sto][nextA] for sfrom in range(dim): for a in range(numA): ffrom = fMapRep[:, sfrom + a * dim] prob = Ts[a][sfrom, sto] statMatrix += outer(ffrom, ffrom - discountFactor * fto) * prob statResidual += ffrom * r * prob Qs = zeros((dim, numA)) w = lstsq(statMatrix, statResidual)[0] for a in range(numA): Qs[:,a] = dot(w[numF*a:numF*(a+1)], fMap) return Qs
LSTDQ is like LSTD, but with features replicated once for each possible action. Returns Q-values in a 2D array.
LSTD_Qvalues
python
pybrain/pybrain
pybrain/rl/learners/modelbased/leastsquares.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/modelbased/leastsquares.py
BSD-3-Clause
def LSPI_policy(fMap, Ts, R, discountFactor, initpolicy=None, maxIters=20): """ LSPI is like policy iteration, but Q-values are estimated based on the feature map. Returns the best policy found. """ if initpolicy is None: policy, _ = randomPolicy(Ts) else: policy = initpolicy while maxIters > 0: Qs = LSTD_Qvalues(Ts, policy, R, fMap, discountFactor) newpolicy = greedyQPolicy(Qs) if sum(ravel(abs(newpolicy - policy))) < 1e-3: return policy, collapsedTransitions(Ts, policy) policy = newpolicy maxIters -= 1 return policy, collapsedTransitions(Ts, policy)
LSPI is like policy iteration, but Q-values are estimated based on the feature map. Returns the best policy found.
LSPI_policy
python
pybrain/pybrain
pybrain/rl/learners/modelbased/leastsquares.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/modelbased/leastsquares.py
BSD-3-Clause
def LSTD_PI_policy(fMap, Ts, R, discountFactor, initpolicy=None, maxIters=20): """ Alternative version of LSPI using value functions instead of state-action values as intermediate. """ def veval(T): return LSTD_values(T, R, fMap, discountFactor) return policyIteration(Ts, R, discountFactor, VEvaluator=veval, initpolicy=initpolicy, maxIters=maxIters)
Alternative version of LSPI using value functions instead of state-action values as intermediate.
LSTD_PI_policy
python
pybrain/pybrain
pybrain/rl/learners/modelbased/leastsquares.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/modelbased/leastsquares.py
BSD-3-Clause
def trueValues(T, R, discountFactor): """ Compute the true discounted value function for each state, given a policy (encoded as collapsed transition matrix). """ assert discountFactor < 1 distr = T.copy() res = dot(T, R) for i in range(1, int(10 / (1. - discountFactor))): distr = dot(distr, T) res += (discountFactor ** i) * dot(distr, R) return res
Compute the true discounted value function for each state, given a policy (encoded as collapsed transition matrix).
trueValues
python
pybrain/pybrain
pybrain/rl/learners/modelbased/policyiteration.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/modelbased/policyiteration.py
BSD-3-Clause
def trueQValues(Ts, R, discountFactor, policy): """ The true Q-values, given a model and a policy. """ T = collapsedTransitions(Ts, policy) V = trueValues(T, R, discountFactor) Vnext = V*discountFactor+R numA = len(Ts) dim = len(R) Qs = zeros((dim, numA)) for si in range(dim): for a in range(numA): Qs[si, a] = dot(Ts[a][si], Vnext) return Qs
The true Q-values, given a model and a policy.
trueQValues
python
pybrain/pybrain
pybrain/rl/learners/modelbased/policyiteration.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/modelbased/policyiteration.py
BSD-3-Clause
def collapsedTransitions(Ts, policy): """ Collapses a list of transition matrices (one per action) and a list of action probability vectors into a single transition matrix.""" res = zeros_like(Ts[0]) dim = len(Ts[0]) for ai, ap in enumerate(policy.T): res += Ts[ai] * repmat(ap, dim, 1).T return res
Collapses a list of transition matrices (one per action) and a list of action probability vectors into a single transition matrix.
collapsedTransitions
python
pybrain/pybrain
pybrain/rl/learners/modelbased/policyiteration.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/modelbased/policyiteration.py
BSD-3-Clause
def greedyPolicy(Ts, R, discountFactor, V): """ Find the greedy policy, (soft tie-breaking) given a value function and full transition model. """ dim = len(V) numA = len(Ts) Vnext = V*discountFactor+R policy = zeros((dim, numA)) for si in range(dim): actions = all_argmax([dot(T[si, :], Vnext) for T in Ts]) for a in actions: policy[si, a] = 1. / len(actions) return policy, collapsedTransitions(Ts, policy)
Find the greedy policy, (soft tie-breaking) given a value function and full transition model.
greedyPolicy
python
pybrain/pybrain
pybrain/rl/learners/modelbased/policyiteration.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/modelbased/policyiteration.py
BSD-3-Clause
def greedyQPolicy(Qs): """ Find the greedy deterministic policy, given the Q-values. """ dim = len(Qs) numA = len(Qs[0]) policy = zeros((dim, numA)) for si in range(dim): actions = all_argmax(Qs[si]) for a in actions: policy[si, a] = 1. / len(actions) return policy
Find the greedy deterministic policy, given the Q-values.
greedyQPolicy
python
pybrain/pybrain
pybrain/rl/learners/modelbased/policyiteration.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/modelbased/policyiteration.py
BSD-3-Clause
def randomDeterministic(Ts): """ Pick a random deterministic action for each state. """ numA = len(Ts) dim = len(Ts[0]) choices = (rand(dim) * numA).astype(int) policy = zeros((dim, numA)) for si, a in choices: policy[si, a] = 1 return policy, collapsedTransitions(Ts, policy)
Pick a random deterministic action for each state.
randomDeterministic
python
pybrain/pybrain
pybrain/rl/learners/modelbased/policyiteration.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/modelbased/policyiteration.py
BSD-3-Clause
def policyIteration(Ts, R, discountFactor, VEvaluator=None, initpolicy=None, maxIters=20): """ Given transition matrices (one per action), produce the optimal policy, using the policy iteration algorithm. A custom function that maps policies to value functions can be provided. """ if initpolicy is None: policy, T = randomPolicy(Ts) else: policy = initpolicy T = collapsedTransitions(Ts, policy) if VEvaluator is None: VEvaluator = lambda T: trueValues(T, R, discountFactor) while maxIters > 0: V = VEvaluator(T) newpolicy, T = greedyPolicy(Ts, R, discountFactor, V) # if the probabilities are not changing more than by 0.001, we're done. if sum(ravel(abs(newpolicy - policy))) < 1e-3: return policy, T policy = newpolicy maxIters -= 1 return policy, T
Given transition matrices (one per action), produce the optimal policy, using the policy iteration algorithm. A custom function that maps policies to value functions can be provided.
policyIteration
python
pybrain/pybrain
pybrain/rl/learners/modelbased/policyiteration.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/modelbased/policyiteration.py
BSD-3-Clause
def getMaxAction(self, state): """ Return the action with the maximal value for the given state. """ values = self.params.reshape(self.numRows, self.numColumns)[int(state), :].flatten() action = where(values == max(values))[0] action = choice(action) return action
Return the action with the maximal value for the given state.
getMaxAction
python
pybrain/pybrain
pybrain/rl/learners/valuebased/interface.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/valuebased/interface.py
BSD-3-Clause
def _updateWeights(self, state, action, reward, next_state): """ state and next_state are vectors, action is an integer. """ td_error = reward + self.rewardDiscount * max(dot(self._theta, next_state)) - dot(self._theta[action], state) #print(action, reward, td_error,self._theta[action], state, dot(self._theta[action], state)) #print(self.learningRate * td_error * state) #print() self._theta[action] += self.learningRate * td_error * state
state and next_state are vectors, action is an integer.
_updateWeights
python
pybrain/pybrain
pybrain/rl/learners/valuebased/linearfa.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/valuebased/linearfa.py
BSD-3-Clause
def _updateWeights(self, state, action, reward, next_state): """ state and next_state are vectors, action is an integer. """ self._updateEtraces(state, action) td_error = reward + self.rewardDiscount * max(dot(self._theta, next_state)) - dot(self._theta[action], state) self._theta += self.learningRate * td_error * self._etraces
state and next_state are vectors, action is an integer.
_updateWeights
python
pybrain/pybrain
pybrain/rl/learners/valuebased/linearfa.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/valuebased/linearfa.py
BSD-3-Clause
def _updateWeights(self, state, action, reward, next_state, next_action): """ state and next_state are vectors, action is an integer. """ td_error = reward + self.rewardDiscount * dot(self._theta[next_action], next_state) - dot(self._theta[action], state) self._updateEtraces(state, action) self._theta += self.learningRate * td_error * self._etraces
state and next_state are vectors, action is an integer.
_updateWeights
python
pybrain/pybrain
pybrain/rl/learners/valuebased/linearfa.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/valuebased/linearfa.py
BSD-3-Clause
def _updateWeights(self, state, action, reward, next_state, learned_policy=None): """ Policy is a function that returns a probability vector for all actions, given the current state(-features). """ if learned_policy is None: learned_policy = self._greedyPolicy self._updateEtraces(state, action) phi = zeros((self.num_actions, self.num_features)) phi[action] += state phi_n = outer(learned_policy(next_state), next_state) self._A += outer(ravel(self._etraces), ravel(phi - self.rewardDiscount * phi_n)) self._b += reward * ravel(self._etraces) self._theta = dot(pinv2(self._A), self._b).reshape(self.num_actions, self.num_features)
Policy is a function that returns a probability vector for all actions, given the current state(-features).
_updateWeights
python
pybrain/pybrain
pybrain/rl/learners/valuebased/linearfa.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/valuebased/linearfa.py
BSD-3-Clause
def learn(self): """ Learn on the current dataset, either for many timesteps and even episodes (batchMode = True) or for a single timestep (batchMode = False). Batch mode is possible, because Q-Learning is an off-policy method. In batchMode, the algorithm goes through all the samples in the history and performs an update on each of them. if batchMode is False, only the last data sample is considered. The user himself has to make sure to keep the dataset consistent with the agent's history. """ if self.batchMode: samples = self.dataset else: samples = [[self.dataset.getSample()]] for seq in samples: # information from the previous episode (sequence) # should not influence the training on this episode self.laststate = None self.lastaction = None self.lastreward = None for state, action, reward in seq: state = int(state) action = int(action) # first learning call has no last state: skip if self.laststate == None: self.lastaction = action self.laststate = state self.lastreward = reward continue qvalue = self.module.getValue(self.laststate, self.lastaction) maxnext = self.module.getValue(state, self.module.getMaxAction(state)) self.module.updateValue(self.laststate, self.lastaction, qvalue + self.alpha * (self.lastreward + self.gamma * maxnext - qvalue)) # move state to oldstate self.laststate = state self.lastaction = action self.lastreward = reward
Learn on the current dataset, either for many timesteps and even episodes (batchMode = True) or for a single timestep (batchMode = False). Batch mode is possible, because Q-Learning is an off-policy method. In batchMode, the algorithm goes through all the samples in the history and performs an update on each of them. if batchMode is False, only the last data sample is considered. The user himself has to make sure to keep the dataset consistent with the agent's history.
learn
python
pybrain/pybrain
pybrain/rl/learners/valuebased/q.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/valuebased/q.py
BSD-3-Clause
def _setModule(self, module): """ Set module and tell explorer about the module. """ if self.explorer: self.explorer.module = module self._module = module
Set module and tell explorer about the module.
_setModule
python
pybrain/pybrain
pybrain/rl/learners/valuebased/valuebased.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/valuebased/valuebased.py
BSD-3-Clause
def _setExplorer(self, explorer): """ Set explorer and tell it the module, if already available. """ self._explorer = explorer if self.module: self._explorer.module = self.module
Set explorer and tell it the module, if already available.
_setExplorer
python
pybrain/pybrain
pybrain/rl/learners/valuebased/valuebased.py
https://github.com/pybrain/pybrain/blob/master/pybrain/rl/learners/valuebased/valuebased.py
BSD-3-Clause
def __init__(self, constructor, dimensions, name = None, baserename = False): """:arg constructor: a constructor method that returns a module :arg dimensions: tuple of dimensions. """ self.dims = dimensions if name != None: self.name = name # a dict where the tuple of coordinates is the key self.components = {} for coord in iterCombinations(self.dims): tmp = constructor() self.components[coord] = tmp tmp.name = self.name + str(coord) if baserename and isinstance(tmp, ModuleSlice): tmp.base.name = tmp.name self.componentIndim = tmp.indim self.componentOutdim = tmp.outdim
:arg constructor: a constructor method that returns a module :arg dimensions: tuple of dimensions.
__init__
python
pybrain/pybrain
pybrain/structure/modulemesh.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/modulemesh.py
BSD-3-Clause
def constructWithLayers(layerclass, layersize, dimensions, name = None): """ create the mesh using constructors that build layers of a specified size and class. """ c = lambda: layerclass(layersize) return ModuleMesh(c, dimensions, name)
create the mesh using constructors that build layers of a specified size and class.
constructWithLayers
python
pybrain/pybrain
pybrain/structure/modulemesh.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/modulemesh.py
BSD-3-Clause
def viewOnFlatLayer(layer, dimensions, name = None): """ Produces a ModuleMesh that is a mesh-view on a flat module. """ assert max(dimensions) > 1, "At least one dimension needs to be larger than one." def slicer(): nbunits = reduce(lambda x, y: x*y, dimensions, 1) insize = layer.indim // nbunits outsize = layer.outdim // nbunits for index in range(nbunits): yield ModuleSlice(layer, insize*index, insize*(index+1), outsize*index, outsize*(index+1)) c = slicer() return ModuleMesh(lambda: next(c), dimensions, name)
Produces a ModuleMesh that is a mesh-view on a flat module.
viewOnFlatLayer
python
pybrain/pybrain
pybrain/structure/modulemesh.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/modulemesh.py
BSD-3-Clause
def __init__(self, base, inSliceFrom = 0, inSliceTo = None, outSliceFrom = 0, outSliceTo = None): """ :key base: the base module that is sliced """ if isinstance(base, ModuleSlice): # tolerantly handle the case of a slice of another slice self.base = base.base self.inOffset = inSliceFrom + base.inSliceFrom self.outOffset = outSliceFrom + base.outSliceFrom if inSliceTo == None: inSliceTo = self.base.indim + base.inSliceFrom if outSliceTo == None: outSliceTo = self.base.outdim + base.outSliceFrom self.name = base.base.name else: self.base = base self.inOffset = inSliceFrom self.outOffset = outSliceFrom if inSliceTo == None: inSliceTo = self.base.indim if outSliceTo == None: outSliceTo = self.base.outdim self.name = base.name assert self.inOffset >= 0 and self.outOffset >= 0 self.indim = inSliceTo - inSliceFrom self.outdim = outSliceTo - outSliceFrom self.name += ('-slice:('+str(self.inOffset)+','+str(self.indim+self.inOffset)+')(' +str(self.outOffset)+','+str(self.outdim+self.outOffset)+')') # some slicing is required assert self.indim+self.outdim < base.indim+base.outdim
:key base: the base module that is sliced
__init__
python
pybrain/pybrain
pybrain/structure/moduleslice.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/moduleslice.py
BSD-3-Clause
def __init__(self, paramdim = 0, **args): """ initialize all parameters with random values, normally distributed around 0 :key stdParams: standard deviation of the values (default: 1). """ self.setArgs(**args) self.paramdim = paramdim if paramdim > 0: self._params = zeros(self.paramdim) # enable derivatives if it is a instance of Module or Connection # CHECKME: the import can not be global? from pybrain.structure.modules.module import Module from pybrain.structure.connections.connection import Connection if isinstance(self, Module) or isinstance(self, Connection): self.hasDerivatives = True if self.hasDerivatives: self._derivs = zeros(self.paramdim) self.randomize()
initialize all parameters with random values, normally distributed around 0 :key stdParams: standard deviation of the values (default: 1).
__init__
python
pybrain/pybrain
pybrain/structure/parametercontainer.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/parametercontainer.py
BSD-3-Clause
def _setDerivatives(self, d, owner = None): """ :key d: an array of numbers of self.paramdim """ assert self.owner == owner assert size(d) == self.paramdim self._derivs = d
:key d: an array of numbers of self.paramdim
_setDerivatives
python
pybrain/pybrain
pybrain/structure/parametercontainer.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/parametercontainer.py
BSD-3-Clause
def resetDerivatives(self): """ :note: this method only sets the values to zero, it does not initialize the array. """ assert self.hasDerivatives self._derivs *= 0
:note: this method only sets the values to zero, it does not initialize the array.
resetDerivatives
python
pybrain/pybrain
pybrain/structure/parametercontainer.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/parametercontainer.py
BSD-3-Clause
def __init__(self, inmod, outmod, name = None,
             inSliceFrom = 0, inSliceTo = None, outSliceFrom = 0, outSliceTo = None):
    """ Every connection requires an input and an output module. Optionally, it is
    possible to define slices on the buffers.

    :arg inmod: input module
    :arg outmod: output module
    :key inSliceFrom: starting index on the buffer of inmod (default = 0)
    :key inSliceTo: ending index on the buffer of inmod (default = last)
    :key outSliceFrom: starting index on the buffer of outmod (default = 0)
    :key outSliceTo: ending index on the buffer of outmod (default = last)
    """
    self._name = name
    self.inSliceFrom = inSliceFrom
    self.outSliceFrom = outSliceFrom
    # Default the slice ends to the full width of the respective buffers.
    if inSliceTo is not None:
        self.inSliceTo = inSliceTo
    else:
        self.inSliceTo = inmod.outdim
    if outSliceTo is not None:
        self.outSliceTo = outSliceTo
    else:
        self.outSliceTo = outmod.indim
    # A ModuleSlice is unwrapped to its base module; its offsets are folded
    # into this connection's slice bounds so we always address the base
    # module's buffers directly.
    if isinstance(inmod, ModuleSlice):
        self.inmod = inmod.base
        self.inSliceFrom += inmod.outOffset
        self.inSliceTo += inmod.outOffset
    else:
        self.inmod = inmod
    if isinstance(outmod, ModuleSlice):
        self.outmod = outmod.base
        self.outSliceFrom += outmod.inOffset
        self.outSliceTo += outmod.inOffset
    else:
        self.outmod = outmod
    # Effective dimensions of this connection are the slice widths.
    self.indim = self.inSliceTo - self.inSliceFrom
    self.outdim = self.outSliceTo - self.outSliceFrom
    # arguments for xml serialization: record only non-default slice bounds
    self.setArgs(inmod = self.inmod, outmod = self.outmod)
    if self.inSliceFrom > 0:
        self.setArgs(inSliceFrom = self.inSliceFrom)
    if self.outSliceFrom > 0:
        self.setArgs(outSliceFrom = self.outSliceFrom)
    if self.inSliceTo < self.inmod.outdim:
        self.setArgs(inSliceTo = self.inSliceTo)
    if self.outSliceTo < self.outmod.indim:
        self.setArgs(outSliceTo = self.outSliceTo)
Every connection requires an input and an output module. Optionally, it is possible to define slices on the buffers. :arg inmod: input module :arg outmod: output module :key inSliceFrom: starting index on the buffer of inmod (default = 0) :key inSliceTo: ending index on the buffer of inmod (default = last) :key outSliceFrom: starting index on the buffer of outmod (default = 0) :key outSliceTo: ending index on the buffer of outmod (default = last)
__init__
python
pybrain/pybrain
pybrain/structure/connections/connection.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/connections/connection.py
BSD-3-Clause
def forward(self, inmodOffset=0, outmodOffset=0):
    """Propagate activation across this connection.

    Takes the configured slice of the incoming module's output buffer
    (at time-step row ``inmodOffset``) and hands it, together with the
    matching slice of the outgoing module's input buffer (at row
    ``outmodOffset``), to ``_forwardImplementation``, which adds its
    possibly transformed contribution into the target slice.
    """
    source = self.inmod.outputbuffer[inmodOffset, self.inSliceFrom:self.inSliceTo]
    target = self.outmod.inputbuffer[outmodOffset, self.outSliceFrom:self.outSliceTo]
    self._forwardImplementation(source, target)
Propagate the information from the incoming module's output buffer, adding it to the outgoing node's input buffer, and possibly transforming it on the way. For this transformation use inmodOffset as an offset for the inmod and outmodOffset as an offset for the outmodules offset.
forward
python
pybrain/pybrain
pybrain/structure/connections/connection.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/connections/connection.py
BSD-3-Clause
def backward(self, inmodOffset=0, outmodOffset=0):
    """Propagate error backwards across this connection.

    Gathers the configured slice of the outgoing module's input-error
    buffer (at row ``outmodOffset``) plus the incoming module's
    output-error and output buffers (at row ``inmodOffset``) and lets
    ``_backwardImplementation`` perform the inverse transformation,
    adding into the incoming module's output-error slice and, if
    appropriate, computing parameter derivatives.
    """
    outgoing_error = self.outmod.inputerror[outmodOffset, self.outSliceFrom:self.outSliceTo]
    incoming_error = self.inmod.outputerror[inmodOffset, self.inSliceFrom:self.inSliceTo]
    incoming_output = self.inmod.outputbuffer[inmodOffset, self.inSliceFrom:self.inSliceTo]
    self._backwardImplementation(outgoing_error, incoming_error, incoming_output)
Propagate the error found at the outgoing module, adding it to the incoming module's output-error buffer and doing the inverse transformation of forward propagation. For this transformation use inmodOffset as an offset for the inmod and outmodOffset as an offset for the outmodules offset. If appropriate, also compute the parameter derivatives.
backward
python
pybrain/pybrain
pybrain/structure/connections/connection.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/connections/connection.py
BSD-3-Clause
def __repr__(self): """A simple representation (this should probably be expanded by subclasses). """ params = { 'class': self.__class__.__name__, 'name': self.name, 'inmod': self.inmod.name, 'outmod': self.outmod.name } return "<%(class)s '%(name)s': '%(inmod)s' -> '%(outmod)s'>" % params
A simple representation (this should probably be expanded by subclasses).
__repr__
python
pybrain/pybrain
pybrain/structure/connections/connection.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/connections/connection.py
BSD-3-Clause
def newSimilarInstance(self):
    """Produce a fresh Evolvable of the same kind: a copy of this one
    with freshly randomized parameters."""
    clone = self.copy()
    clone.randomize()
    return clone
Generates a new Evolvable of the same kind.
newSimilarInstance
python
pybrain/pybrain
pybrain/structure/evolvables/evolvable.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/evolvables/evolvable.py
BSD-3-Clause
def params(self):
    """ returns an array with (usually) only the unmasked parameters

    When ``returnZeros`` is set, the underlying container's full
    parameter array is returned unchanged instead of the compacted one.
    """
    if self.returnZeros:
        return self.pcontainer.params
    # Compact the enabled entries of maskableParams into the front of x;
    # any remaining slots stay zero.
    x = zeros(self.paramdim)
    paramcount = 0
    # Truth-test the (boolean) mask directly instead of comparing '== True'.
    for value, enabled in zip(self.maskableParams, self.mask):
        if enabled:
            x[paramcount] = value
            paramcount += 1
    return x
returns an array with (usually) only the unmasked parameters
params
python
pybrain/pybrain
pybrain/structure/evolvables/maskedparameters.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/evolvables/maskedparameters.py
BSD-3-Clause
def randomize(self, **args):
    """ an initial, random mask (with random params) with as many parameters enabled as allowed"""
    dim = self.pcontainer.paramdim
    self.mask = zeros(dim, dtype=bool)
    # Draw each mask bit independently, remembering which came out enabled.
    onbits = []
    for index in range(dim):
        if random() > self.maskOnProbability:
            self.mask[index] = True
            onbits.append(index)
    # If too many bits are on, switch a random surplus subset off again.
    surplus = len(onbits) - self.maxComplexity
    if surplus > 0:
        for index in sample(onbits, surplus):
            self.mask[index] = False
    self.maskableParams = randn(dim) * self.stdParams
    self._applyMask()
an initial, random mask (with random params) with as many parameters enabled as allowed
randomize
python
pybrain/pybrain
pybrain/structure/evolvables/maskedparameters.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/evolvables/maskedparameters.py
BSD-3-Clause
def topologyMutate(self):
    """ flips some bits on the mask (but do not exceed the maximum of enabled parameters). """
    # Independently flip each mask bit with probability maskFlipProbability.
    for i in range(self.pcontainer.paramdim):
        if random() < self.maskFlipProbability:
            self.mask[i] = not self.mask[i]
    # If the flips pushed us over the complexity budget, repeatedly pick a
    # random enabled bit (rejection sampling) and switch it off.
    tooMany = sum(self.mask) - self.maxComplexity
    for i in range(tooMany):
        while True:
            ind = int(random()*self.pcontainer.paramdim)
            if self.mask[ind]:
                self.mask[ind] = False
                break
    if sum(self.mask) == 0:
        # CHECKME: minimum of one needs to be on
        ind = int(random()*self.pcontainer.paramdim)
        self.mask[ind] = True
    self._applyMask()
flips some bits on the mask (but do not exceed the maximum of enabled parameters).
topologyMutate
python
pybrain/pybrain
pybrain/structure/evolvables/maskedparameters.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/evolvables/maskedparameters.py
BSD-3-Clause
def mutate(self):
    """ add some gaussian noise to all parameters."""
    # CHECKME: could this be partly outsourced to the pcontainer directly?
    spread = self.mutationStdev
    for index in range(self.pcontainer.paramdim):
        self.maskableParams[index] += gauss(0, spread)
    self._applyMask()
add some gaussian noise to all parameters.
mutate
python
pybrain/pybrain
pybrain/structure/evolvables/maskedparameters.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/evolvables/maskedparameters.py
BSD-3-Clause
def newSimilarInstance(self):
    """ generate a new Evolvable with the same topology """
    offspring = self.copy()
    offspring.randomize()
    return offspring
generate a new Evolvable with the same topology
newSimilarInstance
python
pybrain/pybrain
pybrain/structure/evolvables/topology.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/evolvables/topology.py
BSD-3-Clause
def __init__(self, outdim, hiddim=15):
    """ Create an EvolinoNetwork for sequences of dimension outdim,
    with an RNN (LSTM) layer of dimension hiddim.
    """
    # The network takes no external input of its own; the input layer is
    # sized to carry the back-projected output instead.
    indim = 0
    Module.__init__(self, indim, outdim)
    self._network = RecurrentNetwork()
    self._in_layer = LinearLayer(indim + outdim)
    self._hid_layer = LSTMLayer(hiddim)
    self._out_layer = LinearLayer(outdim)
    self._bias = BiasUnit()
    # Register all modules with the wrapped recurrent network.
    self._network.addInputModule(self._in_layer)
    self._network.addModule(self._hid_layer)
    self._network.addModule(self._bias)
    self._network.addOutputModule(self._out_layer)
    # Feed-forward wiring: input -> hidden, bias -> hidden, hidden -> output.
    self._in_to_hid_connection = FullConnection(self._in_layer, self._hid_layer)
    self._bias_to_hid_connection = FullConnection(self._bias, self._hid_layer)
    self._hid_to_out_connection = FullConnection(self._hid_layer, self._out_layer)
    self._network.addConnection(self._in_to_hid_connection)
    self._network.addConnection(self._bias_to_hid_connection)
    self._network.addConnection(self._hid_to_out_connection)
    # Recurrent self-connection on the hidden LSTM layer.
    self._recurrent_connection = FullConnection(self._hid_layer, self._hid_layer)
    self._network.addRecurrentConnection(self._recurrent_connection)
    # Finalize the topology and clear all activation buffers.
    self._network.sortModules()
    self._network.reset()
    self.offset = self._network.offset
    # Scaling applied to output values fed back into the input layer.
    self.backprojectionFactor = 0.01
Create an EvolinoNetwork for sequences of dimension outdim, with an RNN layer of dimension hiddim.
__init__
python
pybrain/pybrain
pybrain/structure/modules/evolinonetwork.py
https://github.com/pybrain/pybrain/blob/master/pybrain/structure/modules/evolinonetwork.py
BSD-3-Clause