# === repo: kgx | file: kgx-master/tests/integration/test_neo_loader.py ===

import os
import pytest
from kgx.transformer import Transformer
from tests import RESOURCE_DIR, TARGET_DIR
from tests.unit import clean_database
from kgx.config import get_logger
from tests.integration import (
check_container,
CONTAINER_NAME,
DEFAULT_NEO4J_URL,
DEFAULT_NEO4J_USERNAME,
DEFAULT_NEO4J_PASSWORD,
)
logger = get_logger()
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_csv_to_neo4j_load_to_graph_transform(clean_database):
"""
    Test to load a CSV KGX file into Neo4j.
"""
logger.debug("test_csv_to_neo4j_load...")
input_args1 = {
"filename": [
os.path.join(RESOURCE_DIR, "cm_nodes.csv"),
os.path.join(RESOURCE_DIR, "cm_edges.csv"),
],
"format": "csv",
}
t1 = Transformer()
t1.transform(input_args1)
output_args = {
"uri": DEFAULT_NEO4J_URL,
"username": DEFAULT_NEO4J_USERNAME,
"password": DEFAULT_NEO4J_PASSWORD,
"format": "neo4j",
}
t1.save(output_args)
"""
Continue sequentially to test read from Neo4j to write out back to CSV.
"""
logger.debug("test_neo4j_to_graph_transform")
input_args = {
"uri": DEFAULT_NEO4J_URL,
"username": DEFAULT_NEO4J_USERNAME,
"password": DEFAULT_NEO4J_PASSWORD,
"format": "neo4j",
}
output_filename = os.path.join(TARGET_DIR, "neo_graph")
output_args = {"filename": output_filename, "format": "csv"}
t = Transformer()
t.transform(input_args, output_args)
assert t.store.graph.number_of_nodes() == 10
assert t.store.graph.number_of_edges() == 11
assert os.path.exists(f"{output_filename}_nodes.csv")
assert os.path.exists(f"{output_filename}_edges.csv")
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_json_to_neo4j_load_to_graph_transform(clean_database):
"""
    Test to load a JSON KGX file into Neo4j.
"""
logger.debug("test_json_to_neo4j_load...")
input_args1 = {
"filename": [
os.path.join(RESOURCE_DIR, "json_edges.json")
],
"format": "json",
}
t1 = Transformer()
t1.transform(input_args1)
output_args = {
"uri": DEFAULT_NEO4J_URL,
"username": DEFAULT_NEO4J_USERNAME,
"password": DEFAULT_NEO4J_PASSWORD,
"format": "neo4j",
}
t1.save(output_args)
"""
Continue sequentially to test read from Neo4j to write out back to CSV.
"""
logger.debug("test_neo4j_to_graph_transform")
input_args = {
"uri": DEFAULT_NEO4J_URL,
"username": DEFAULT_NEO4J_USERNAME,
"password": DEFAULT_NEO4J_PASSWORD,
"format": "neo4j",
}
output_filename = os.path.join(TARGET_DIR, "neo_graph")
output_args = {"filename": output_filename, "format": "csv"}
t = Transformer()
t.transform(input_args, output_args)
assert os.path.exists(f"{output_filename}_nodes.csv")
assert os.path.exists(f"{output_filename}_edges.csv")

# === repo: kgx | file: kgx-master/docs/conf.py ===

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# kgx documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 18 16:51:38 2019.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from recommonmark.transform import AutoStructify
sys.path.insert(0, os.path.abspath('../'))
github_doc_root = 'https://github.com/biolink/kgx/tree/master/docs/'
def setup(app):
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: github_doc_root + url,
'auto_toc_tree_section': 'Contents',
}, True)
app.add_transform(AutoStructify)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'sphinx_rtd_theme',
'sphinx_click.ext',
'recommonmark'
]
# Napoleon settings
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = True
napoleon_use_admonition_for_notes = True
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'kgx'
author = 'Chris Mungall, Deepak Unni, Kent Shefchek, Lance Hannestad, Richard Bruskiewich, Kenneth Bruskiewicz, Sierra Moxon'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.7.0'
# The full version, including alpha/beta/rc tags.
release = '1.7.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Activate the theme.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'canonical_url': '',
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': None,
'style_external_links': True,
'style_nav_header_background': '#54b39c',
'collapse_navigation': False,
'sticky_navigation': True,
'navigation_depth': -1,
'titles_only': False
}
html_show_sourcelink = False
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'kgxdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'kgx.tex', 'KGX Documentation',
'Chris Mungall, Deepak Unni, Kent Shefchek, Kenneth Bruskiewicz, Lance Hannestad, Richard Bruskiewich', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'KGX', 'KGX Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'KGX', 'KGX Documentation',
author, 'kgx', '',
'Miscellaneous'),
]

# === repo: CoreNLP | file: CoreNLP-main/test/regression/test-rte-system.py ===

#!/usr/bin/env python
import os
import re
import time
import random
dataset = "RTE2_dev"
score = 0.0
minScore = 0.60
memory = "-ms2g -mx7g"
# Set up file paths -------------------------------------------------------------
dataDir = "/u/nlp/rte/data/byformat"
tmpDir = "/tmp/rte-regression.%d" % os.getpid()
os.mkdir(tmpDir)
rteFile = "%s/rte/%s.xml" % (dataDir, dataset)
kbeFile = "%s/%s.kbe.xml" % (tmpDir, dataset)
pipelineFile = "%s/%s.pipeline.info.xml" % (tmpDir, dataset)
alignFile = "%s/%s.align.xml" % (tmpDir, dataset)
logFile = "%s/%s.log" % (tmpDir, dataset)
regressionFile = "%s/regression/%s.regression.log" % (dataDir, dataset)
# Make KBE file from RTE file ---------------------------------------------------
def makeKBEFile():
javaclass = "edu.stanford.nlp.util.XMLTransformer"
xsltransformer = "/u/nlp/rte/data/resources/RTE_to_KBEval.xsl"
cmd = "java -server %s %s " % (memory, javaclass) + \
"-in %s " % rteFile + \
"-out %s " % kbeFile + \
"-transform %s " % xsltransformer + \
"> %s 2>&1 " % logFile
# print "cmd is:\n", cmd
os.system(cmd)
# Annotation --------------------------------------------------------------------
def doAnnotation():
javaclass = "edu.stanford.nlp.rte.RTEPipeline"
cmd = "java -server %s %s " % (memory, javaclass) + \
"-kbeIn %s " % kbeFile + \
"-infoOut %s " % pipelineFile + \
"> %s 2>&1 " % logFile
# print "cmd is:\n", cmd
os.system(cmd)
# Alignment & inference ---------------------------------------------------------
def doAlignmentAndInference():
aligner = "stochastic"
javaclass = "edu.stanford.nlp.rte.KBETester"
cmd = "java -server %s %s " % (memory, javaclass) + \
"-info %s " % pipelineFile + \
"-saveAlignments %s " % alignFile + \
"-aligner %s " % aligner + \
"-twoClass " + \
"-balancedData " + \
"-verbose 1 " + \
"> %s 2>&1 " % logFile
# print "cmd is:\n", cmd
os.system(cmd)
# Extract score -----------------------------------------------------------------
def extractScore():
    score = 0.0  # avoid a NameError if grep finds no Accuracy line
    for line in os.popen("grep '^Accuracy:' %s" % logFile):
line = line.strip()
# print line
fields = re.split('\s+', line)
score = float(fields[-1])
return score
# Get previous score ------------------------------------------------------------
def getPreviousScore():
prev = 0.0
for line in os.popen("grep '^PASS' %s" % regressionFile):
line = line.strip()
# print line
fields = re.split('\s+', line)
prev = float(fields[1])
return prev
# Save score --------------------------------------------------------------------
def saveScore(score, minScore, logFile):
if score >= minScore:
result = "PASS"
else:
result = "FAIL"
f = open(regressionFile, "a")
print >>f, \
"%s %.4f %.4f %s %s" % \
(result,
score,
minScore,
time.strftime("%Y%m%d-%H%M%S"),
logFile)
f.close()
# main --------------------------------------------------------------------------
makeKBEFile()
doAnnotation()
doAlignmentAndInference()
# score = random.random()
score = extractScore()
minScore = max(minScore, getPreviousScore())
if score >= minScore:
print "PASS score %.4f >= min %.4f" % (score, minScore)
else:
    print "FAIL score %.4f < min %.4f, output in %s" % (score, minScore, logFile)
saveScore(score, minScore, logFile)

# === repo: CoreNLP | file: CoreNLP-main/scripts/arabic-segmenter/tag_segmentation.py ===

#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import sys
import re
from optparse import OptionParser
from utf8utils import uprint, uopen
from edits import get_edits, SEG_MARKER
from output_to_tedeval import is_deleted
REWRITE_MARKER = "REW"
class FalseOptions(object):
def __getattr__(self, name): return False
FALSE_OPTIONS = FalseOptions()
class Accumulator(object):
def __init__(self, callback):
self.callback = callback
self.buffer = []
def add(self, line):
self.buffer.append(line)
def flush(self):
if len(self.buffer) != 0:
self.callback(self.buffer)
self.buffer = []
class Tagger(object):
def __init__(self, tagsfile):
self.tagsfile = tagsfile
self.prevline = None
self.ignored = 0
def __call__(self, words):
tagsline = '\n'
while tagsline == '\n':
            tagsline = self.tagsfile.readline()
tags = get_tags(tagsline)
if len(tags) != len(words):
# print >> sys.stderr, "Number of tags doesn't match number of words"
# print >> sys.stderr, ' previous line: ' + self.prevline
# print >> sys.stderr, (' tags line: %s\n tags: %s\n words: %s' %
# (tagsline, ', '.join(tags), ', '.join(words)))
self.ignored += 1
# raw_input()
return
uprint(' '.join('|||'.join(pair) for pair in zip(words, tags)))
self.prevline = tagsline
def get_tags(line):
tags = []
pairs = [split_pair(token) for token in line.split()]
for pair in pairs:
if not is_deleted(pair[0]):
# Duplicate a tag several times if splitting numbers from
# miscellaneous characters would result in that segment
# turning into several tokens after tokenization.
tags += [pair[1]] * num_number_splits(pair[0])
return tags
def split_pair(token):
pos = token.rfind('|||')
return token[:pos], token[pos + 3:]
NUMBER_BOUNDARY = re.compile(r'(?<=[^0-9 -])(?=[0-9])|(?<=[0-9])(?=[^0-9 -])')
def num_number_splits(segment):
num_boundaries = len(NUMBER_BOUNDARY.findall(segment))
return num_boundaries + 1
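# Illustrative example (not in the original source): 'abc123' contains one
# digit/non-digit boundary, so num_number_splits('abc123') == 2 and the tag
# for that segment is emitted twice, once per post-tokenization token.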
def convert(infile, tagsfile):
tagger = Tagger(tagsfile)
accum = Accumulator(tagger)
for line in infile:
segs_norew, segs_rew = convert_line(line)
assert len(segs_norew) == len(segs_rew)
for norew, rew in zip(segs_norew, segs_rew):
accum.add('%s>>>%s' % (norew, rew))
if len(segs_norew) == 0:
accum.flush()
print >> sys.stderr, ('%d sentences ignored.' % tagger.ignored)
def convert_line(line):
if '\t' not in line:
        return [], []
line = line[:-1]
edits = get_edits(line, FALSE_OPTIONS, special_noseg=False)
raw, segmented = line.split('\t')
if edits is None:
norew = rew = segmented
else:
norew, rew = apply_edits(edits, raw)
segs_norew = norew.split(SEG_MARKER)
segs_rew = rew.split(SEG_MARKER)
return (unescape('- -'.join(segs_norew)).split(),
unescape('- -'.join(segs_rew)).split())
def unescape(s):
return (s.replace('#pm#', ':')
.replace('#lp#', '(')
.replace('#rp#', ')'))
def apply_edits(edits, raw):
if len(edits) != len(raw):
print >> sys.stderr, "Number of edits is not equal to number of characters"
print >> sys.stderr, (' word: %s\n edits: %s' %
(raw, ', '.join(edits)))
raise AssertionError
labels = [crf_label(raw[i], edits[i]) for i in range(len(raw))]
norew = ''.join(rewrite_with_label(raw[i], labels[i], False)
for i in range(len(raw)))
rew = ''.join(rewrite_with_label(raw[i], labels[i], True)
for i in range(len(raw)))
return norew, rew
def crf_label(char, edit):
if (edit == u' :+ا ' and char == u'ل'): return 'REW'
elif SEG_MARKER in edit: return 'BEGIN'
elif edit.strip() in (u'ي>ى', u'ت>ة', u'ى>ي', u'ه>ة', u'ة>ه'):
return 'REW'
else: return 'CONT'
def rewrite_with_label(char, label, apply_rewrites):
if label == 'BEGIN': return SEG_MARKER + char
elif label == 'CONT': return char
elif label == 'REW':
if char == u'ل':
return u':ال'
elif apply_rewrites:
if char in u'ته':
return u'ة'
elif char == u'ة':
return u'ه'
elif char == u'ي':
return u'ى'
elif char == u'ى':
return u'ي'
else:
return char
else:
assert False, 'unrecognized label: ' + label
def parse_args():
parser = OptionParser(usage='Usage: %prog <segmentation> <tags>')
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error('Please provide a segmentation file and a tags file.')
return args
if __name__ == '__main__':
files = parse_args()
with uopen(files[0], 'r') as infile, uopen(files[1], 'r') as tagsfile:
convert(infile, tagsfile)

# === repo: CoreNLP | file: CoreNLP-main/scripts/arabic-segmenter/edits.py ===

#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import re
from utf8utils import uprint
NOSEG = '<noseg>'
SEG_MARKER = ':'
SEG = ' %s ' % SEG_MARKER
LONG_VOWELS = u'ايوى'
ALIFS = u'اأإٱآ'
HAAS = u'هح'
def get_edits(line, options, special_noseg=True):
if '\t' not in line:
if options.verbose:
uprint("ignoring line that doesn't have two parts:")
uprint(' ' + repr(line))
return
raw, seg = line.split('\t')
# Special cases:
# - an odd edit with no segmentations [e.g. ع -> على]
if special_noseg and raw != seg and SEG_MARKER not in seg:
return [u'<other>'] * len(raw)
# - token deleted
if seg == '':
return [u' <del> '] * len(raw)
# - nothing on the raw side
if raw == '':
if options.verbose:
uprint("ignoring line with empty raw text:")
uprint(' ' + repr(line))
return
edits = []
last_raw = ''
last_seg = ''
while len(raw) != 0:
# Possible edits, in order that they are searched for:
# :+Al // li + definite article + word starting with l
if raw.endswith(u'لل') and seg.endswith(u'ل%sالل' % SEG_MARKER):
edits.append(u' %s+ال' % SEG_MARKER)
seg = seg[:-3]
# +A:+A // mA + A... verbal negation spelled as just m
elif is_ma_alif(seg, raw):
edits.append(u' +ا%s+ا ' % SEG_MARKER)
seg = seg[:-3]
# x:x // shadda breaking: character duplicated on either side of
# segmentation
# x>xx // shadda breaking: character duplicated, no segmentation
elif is_shadda(seg, raw):
if seg.endswith(SEG_MARKER + raw[-1]):
edits.append(u' x:x ')
seg = seg[:-2]
else:
assert seg.endswith(raw[-1] * 2), repr(seg + '\t' + raw)
edits.append(u' x>xx ')
seg = seg[:-1]
# :+x // added an letter after segmentation (alif for
# li + definite article, noon for recovered first person
# prefix or y -> ny in dialect)
elif is_seg_plus(seg, raw):
edits.append(u' %s+%s ' % (SEG_MARKER, seg[-2]))
seg = seg[:-2]
# +x: // added a letter before segmentation (usually noon, for
# plurals, mim~A, Al~A, etc.)
elif is_plus_seg(seg, raw):
edits.append(u' +%s%s ' % (seg[-3], SEG_MARKER))
seg = seg[:-2]
# <del> // deleted lengthening effect (yAAAAAA -> yA)
elif is_lengthening(seg, raw, last_raw):
edits.append(u' <del> ')
seg += u' '
# : // ordinary segmentation boundary
elif seg.endswith(SEG_MARKER + raw[-1]):
edits.append(SEG)
seg = seg[:-1]
# <noseg> // character doesn't change, no segmentation added
elif len(seg) != 0 and seg[-1] == raw[-1]:
edits.append(NOSEG)
# <other> // normalized E or El to ElY
elif is_alaa_normalization(seg, raw):
edits.append(u'<other>')
seg = seg[:-2]
if raw[-1] != u'ع':
assert raw[-2] == u'ع'
seg = seg + ' '
# +V: // added a long vowel (verbal or dialect -wA ending, jussive
# normalization)
elif len(seg) >= 2 and seg[-2] == raw[-1] and seg[-1] in LONG_VOWELS:
if len(seg) >= 3 and seg[-3] == SEG_MARKER:
edits.append(u' %s+%s ' % (SEG_MARKER, seg[-1]))
seg = seg[:-2]
else:
edits.append(u' +%s ' % seg[-1])
seg = seg[:-1]
# y:+h // recover dialectal silent haa after segmentation
elif seg.endswith(u'ي' + SEG_MARKER + u'ه') and raw.endswith(u'ي'):
edits.append(u' ي%s+ه ' % SEG_MARKER)
seg = seg[:-2]
# <del> // deleted a long vowel (dialect ending normalization: mostly
# -kwA -> -kw and -kY -> -k) or dialectal silent haa
elif (len(raw) >= 2 and norm_endswith(seg, raw[-2], HAAS) and
raw[-1] in LONG_VOWELS + u'ه'):
edits.append(u' <del> ')
seg += u' '
# <del> // deleted diacritic
elif is_diacritic(raw[-1]):
edits.append(u' <del> ')
seg += u' '
# x>y: // change x to y after a segment boundary
elif (len(seg) >= 2 and seg[-2] == SEG_MARKER and
is_common_rewrite(seg, raw)):
edits.append(u' %s%s>%s ' % (SEG_MARKER, raw[-1], seg[-1]))
seg = seg[:-1]
# x>y // change x to y without a segmentation (orthography
# normalization)
elif is_common_rewrite(seg, raw):
edits.append(u' %s>%s ' % (raw[-1], seg[-1]))
else:
if options.verbose:
uprint('ignoring line with unknown edit:')
uprint(' ' + line)
uprint('(seg = %s; raw = %s)' % (seg, raw))
uprint('(edits = %s)' % edits)
return
last_raw = raw[-1]
seg = seg[:-1]
last_seg = raw[-1]
raw = raw[:-1]
if len(seg) != 0:
if options.verbose:
uprint('ignoring line with unknown edit:')
uprint(' ' + line)
uprint('(extra seg: %s)' % seg)
uprint('(edits = %s)' % edits)
return
edits.reverse()
return edits
def is_ma_alif(seg, raw):
return (len(seg) >= 5 and len(raw) >= 2 and
is_common_rewrite(seg[-1], raw[-1]) and
raw[-2] == u'م' and
seg[-5:-1] == u'ما%sا' % SEG_MARKER)
def is_seg_plus(seg, raw):
return (len(seg) >= 4 and len(raw) >= 2 and
is_common_rewrite(seg[-1], raw[-1]) and
seg[-2] != raw[-2] and
seg[-2] in u'اني' and
seg[-3] == SEG_MARKER and
is_common_rewrite(seg[-4], raw[-2]))
def is_plus_seg(seg, raw):
return (len(seg) >= 4 and len(raw) >= 2 and
is_common_rewrite(seg[-1], raw[-1]) and
seg[-2] == SEG_MARKER and
seg[-3] != raw[-2] and
seg[-3] in u'ان' and
is_common_rewrite(seg[-4], raw[-2]))
def is_shadda(seg, raw):
seg = seg.replace(SEG_MARKER, '')
if len(raw) == 0 or not seg.endswith(raw[-1]):
return False
last = seg[-1]
for i in range(2, min(len(seg) + 1, len(raw) + 1)):
if seg[-i] != last: return False
if seg[-i] != raw[-i]: return True
# equal through the min of the two lengths, so check if it's
# a beginning-of-word shadda
return seg == raw[-1] + raw
def is_lengthening(seg, raw, last):
seg = seg.replace(SEG_MARKER, '')
if len(raw) < 2 or len(seg) == 0: return False
if raw[-1] != raw[-2]: return False
if raw[-1] != seg[-1]: return False
if len(seg) >= 2 and raw[-1] == seg[-2]: return False
return True
DIACRITIC = re.compile(ur'[~_\u0640\u064b-\u065e\u0670]')
# tatweel dagger alif
# most diacritics
def is_diacritic(char):
return DIACRITIC.match(char) is not None
COMMON_REWRITES = [
u'تة', # recovered taa marbuta
u'يىئ', # normalized Egyptian yaa
u'وؤ', # normalized waw hamza
u'هةو', # normalized 3sg ending
HAAS, # normalized future particle
ALIFS, # normalized alifs
u'اأإئؤقءي', # normalized various hamzas (written or spoken)
u'ىهةا', # normalized words ending in /a/ sound
u'تثط', # normalized letters pronounced /t/
u'دذضظ', # normalized letters pronounced /d/
u'سص', # normalized letters pronounced /s/
u'زذظ', # normalized letters pronounced /z/
u'?–,،؟', # normalized punctuation
]
def is_common_rewrite(seg, raw):
if len(seg) == 0 or len(raw) == 0: return False
if seg == raw: return True
for group in COMMON_REWRITES:
if seg[-1] in group and raw[-1] in group:
return True
return False
def is_alaa_normalization(seg, raw):
return ((raw.endswith(u'ع') or raw.endswith(u'عل')) and
seg.endswith(u'على'))
def norm_endswith(str, target_ending, norm_group):
'''
Return True if `str` ends with `target_ending`, ignoring differences
between characters in `norm_group`. Otherwise return False.
'''
if len(str) < len(target_ending): return False
source_ending = str[-len(target_ending):]
assert len(source_ending) == len(target_ending)
for s, t in zip(source_ending, target_ending):
if s != t and (s not in norm_group or t not in norm_group):
return False
return True
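# Illustrative example with hypothetical strings: norm_endswith(u'abxd', u'abyd', u'xy')
# returns True, because 'x' and 'y' differ only within the norm_group 'xy';
# norm_endswith(u'abzd', u'abyd', u'xy') returns False.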

# === repo: CoreNLP | file: CoreNLP-main/scripts/arabic-segmenter/output_to_tedeval.py ===

#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import sys
import codecs
import re
def convert(untok_filename, tok_filename):
with uopen(untok_filename, 'r') as input, \
uopen(tok_filename, 'r') as output, \
uopen(tok_filename + '.segmentation', 'w') as seg, \
uopen(tok_filename + '.ftree', 'w') as tree:
convert_files(input, output, seg, tree)
def get_filenames(argv):
if len(argv) != 3:
print 'Usage: %s <untok> <tok>' % argv[0]
print ' where'
print ' <untok> is the untokenized input file that was fed to the segmenter'
print ' <tok> is the existing segmenter output file'
print ' <tok>.segmentation will be the generated TEDEval seg file'
print ' <tok>.ftree will be the generated TEDEval tree file'
exit(1)
return argv[1], argv[2]
def uopen(filename, mode):
return codecs.open(filename, mode, encoding='utf-8')
def convert_files(input, output, seg, tree):
for input_line, output_line in zip(input, output):
process_line(input_line, output_line, seg, tree)
def process_line(input_line, output_line, seg, tree):
tree.write('(root')
input_words = sanitize(input_line).split(' ')
output_words = merge_segments(output_line).split(' ')
input_words = filter_deletions(input_words)
output_words = filter_deletions(output_words)
assert len(input_words) == len(output_words), str((input_line, output_line, input_words, output_words))
for input_word, output_word in zip(input_words, output_words):
for segment in output_word.split(':'):
tree.write(' (seg %s)' % segment)
seg.write('%s\t%s\n' % (input_word, output_word))
seg.write('\n')
tree.write(')\n')
def filter_deletions(words):
'''
Some tokens (ones consisting solely of a diacritic or tatweel) are deleted
by one or both segmenters. This deletes all such tokens from the output to
try to balance out the sentence.
'''
return [word for word in words if not is_deleted(word)]
def is_deleted(word):
return re.match(u'^[~_\u0640\u064b-\u065e\u0670]*$', word) is not None
# tatweel dagger alif
# most diacritics
def merge_segments(line):
return re.sub(r'\$(\w+)\$', r'#\1#',
re.sub(r'\(', r'#lp#',
re.sub(r'\)', r'#rp#',
re.sub(r'([^ ])# ', r'\1:',
re.sub(r' \+([^ ])', r':\1',
re.sub(r'([^ ])# \+([^ ])', r'\1:\2',
re.sub(r':', r'$pm$',
re.sub(r'#(\w+)#', r'$\1$',
line[:-1]))))))))
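# Reading the substitution chain inside-out: merge_segments first protects
# literal '#...#' markers and ':' characters (as '$...$' and '$pm$'), then
# collapses the segmenter's 'x# +y' boundary notation into 'x:y', escapes
# parentheses as '#lp#'/'#rp#', and finally restores the protected markers
# in '#...#' form, matching what sanitize() below produces for the input.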
def sanitize(line):
return re.sub(r'\(', r'#lp#',
re.sub(r'\)', r'#rp#',
re.sub(r':', r'#pm#',
line[:-1])))
if __name__ == '__main__':
untok, tok = get_filenames(sys.argv)
convert(untok, tok)

# === repo: CoreNLP | file: CoreNLP-main/scripts/arabic-segmenter/utf8utils.py ===

#!/usr/bin/env python2.7
import codecs
def uopen(filename, mode):
return codecs.open(filename, mode, encoding='utf-8')
def uprint(text):
print(text.encode('utf-8'))

# === repo: CoreNLP | file: CoreNLP-main/scripts/pos-tagger/generate_currency_pos_examples.py ===

#!/u/nlp/packages/anaconda/bin/python
from random import randint
def print_examples_for_currency_symbol(curr_symbol):
# $14.20
random_small_price = str(randint(0,99))+"."+str(randint(0,9))+str(randint(0,9))
print("%s_$ %s_CD" % (curr_symbol, random_small_price))
# 14.20$
random_small_price = str(randint(0,99))+"."+str(randint(0,9))+str(randint(0,9))
print("%s_CD %s_$" % (random_small_price, curr_symbol))
# $2.14
random_small_price = str(randint(0,9))+"."+str(randint(0,9))+str(randint(0,9))
print("%s_$ %s_CD" % (curr_symbol, random_small_price))
    # 2.14$
random_small_price = str(randint(0,9))+"."+str(randint(0,9))+str(randint(0,9))
print("%s_CD %s_$" % (random_small_price, curr_symbol))
# $10
print("%s_$ 10_CD" % curr_symbol)
# 10$
print("10_CD %s_$" % curr_symbol)
# random $XXXX
random_four_digit = randint(1000,9999)
print("%s_$ %s_CD" % (curr_symbol, str(random_four_digit)))
    # random $XXXX
random_four_digit = randint(1000,9999)
print("%s_$ %s_CD" % (curr_symbol, str(random_four_digit)))
# random $XXXX
random_four_digit = randint(1000,9999)
print("%s_CD %s_$" % (str(random_four_digit), curr_symbol))
# random XXXX$
random_four_digit = randint(1000,9999)
print("%s_CD %s_$" % (str(random_four_digit), curr_symbol))
# $500
print("%s_$ 500_CD" % curr_symbol)
# $50.00
print("%s_$ 50.00_CD" % curr_symbol)
# 50.00$
print("50.00_CD %s_$" % curr_symbol)
# $50
print("%s_$ 50_CD" % curr_symbol)
# 50$
print("50_CD %s_$" % curr_symbol)
# $1.00
print("%s_$ 1.00_CD" % curr_symbol)
# 1.00$
print("1.00_CD %s_$" % curr_symbol)
# $1,000
print("%s_$ 1,000_CD" % curr_symbol)
# 1,000$
print("1,000_CD %s_$" % curr_symbol)
# $1000000
print("%s_$ 1000000_CD" % curr_symbol)
# $1,000,000
print("%s_$ 1,000,000_CD" % curr_symbol)
    # 1000000000$
    print("1000000000_CD %s_$" % curr_symbol)
    # 1,000,000,000$
    print("1,000,000,000_CD %s_$" % curr_symbol)
    # $1000000000
    print("%s_$ 1000000000_CD" % curr_symbol)
    # $1,000,000,000
    print("%s_$ 1,000,000,000_CD" % curr_symbol)
    # 1000000000$
    print("1000000000_CD %s_$" % curr_symbol)
    # 1,000,000,000$
    print("1,000,000,000_CD %s_$" % curr_symbol)
currency_chars = ["¥", "£", "€", "₹", "₪", "₽", "₩", "¢"]
for curr_char in currency_chars:
print_examples_for_currency_symbol(curr_char)

# === repo: CoreNLP | file: CoreNLP-main/scripts/chinese-segmenter/ctb9_seg_data.py ===

"""
This script extracts segmentation data from ctb9 in some various hardcoded ways.
For example, each possible file class was individually parsed.
Train/test split is chosen based on the advice given in the readme.
There is no suggested dev split and the test split is quite small, actually.
The results of using this script and some models can be found
in /u/nlp/data/chinese/ctb9, at least as of 2020-01-16.
Models can be built with the make script hopefully still located in
projects/core/scripts/chinese-segmenter/Makefile
A model can be tested with a command line such as:
java edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier /u/nlp/data/chinese/ctb9/seg/ctb9.train.chris6.ser.gz -testFile /u/nlp/data/chinese/ctb9/seg/ctb9.test.txt -serDictionary /u/nlp/data/chinese/ctb9/seg/dict-chris6.ser.gz > seg9.out 2>&1
"""
import glob
import re
def parse_xml(filename, lines):
new_lines = []
for i, line in enumerate(lines[7:]):
line = line.strip()
if line.startswith('<S ID') or line.startswith('<ENDTIME>') or line.startswith('<END_TIME>'):
continue
if (line == '</S>' or line == '<HEADLINE>' or line == '</HEADLINE>' or
line == '<TEXT>' or line == '</TEXT>' or line == '</BODY>' or
line == '<P>' or line == '</P>' or line == '</DOC>' or
line == '<TURN>' or line == '</TURN>'):
continue
if line[0] == '<':
            raise ValueError("Unexpected XML tag in %s line %d: %s" % (filename, (i + 8), line))
new_lines.append(line)
return new_lines
# p1su1 occurs in /u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/data/segmented/chtb_5000.df.seg
# 13suid= occurs in /u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/data/segmented/chtb_5200.df.seg
# headline_su1 occurs in /u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/data/segmented/chtb_5336.df.seg
# psu1 occurs in /u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/data/segmented/chtb_5363.df.seg
# hesu1 occurs in /u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/data/segmented/chtb_5459.df.seg
# s1 occurs in /u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/data/segmented/chtb_6000.sc.seg
SU_PATTERN = re.compile("(?:<su id|13suid)=(?:[0-9]+[A-B]|p[0-9]*su[0-9]+|headline_su[0-9]+|hesu[0-9]+|s[0-9]+)(?:>?)")
def parse_su(filename, lines):
new_lines = []
for i, line in enumerate(lines):
line = line.strip()
if SU_PATTERN.match(line):
continue
if line[0] == '<':
            raise ValueError("Unexpected XML tag in %s line %d: %s" % (filename, (i + 1), line))
new_lines.append(line)
return new_lines
SEG_PATTERN = re.compile('<seg id="[0-9]+">')
def parse_seg(filename, lines):
new_lines = []
for i, line in enumerate(lines):
line = line.strip()
if SEG_PATTERN.match(line) or line == '</seg>':
continue
if line == '< HEADLINE >' or line == '< DOC >':
continue
if line[0] == '<':
            raise ValueError("Unexpected XML tag in %s line %d: %s" % (filename, (i + 1), line))
new_lines.append(line)
return new_lines
SEGMENT_PATTERN = re.compile('<segment id="[0-9]+" .+>')
def parse_segment(filename, lines):
new_lines = []
for i, line in enumerate(lines):
line = line.strip()
if SEGMENT_PATTERN.match(line) or line == '</segment>':
continue
if line[0] == '<':
            raise ValueError("Unexpected XML tag in %s line %d: %s" % (filename, (i + 1), line))
new_lines.append(line)
return new_lines
MSG_PATTERN = re.compile('<msg id=s[0-9]+m[0-9]+.*>')
def parse_msg(filename, lines):
new_lines = []
for i, line in enumerate(lines):
line = line.strip()
if MSG_PATTERN.match(line):
continue
if line[0] == '<':
            raise ValueError("Unexpected XML tag in %s line %d: %s" % (filename, (i + 1), line))
new_lines.append(line)
return new_lines
def parse_raw(filename, lines):
new_lines = []
for i, line in enumerate(lines):
if line.startswith('< QUOTEPREVIOUSPOST') or line.startswith('< QUOTE PREVIOUSPOST'):
continue
if line[0] == '<':
            raise ValueError("Unexpected XML tag in %s line %d: %s" % (filename, (i + 1), line))
new_lines.append(line)
return new_lines
def read_file(filename):
lines = open(filename).readlines()
# /u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/data/segmented/chtb_0050.nw.seg
if (lines[0].strip() == '<DOC>' and
lines[1].startswith('<DOCID>') and
lines[2].strip() == '<HEADER>' and
lines[5].strip() == '<BODY>'):
return parse_xml(filename, lines)
# /u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/data/segmented/chtb_3046.bn.seg
if (lines[0].strip() == '<DOC>' and
(lines[1].startswith('<DOCID>') or lines[1].startswith('<DOCNO>')) and
lines[2].startswith('<DOCTYPE') and
lines[4].strip() == '<BODY>'):
return parse_xml(filename, lines)
# /u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/data/segmented/chtb_7000.cs.seg
if SU_PATTERN.match(lines[0].strip()):
return parse_su(filename, lines)
# /u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/data/segmented/chtb_4000.nw.seg
if SEG_PATTERN.match(lines[0].strip()):
return parse_seg(filename, lines)
# /u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/data/segmented/chtb_4051.bn.seg
if SEGMENT_PATTERN.match(lines[0].strip()):
return parse_segment(filename, lines)
# /u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/data/segmented/chtb_6006.sc.seg
# <msg id=s0m0000>
if MSG_PATTERN.match(lines[0].strip()):
return parse_msg(filename, lines)
# /u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/data/segmented/chtb_4009.nw.seg
return parse_raw(filename, lines)
# raise ValueError("Unknown format: " + filename)
TEST_FILES = [1018, 1020, 1036, 1044, 1060, 1061, 1072, 1118, 1119, 1132, 1141, 1142, 1148]
# TODO: can extract this list directly from
# /u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/docs/ctb9.0-file-list.txt
# there's also dev file names there
def is_test_file(filenum):
if filenum in TEST_FILES:
return True
if filenum >= 1 and filenum <= 43:
return True
if filenum >= 144 and filenum <= 169:
return True
if filenum >= 900 and filenum <= 931:
return True
return False
def output_file(lines, filename):
repeats = set()
with open(filename, 'w') as fout:
for line in lines:
if line in repeats:
continue
fout.write('%s\n' % line)
repeats.add(line)
filters = [re.compile("p([.]?) [0-9]+"),
re.compile("[0-9]+ [/] [0-9.]+")]
def filter_bad_lines(lines):
"""
Filters some of the more common, essentially useless lines:
p. 55
2000 / 15
"""
    lines = [x for x in lines if all(f.match(x) is None for f in filters)]
return lines
def main():
train_data = []
test_data = []
files = sorted(glob.glob('/u/scr/corpora/ldc/2016/LDC2016T13/ctb9.0/data/segmented/*.seg'))
for filename in files:
filenum = int(filename.split("_")[-1].split(".")[0])
new_lines = read_file(filename)
if is_test_file(filenum):
test_data.extend(new_lines)
else:
train_data.extend(new_lines)
output_file(filter_bad_lines(train_data), 'ctb9_train.txt')
output_file(filter_bad_lines(test_data), 'ctb9_test.txt')
if __name__ == '__main__':
main()

# === repo: CoreNLP | file: CoreNLP-main/scripts/truecase/pick_text.py ===

import argparse
import glob
import random
"""
After a wikiextractor step, this script picks num_lines lines
randomly, with equal probability, from all the lines extracted from
wikipedia.
"""
def parse_args():
parser = argparse.ArgumentParser(description='Turn the output of wikiextractor into lines which can be tokenized')
parser.add_argument('--path', default='text',
help='Where to find the output of wikiextractor')
parser.add_argument('--num_lines', type=int, default=2000000,
help='Number of lines to keep')
parser.add_argument('--output', default='wiki.raw.txt',
help='Where to output text')
args = parser.parse_args()
return args
def main():
args = parse_args()
text = []
files = glob.glob('%s/*/wiki*' % args.path)
total_seen = 0
for infile in files:
with open(infile) as fin:
for line in fin.readlines():
                line = line.replace("<br>", " ")
line = line.strip()
if not line:
continue
if line.startswith("<"):
continue
if (line.count("|") > 5 or line.count(",") > 20 or
line.count(";") > 10 or line.count(":") > 10 or
line.count("•") > 5 or line.count("-") > 10):
# skip some random lists etc
continue
total_seen = total_seen + 1
if len(text) < args.num_lines:
text.append(line)
elif random.random() < args.num_lines / total_seen:
# randomly skip lines so lines have an equal
# probability of being accepted
index = random.randint(0, args.num_lines - 1)
text[index] = line
with open(args.output, 'w') as fout:
for line in text:
fout.write(line)
fout.write('\n\n')
if __name__ == "__main__":
main()

# === repo: CoreNLP | file: CoreNLP-main/data/edu/stanford/nlp/kbp/zh/semgrex/dump2depparse.py ===

import sys
import re
line = sys.stdin.readline()
while True:
line = line.strip()
if len(line) == 0:
break
    if line[0] == '!':
        # read the next line before continuing, otherwise this loops forever
        line = sys.stdin.readline()
        continue
line = re.sub(' +', '\t', line)
line = line.split('\t')
gloss = line[0].split('/')
deps = [t.split('\\t') for t in line[1].split('\\n')]
gloss1 = ['root'] + gloss
for t in deps:
tgt = int(t[0])
src = int(t[1])
type_ = t[2]
sys.stdout.write("%s(%d-%s, %d-%s)\n" % (type_, src, gloss1[src], tgt, gloss1[tgt]))
sys.stdout.write('\n')
line = sys.stdin.readline()
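# Assumed input format (hypothetical example; one record per line, terminated
# by a blank line): a '/'-joined gloss column followed by literal '\t'- and
# '\n'-escaped (target, source, type) dependency triples, e.g.
#   他/说  2\t0\troot\n1\t2\tnsubj
# which would print: root(0-root, 2-说) and nsubj(2-说, 1-他)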

# === repo: coderec_programming_states | file: coderec_programming_states-main/user_study_analysis/colorblind_colors.py ===

import numpy as np
from matplotlib.colors import LinearSegmentedColormap, to_rgba_array
def discretemap(colormap, hexclrs):
"""
Produce a colormap from a list of discrete colors without interpolation.
"""
clrs = to_rgba_array(hexclrs)
clrs = np.vstack([clrs[0], clrs, clrs[-1]])
cdict = {}
for ki, key in enumerate(('red','green','blue')):
cdict[key] = [ (i/(len(clrs)-2.), clrs[i, ki], clrs[i+1, ki]) for i in range(len(clrs)-1) ]
return LinearSegmentedColormap(colormap, cdict)
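# Illustrative use: discretemap('twotone', ['#FF0000', '#0000FF']) yields a
# colormap with exactly two constant color bands and no interpolation.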
class TOLcmaps(object):
"""
Class TOLcmaps definition.
"""
def __init__(self):
"""
"""
self.cmap = None
self.cname = None
self.namelist = (
'sunset_discrete', 'sunset', 'BuRd_discrete', 'BuRd',
'PRGn_discrete', 'PRGn', 'YlOrBr_discrete', 'YlOrBr', 'WhOrBr',
'iridescent', 'rainbow_PuRd', 'rainbow_PuBr', 'rainbow_WhRd',
'rainbow_WhBr', 'rainbow_discrete')
self.funcdict = dict(
zip(self.namelist,
(self.__sunset_discrete, self.__sunset, self.__BuRd_discrete,
self.__BuRd, self.__PRGn_discrete, self.__PRGn,
self.__YlOrBr_discrete, self.__YlOrBr, self.__WhOrBr,
self.__iridescent, self.__rainbow_PuRd, self.__rainbow_PuBr,
self.__rainbow_WhRd, self.__rainbow_WhBr,
self.__rainbow_discrete)))
def __sunset_discrete(self):
"""
Define colormap 'sunset_discrete'.
"""
clrs = ['#364B9A', '#4A7BB7', '#6EA6CD', '#98CAE1', '#C2E4EF',
'#EAECCC', '#FEDA8B', '#FDB366', '#F67E4B', '#DD3D2D',
'#A50026']
self.cmap = discretemap(self.cname, clrs)
self.cmap.set_bad('#FFFFFF')
def __sunset(self):
"""
Define colormap 'sunset'.
"""
clrs = ['#364B9A', '#4A7BB7', '#6EA6CD', '#98CAE1', '#C2E4EF',
'#EAECCC', '#FEDA8B', '#FDB366', '#F67E4B', '#DD3D2D',
'#A50026']
self.cmap = LinearSegmentedColormap.from_list(self.cname, clrs)
self.cmap.set_bad('#FFFFFF')
def __BuRd_discrete(self):
"""
Define colormap 'BuRd_discrete'.
"""
clrs = ['#2166AC', '#4393C3', '#92C5DE', '#D1E5F0', '#F7F7F7',
'#FDDBC7', '#F4A582', '#D6604D', '#B2182B']
self.cmap = discretemap(self.cname, clrs)
self.cmap.set_bad('#FFEE99')
def __BuRd(self):
"""
Define colormap 'BuRd'.
"""
clrs = ['#2166AC', '#4393C3', '#92C5DE', '#D1E5F0', '#F7F7F7',
'#FDDBC7', '#F4A582', '#D6604D', '#B2182B']
self.cmap = LinearSegmentedColormap.from_list(self.cname, clrs)
self.cmap.set_bad('#FFEE99')
def __PRGn_discrete(self):
"""
Define colormap 'PRGn_discrete'.
"""
clrs = ['#762A83', '#9970AB', '#C2A5CF', '#E7D4E8', '#F7F7F7',
'#D9F0D3', '#ACD39E', '#5AAE61', '#1B7837']
self.cmap = discretemap(self.cname, clrs)
self.cmap.set_bad('#FFEE99')
def __PRGn(self):
"""
Define colormap 'PRGn'.
"""
clrs = ['#762A83', '#9970AB', '#C2A5CF', '#E7D4E8', '#F7F7F7',
'#D9F0D3', '#ACD39E', '#5AAE61', '#1B7837']
self.cmap = LinearSegmentedColormap.from_list(self.cname, clrs)
self.cmap.set_bad('#FFEE99')
def __YlOrBr_discrete(self):
"""
Define colormap 'YlOrBr_discrete'.
"""
clrs = ['#FFFFE5', '#FFF7BC', '#FEE391', '#FEC44F', '#FB9A29',
'#EC7014', '#CC4C02', '#993404', '#662506']
self.cmap = discretemap(self.cname, clrs)
self.cmap.set_bad('#888888')
def __YlOrBr(self):
"""
Define colormap 'YlOrBr'.
"""
clrs = ['#FFFFE5', '#FFF7BC', '#FEE391', '#FEC44F', '#FB9A29',
'#EC7014', '#CC4C02', '#993404', '#662506']
self.cmap = LinearSegmentedColormap.from_list(self.cname, clrs)
self.cmap.set_bad('#888888')
def __WhOrBr(self):
"""
Define colormap 'WhOrBr'.
"""
clrs = ['#FFFFFF', '#FFF7BC', '#FEE391', '#FEC44F', '#FB9A29',
'#EC7014', '#CC4C02', '#993404', '#662506']
self.cmap = LinearSegmentedColormap.from_list(self.cname, clrs)
self.cmap.set_bad('#888888')
def __iridescent(self):
"""
Define colormap 'iridescent'.
"""
clrs = ['#FEFBE9', '#FCF7D5', '#F5F3C1', '#EAF0B5', '#DDECBF',
'#D0E7CA', '#C2E3D2', '#B5DDD8', '#A8D8DC', '#9BD2E1',
'#8DCBE4', '#81C4E7', '#7BBCE7', '#7EB2E4', '#88A5DD',
'#9398D2', '#9B8AC4', '#9D7DB2', '#9A709E', '#906388',
'#805770', '#684957', '#46353A']
self.cmap = LinearSegmentedColormap.from_list(self.cname, clrs)
self.cmap.set_bad('#999999')
def __rainbow_PuRd(self):
"""
Define colormap 'rainbow_PuRd'.
"""
clrs = ['#6F4C9B', '#6059A9', '#5568B8', '#4E79C5', '#4D8AC6',
'#4E96BC', '#549EB3', '#59A5A9', '#60AB9E', '#69B190',
'#77B77D', '#8CBC68', '#A6BE54', '#BEBC48', '#D1B541',
'#DDAA3C', '#E49C39', '#E78C35', '#E67932', '#E4632D',
'#DF4828', '#DA2222']
self.cmap = LinearSegmentedColormap.from_list(self.cname, clrs)
self.cmap.set_bad('#FFFFFF')
def __rainbow_PuBr(self):
"""
Define colormap 'rainbow_PuBr'.
"""
clrs = ['#6F4C9B', '#6059A9', '#5568B8', '#4E79C5', '#4D8AC6',
'#4E96BC', '#549EB3', '#59A5A9', '#60AB9E', '#69B190',
'#77B77D', '#8CBC68', '#A6BE54', '#BEBC48', '#D1B541',
'#DDAA3C', '#E49C39', '#E78C35', '#E67932', '#E4632D',
'#DF4828', '#DA2222', '#B8221E', '#95211B', '#721E17',
'#521A13']
self.cmap = LinearSegmentedColormap.from_list(self.cname, clrs)
self.cmap.set_bad('#FFFFFF')
def __rainbow_WhRd(self):
"""
Define colormap 'rainbow_WhRd'.
"""
clrs = ['#E8ECFB', '#DDD8EF', '#D1C1E1', '#C3A8D1', '#B58FC2',
'#A778B4', '#9B62A7', '#8C4E99', '#6F4C9B', '#6059A9',
'#5568B8', '#4E79C5', '#4D8AC6', '#4E96BC', '#549EB3',
'#59A5A9', '#60AB9E', '#69B190', '#77B77D', '#8CBC68',
'#A6BE54', '#BEBC48', '#D1B541', '#DDAA3C', '#E49C39',
'#E78C35', '#E67932', '#E4632D', '#DF4828', '#DA2222']
self.cmap = LinearSegmentedColormap.from_list(self.cname, clrs)
self.cmap.set_bad('#666666')
def __rainbow_WhBr(self):
"""
Define colormap 'rainbow_WhBr'.
"""
clrs = ['#E8ECFB', '#DDD8EF', '#D1C1E1', '#C3A8D1', '#B58FC2',
'#A778B4', '#9B62A7', '#8C4E99', '#6F4C9B', '#6059A9',
'#5568B8', '#4E79C5', '#4D8AC6', '#4E96BC', '#549EB3',
'#59A5A9', '#60AB9E', '#69B190', '#77B77D', '#8CBC68',
'#A6BE54', '#BEBC48', '#D1B541', '#DDAA3C', '#E49C39',
'#E78C35', '#E67932', '#E4632D', '#DF4828', '#DA2222',
'#B8221E', '#95211B', '#721E17', '#521A13']
self.cmap = LinearSegmentedColormap.from_list(self.cname, clrs)
self.cmap.set_bad('#666666')
def __rainbow_discrete(self, lut=None):
"""
Define colormap 'rainbow_discrete'.
"""
clrs = ['#E8ECFB', '#D9CCE3', '#D1BBD7', '#CAACCB', '#BA8DB4',
'#AE76A3', '#AA6F9E', '#994F88', '#882E72', '#1965B0',
'#437DBF', '#5289C7', '#6195CF', '#7BAFDE', '#4EB265',
'#90C987', '#CAE0AB', '#F7F056', '#F7CB45', '#F6C141',
'#F4A736', '#F1932D', '#EE8026', '#E8601C', '#E65518',
'#DC050C', '#A5170E', '#72190E', '#42150A']
indexes = [[9], [9, 25], [9, 17, 25], [9, 14, 17, 25], [9, 13, 14, 17,
25], [9, 13, 14, 16, 17, 25], [8, 9, 13, 14, 16, 17, 25], [8,
9, 13, 14, 16, 17, 22, 25], [8, 9, 13, 14, 16, 17, 22, 25, 27],
[8, 9, 13, 14, 16, 17, 20, 23, 25, 27], [8, 9, 11, 13, 14, 16,
17, 20, 23, 25, 27], [2, 5, 8, 9, 11, 13, 14, 16, 17, 20, 23,
25], [2, 5, 8, 9, 11, 13, 14, 15, 16, 17, 20, 23, 25], [2, 5,
8, 9, 11, 13, 14, 15, 16, 17, 19, 21, 23, 25], [2, 5, 8, 9, 11,
13, 14, 15, 16, 17, 19, 21, 23, 25, 27], [2, 4, 6, 8, 9, 11,
13, 14, 15, 16, 17, 19, 21, 23, 25, 27], [2, 4, 6, 7, 8, 9, 11,
13, 14, 15, 16, 17, 19, 21, 23, 25, 27], [2, 4, 6, 7, 8, 9, 11,
13, 14, 15, 16, 17, 19, 21, 23, 25, 26, 27], [1, 3, 4, 6, 7, 8,
9, 11, 13, 14, 15, 16, 17, 19, 21, 23, 25, 26, 27], [1, 3, 4,
6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 19, 21, 23, 25, 26,
27], [1, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 20,
22, 24, 25, 26, 27], [1, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15,
16, 17, 18, 20, 22, 24, 25, 26, 27, 28], [0, 1, 3, 4, 6, 7, 8,
9, 10, 12, 13, 14, 15, 16, 17, 18, 20, 22, 24, 25, 26, 27, 28]]
if lut is None or lut < 1 or lut > 23:
lut = 22
self.cmap = discretemap(self.cname, [ clrs[i] for i in indexes[lut-1] ])
if lut == 23:
self.cmap.set_bad('#777777')
else:
self.cmap.set_bad('#FFFFFF')
def show(self):
"""
List names of defined colormaps.
"""
print(' '.join(repr(n) for n in self.namelist))
def get(self, cname='rainbow_PuRd', lut=None):
"""
Return requested colormap, default is 'rainbow_PuRd'.
"""
self.cname = cname
if cname == 'rainbow_discrete':
self.__rainbow_discrete(lut)
else:
self.funcdict[cname]()
return self.cmap
def tol_cmap(colormap=None, lut=None):
"""
Continuous and discrete color sets for ordered data.
Return a matplotlib colormap.
Parameter lut is ignored for all colormaps except 'rainbow_discrete'.
"""
obj = TOLcmaps()
if colormap is None:
return obj.namelist
if colormap not in obj.namelist:
colormap = 'rainbow_PuRd'
print('*** Warning: requested colormap not defined,',
'known colormaps are {}.'.format(obj.namelist),
'Using {}.'.format(colormap))
return obj.get(colormap, lut)
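# Minimal usage sketch (assumes matplotlib.pyplot imported as plt and a 2-D
# array named `data`; both are placeholders, not part of this module):
#   plt.imshow(data, cmap=tol_cmap('sunset'))
#   plt.colorbar()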
def tol_cset(colorset=None):
"""
Discrete color sets for qualitative data.
Define a namedtuple instance with the colors.
Examples for: cset = tol_cset(<scheme>)
- cset.red and cset[1] give the same color (in default 'bright' colorset)
- cset._fields gives a tuple with all color names
- list(cset) gives a list with all colors
"""
from collections import namedtuple
namelist = ('bright', 'high-contrast', 'vibrant', 'muted', 'medium-contrast', 'light')
if colorset is None:
return namelist
if colorset not in namelist:
colorset = 'bright'
print('*** Warning: requested colorset not defined,',
'known colorsets are {}.'.format(namelist),
'Using {}.'.format(colorset))
if colorset == 'bright':
cset = namedtuple('Bcset',
'blue red green yellow cyan purple grey black')
return cset('#4477AA', '#EE6677', '#228833', '#CCBB44', '#66CCEE',
'#AA3377', '#BBBBBB', '#000000')
if colorset == 'high-contrast':
cset = namedtuple('Hcset',
'blue yellow red black')
return cset('#004488', '#DDAA33', '#BB5566', '#000000')
if colorset == 'vibrant':
cset = namedtuple('Vcset',
'orange blue cyan magenta red teal grey black')
return cset('#EE7733', '#0077BB', '#33BBEE', '#EE3377', '#CC3311',
'#009988', '#BBBBBB', '#000000')
if colorset == 'muted':
cset = namedtuple('Mcset',
'rose indigo sand green cyan wine teal olive purple pale_grey black')
return cset('#CC6677', '#332288', '#DDCC77', '#117733', '#88CCEE',
'#882255', '#44AA99', '#999933', '#AA4499', '#DDDDDD',
'#000000')
if colorset == 'medium-contrast':
cset = namedtuple('Mcset',
'light_blue dark_blue light_yellow dark_red dark_yellow light_red black')
return cset('#6699CC', '#004488', '#EECC66', '#994455', '#997700',
'#EE99AA', '#000000')
if colorset == 'light':
cset = namedtuple('Lcset',
'light_blue orange light_yellow pink light_cyan mint pear olive pale_grey black')
return cset('#77AADD', '#EE8866', '#EEDD88', '#FFAABB', '#99DDFF',
'#44BB99', '#BBCC33', '#AAAA00', '#DDDDDD', '#000000')
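# Minimal usage sketch (`xs`/`ys` are placeholder data):
#   cset = tol_cset('bright')
#   plt.plot(xs, ys, color=cset.blue)  # cset.blue == '#4477AA'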

# === repo: coderec_programming_states | file: coderec_programming_states-main/user_study_webapp/app_study/server.py ===

from bottle import route, run, static_file
import bottle
import threading
import json
import pickle
import argparse
import re
from time import sleep
import os
import random
parser = argparse.ArgumentParser()
app = bottle.Bottle()
index = -1
session_id = 0
logs = ""
my_module = os.path.abspath(__file__)
parent_dir = os.path.dirname(my_module)
static_dir = os.path.join(parent_dir, 'static')
json_path = ""
video_path = ""
@app.get("/")
def home():
with open('index.html', encoding='utf-8') as fl:
html = fl.read()
return html
@app.get('/static/<filename>')
def server_static(filename):
return static_file(filename, root=static_dir)
@app.post('/update_json')
def update_json():
# receive json from request
data = bottle.request.json
# save data to json file
with open(json_path, 'w') as outfile:
json.dump(data, outfile)
return
@app.post('/initialize_html')
def initialize_html():
# receive json from request
# load json file
with open(json_path, 'r') as fl:
data = json.load(fl)
logs = data['logs']
labeled_so_far = 0
for i in range(len(logs)):
if logs[i]['label'] != 'not_labeled':
labeled_so_far += 1
data_json = {'json_path': json_path, 'video_path': video_path, 'labeled_so_far': labeled_so_far}
return data_json
class Demo(object):
def __init__(self):
run_event = threading.Event()
run_event.set()
self.close_thread = True
threading.Thread(target=self.demo_backend).start()
app.run(host='localhost', port=8080)
try:
while 1:
pass
except KeyboardInterrupt:
print("Closing server...")
self.close_thread = False
def demo_backend(self):
global query, response
while self.close_thread:
sleep(0.01)
pass
#if query:
# response = self.model.ask(query)
# query = []
parser.add_argument('-p', '--path', help='Path to logs json', required=False) # change to True
parser.add_argument('-v', '--video', help='Path to video', required=False) # change to True
def main():
args = parser.parse_args()
global session_id, json_path, video_path
json_path = args.path
video_path = args.video
#session_id = int(args.session)
demo = Demo()
if __name__ == "__main__":
main()
# python server.py -p static/test.json -v static/july3-4pm-linearreg_accepts_trim.mp4

# === repo: coderec_programming_states | file: coderec_programming_states-main/action_prediction/generate_features.py ===

from requests import session
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import json
import copy
from tqdm import tqdm
import pickle
import logging
from tree_sitter import Language, Parser
logging.basicConfig(level=logging.INFO)
import argparse
import math
from get_code_label import get_prompt_label, parse_code
import torch
from transformers import AutoTokenizer, AutoModel
from datasets import Dataset, Features
from transformers import AutoModelForSequenceClassification
import os
from transformers import AutoTokenizer, AutoModelForMaskedLM
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path', help='Path to extended logs frame', required=True) # change to True
parser.add_argument('-c', '--cudadevice', help='cuda device id', default=0, required=True, type=int)
parser.add_argument('-b', '--batchsize', help='batch size', default=1000, required=True, type=int)
parser.add_argument('-o', '--output', help='Output path of .pkl file', required=True) # change to True
parser.add_argument('-e', '--embedding', help='Whether to get embeddings for suggestion and prompt', required=True, type=int)
parser.add_argument('-m', '--maxusers', help='max users', default=100, required=True, type=int)
parser.add_argument('-a', '--onlyacceptreject', help='only get features for accept reject events (1 if yes 0 ow)', default=0, required=False, type=int)
def get_embedding_list(list_of_strs, batch_size=16):
def tokenize_function_embedding(examples):
prompt_token = tokenizer(examples['text'], return_tensors="pt", padding="max_length", truncation=True )['input_ids']
encoded_tokens = model(prompt_token.to(device)).pooler_output.detach().cpu().numpy()
        out = {'encoded_tokens': encoded_tokens}  # avoid shadowing the builtin dict
        return out  # overall_tokens
#a = df_observations[0][0].CurrentPrompt.to_numpy()
dataset = Dataset.from_dict({"text": list_of_strs })
ds_train_tokenized = dataset.map(tokenize_function_embedding, batched= True, batch_size=batch_size)
embeddings = [ds_train_tokenized[i]['encoded_tokens'] for i in range(len(ds_train_tokenized))]
return embeddings
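# Usage sketch (assumes the module-level `tokenizer` and `model` CodeBERTa
# objects have been initialized in get_features below):
#   get_embedding_list(['def f(x):', 'return x + 1'])
# returns one pooled embedding vector per input string.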
def text_features(list_of_strs):
list_of_features = []
for str in list_of_strs:
numb_of_words = len(str.split())
# if includes #
includes_hash = '#' in str
# includes 'print'
includes_print = 'print' in str
# includes '='
includes_equal = '=' in str or '<=' in str or '>=' in str or '==' in str or '!=' in str
# includes 'for'
includes_for = 'for' in str
# includes 'while'
includes_while = 'while' in str
# includes 'if'
includes_if = 'if' in str
# includes 'else'
includes_else = 'else' in str
# includes 'def'
includes_def = 'def' in str
# includes 'class'
includes_class = 'class' in str
# includes 'import'
includes_import = 'import' in str
# includes 'from'
includes_from = 'from' in str
# includes 'return'
includes_return = 'return' in str
# includes 'try'
includes_try = 'try' in str
# includes 'except'
includes_except = 'except' in str
# includes 'raise'
includes_raise = 'raise' in str
# includes 'pass'
includes_pass = 'pass' in str
# includes 'continue'
includes_continue = 'continue' in str
# includes 'break'
includes_break = 'break' in str
# includes 'assert'
includes_assert = 'assert' in str
        # includes a triple-quoted string marker
        includes_quotes = "'''" in str
# concatenate all
features = [numb_of_words, includes_quotes, includes_hash, includes_print, includes_equal, includes_for, includes_while, includes_if, includes_else, includes_def, includes_class, includes_import, includes_from, includes_return, includes_try, includes_except, includes_raise, includes_pass, includes_continue, includes_break, includes_assert]
list_of_features.append(features)
return list_of_features
def get_features(input_path, cudadevice, batchsize, include_embedding, output_path, maxusers, onlyAcceptReject):
# load pickle file
df_observations = pickle.load(open(input_path, 'rb'))
global device, tokenizer, model
device = torch.device('cuda:'+str(cudadevice) if torch.cuda.is_available() else 'cpu')
if include_embedding:
tokenizer = AutoTokenizer.from_pretrained("huggingface/CodeBERTa-small-v1")
model = AutoModel.from_pretrained("huggingface/CodeBERTa-small-v1").to(device)
include_editpercentage = True
include_timeinstate = True
include_codelabels = True
include_codeembeddings = include_embedding
include_measurements = True
include_userID = True
include_textfeatures = True
max_users = min(maxusers, len(df_observations))
df_observations_features = []
label_to_enum = {'codeinit': 0, 'function def': 1, 'test_assert': 2, 'import': 3,
'control flow': 4, 'print': 5, 'error handling': 6, 'assignment': 7, 'comment': 8,
'binary_operator': 9, 'comparison': 10, 'expression': 11, 'docstring':12, 'other': 13}
user_counter = 0
feature_dict = {'Measurements: compCharLen, confidence, documentLength, numLines, numTokens, promptCharLen, promptEndPos, quantile': 0,
'edit percentage': 1, 'time_in_state': 2, 'session_features':3, 'suggestion_label':4, 'prompt_label':5,
'suggestion_embedding':6, 'prompt_embedding':7, 'suggestion_text_features':8, 'prompt_text_features':9, 'statename':10}
for session in tqdm(df_observations):
df_features = []
logging.info(f'user {user_counter/len(df_observations)*100:.3f} \n \n' )
if user_counter >= max_users:
break
user_counter += 1
if len(session) == 0:
continue
session_features = []
prev_row = [0] * 8
# get prompt embedding
indices_to_keep = []
for i in range(len(session)):
row = session.iloc[i]
indices_to_keep.append(i)
suggs_text = session.CurrentSuggestion.to_numpy()[indices_to_keep]
prompts_text = session.CurrentPrompt.to_numpy()[indices_to_keep]
# for each prompt only keep last 3 lines
# split based on \n
prompts_text = [prompt.split('\n') for prompt in prompts_text]
prompts_text = [prompt[-3:] for prompt in prompts_text]
# join back together
prompts_text = ['\n'.join(prompt) for prompt in prompts_text]
if include_codeembeddings:
sugg_embedding = get_embedding_list(suggs_text)
prompt_embedding = get_embedding_list(prompts_text)
sugg_text_features = text_features(suggs_text)
prompt_text_features = text_features(prompts_text)
for i, index in enumerate(indices_to_keep):
observation = []
row = session.iloc[index]
row_og = session.iloc[index]
last_shown = copy.deepcopy(index)
found_shown = False
while not found_shown and last_shown >0:
last_shown -= 1
if session.iloc[last_shown]['StateName'] == 'Shown' or session.iloc[last_shown]['StateName'] == 'Replay':
found_shown = True
if not found_shown:
last_shown = max(0, index-1)
if row_og['StateName'] != 'Accepted' and row_og['StateName'] != 'Rejected':
continue
row = session.iloc[last_shown]
try:
# for Accepts and Rejects
measurement_features = [row['Measurements']['compCharLen'],
row['Measurements']['confidence'],
row['Measurements']['documentLength'],
row['Measurements']['numLines'],
row['Measurements']['numTokens'],
row['Measurements']['promptCharLen'],
row['Measurements']['promptEndPos'],
row['Measurements']['quantile'],
row['Measurements']['meanAlternativeLogProb'],
row['Measurements']['meanLogProb']]
prev_row = measurement_features
except:
# for shown or browsing
try:
measurement_features = [row['Measurements']['compCharLen'],
prev_row[1],
row['Measurements']['documentLength'],
row['Measurements']['numLines'],
row['Measurements']['numTokens'],
row['Measurements']['promptCharLen'],
row['Measurements']['promptEndPos'],
prev_row[7],
row['Measurements']['meanAlternativeLogProb'],
row['Measurements']['meanLogProb']]
except:
measurement_features = prev_row
current_suggestion = row['CurrentSuggestion']
# get embedding, get code feature
# CurrentPrompt
current_prompt = row['CurrentPrompt']
            # keep only the last line of the prompt
            prompt_lines = current_prompt.split('\n')
            prompt_last_line = prompt_lines[-1:]
            prompt_last_line_str = '\n'.join(prompt_last_line)
            length_sug = len(current_suggestion)
            length_prompt = len(current_prompt)
            length_sug_words = len(current_suggestion.split(' '))
            length_prompt_words = len(current_prompt.split(' '))
            new_measurements = [length_sug, length_prompt, length_sug_words, length_prompt_words, index]
#measurement_features.extend(new_measurements)
new_measurements.extend(measurement_features)
edit_distance = row['EditPercentage']
time_spent_in_state = row['TimeSpentInState']
if include_measurements:
observation.append(new_measurements)
if include_editpercentage:
observation.append(edit_distance)
if include_timeinstate:
observation.append([time_spent_in_state])
observation.append([index, index/len(session), len(session)])
if include_codelabels:
sugg_label = get_prompt_label(current_suggestion)
sugg_label_enc = np.zeros(14)
sugg_label_enc[label_to_enum[sugg_label]] = 1
prompt_label = get_prompt_label(prompt_lines[-1]) # label last line
prompt_label_enc = np.zeros(14)
prompt_label_enc[label_to_enum[prompt_label]] = 1
observation.append(sugg_label_enc)
observation.append(prompt_label_enc)
if include_codeembeddings:
observation.append(sugg_embedding[i])
observation.append(prompt_embedding[i])
else:
observation.append(np.zeros(1))
observation.append(np.zeros(1))
if include_textfeatures:
observation.append(np.array(sugg_text_features[i]))
observation.append(np.array(prompt_text_features[i]))
# add label
observation.append(row_og['StateName'])
# make observation into numeric np array
observation = np.array(observation)#, dtype=np.float32)
session_features.append(observation)
df_observations_features.append(np.array(session_features))
    pickle.dump([df_observations_features, feature_dict, ], open(output_path, 'wb'))
def main():
args = parser.parse_args()
logging.info(args)
if args.embedding not in [0,1]:
raise ValueError('embedding argument must be 0 or 1')
get_features(args.path, args.cudadevice, args.batchsize, args.embedding, args.output, args.maxusers, args.onlyacceptreject)
if __name__ == '__main__':
main()
# call this script with
# python3 get_features.py --path ../data/observations.pkl --cudadevice 0 --batchsize 32 --embedding 1 --output ../data/features.pkl --maxusers 100
| 13,075 | 41.732026 | 349 |
py
|
coderec_programming_states
|
coderec_programming_states-main/action_prediction/get_code_label.py
|
from tree_sitter import Language, Parser
# This is from Victor's code modified by Hussein
base_path = '/home/hussein/code_git'
Language.build_library(
# Store the library in the `build` directory
base_path + '/treesitterbuild/my-languages.so',
# Include one or more languages
[
base_path + '/tree-sitter-python'
]
)
PY_LANGUAGE = Language(base_path +'/treesitterbuild/my-languages.so', 'python')
parser = Parser()
parser.set_language(PY_LANGUAGE)
def traverse_tree(root, code_byte):
stack = [root]
tokens, token_types = [], []
while stack:
node = stack.pop(0)
if node:
if node.type != "module":
token = code_byte[node.start_byte:node.end_byte]
tokens.append(token)
token_types.append(node.type)
# print(node.type, "****> ", token)
if node.children:
for child in node.children:
stack.append(child)
return tokens, token_types
def parse_code(code_string):
    code_byte = bytes(code_string, "utf8")
tree = parser.parse(code_byte)
cursor = tree.walk()
return traverse_tree(cursor.node, code_byte)
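# Illustrative example (assuming the tree-sitter grammar above is built):
# parse_code("x = 1") walks the concrete syntax tree breadth-first, returning
# parallel lists of byte tokens and node types such as
# ['expression_statement', 'assignment', 'identifier', '=', 'integer'].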
python_label_dict={
"function def": ["function_definition", "def","class_definition ", "class"],
"import": ["import_from_statement", "import_statement", "import"],
"control flow": ["if_statement","elif_clause","for_statement","else_clause", "with_statement","return_statement", "with", "return","if","else","elif","while","for"],
"error handling": ["try_statement", "except_clause","raise_statement", "try", "except","raise"],
"test_assert": ["assert_statement"],
"binary_operator": ["binary_operator"],
"assignment": ["assignment"],
"comment": ["comment"],
"comparison": ["comparison_operator"],
"expression": ["expression_statement"],
# "syntax error": ["ERROR"]
}
rev_label_dict = {}
for key in python_label_dict.keys():
for v in python_label_dict[key]:
rev_label_dict[v] = key
# print(rev_label_dict)
def crude_label(prompt_token):
# ## Create dictionary of tokens indicative of a type of prompt.
label_dict = {"codeinit": ["!/usr/", "#!/usr/"],
"function def": ["def ","de f" ,"class "],
"test_assert": ["assert "],
"import": ["import ","from "],
"control flow": ["if ","while ","for ","while ","else ", "elif ","return ", "with "],
"print":["print(", "print"],
"error handling": ["try ", "catch ", "except ", "raise "],
"assignment":["="],
"comment": ["# "],
}
rev_dict = {}
for key in label_dict.keys():
for v in label_dict[key]:
rev_dict[v] = key
# print(rev_dict)
if (prompt_token.strip()[0:3] == '"""'):
return "docstring"
# assign a label if the propmt-token contains any of the tokens in our library.
for label in label_dict.keys():
for token in label_dict[label]:
if token.lower() in prompt_token.lower():
return label
else:
return "other"
def get_prompt_label(prompt_token):
prompt_token = prompt_token.replace("de f","def")
prompt_token = prompt_token.replace("impor t","import")
prompt_token = prompt_token.replace("ran ge","range")
if ("!/usr/" in prompt_token or "#!/usr/" in prompt_token):
return "codeinit"
if (prompt_token.strip()[0:3] == '"""' or prompt_token.strip()[0:3] == "'''" or '"""' in prompt_token or "'''" in prompt_token):
return "docstring"
# parse prompt
tokens, token_types = parse_code(prompt_token)
# assign a label if the propmt-token contains any of the tokens in our library.
for label in python_label_dict.keys():
for token_type in python_label_dict[label]:
if token_type in token_types:
if token_type == "expression_statement":
if "print" in str(tokens[token_types.index(token_type)]):
return "print"
return label
else:
return crude_label(prompt_token)
# return "other"
| 4,255 | 34.764706 | 169 |
py
|
coderec_programming_states
|
coderec_programming_states-main/action_prediction/action_prediction_xgb.py
|
import matplotlib.pyplot as plt
import pandas as pd
import json
import copy
import numpy as np, scipy.stats as st
from tqdm import tqdm
import pickle
import logging
import argparse
import math
from xgboost import XGBClassifier
# logistic regression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score
from itertools import chain
import xgboost as xg
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
logging.basicConfig(level=logging.INFO)
from sklearn import preprocessing
# two local imports
from action_prediction_prep import get_features_labels
from action_prediction_prep import process_data
import matplotlib
# plot calibration of Copilot confidences with XGBoost predictions
from sklearn.calibration import calibration_curve
from sklearn.metrics import roc_auc_score
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path', help='Path to features array', required=True)
parser.add_argument('-c', '--usegpu', help='to use gpu (0 or 1)', default=0, required=True, type=int)
parser.add_argument('-s', '--splitbyusers', help='split by users or session (1 or 0)', default=0, required=True, type=int)
parser.add_argument('-o', '--output', help='output path folder', required=True)
parser.add_argument('-t', '--testpercentage', help='test percentage', default = 0.2, type =float)
parser.add_argument('-v', '--valpercentage', help='val percentage', default =0.1, type=float)
def main():
args = parser.parse_args()
path = args.path
use_gpu = args.usegpu
splitbyusers = args.splitbyusers
output_path = args.output
test_percentage = args.testpercentage
val_percentage = args.valpercentage
REMOVE_S_AND_R = True # remove shown and replay
features_to_keep = np.array([0,4,5,6,7,8,9])
label_index = np.array([10])
feature_dict = {'Measurements: compCharLen, confidence, documentLength, numLines, numTokens, promptCharLen, promptEndPos, quantile': 0,
'edit percentage': 1, 'time_in_state': 2, 'session_features':3, 'suggestion_label':4, 'prompt_label':5,
'suggestion_embedding':6, 'prompt_embedding':7, 'suggestion_text_features':8, 'prompt_text_features':9, 'statename':10}
df_observations_features, df_observations_labels = get_features_labels(path, features_to_keep, label_index, REMOVE_S_AND_R)
# split into train and test
SEQUENCE_MODE = False # keep session as a sequence or split it into events
SPLIT_BY_USER = bool(splitbyusers) # otherwise split by session uniformly
ADD_PREVIOUS_STATES = True
PREDICT_ACTION = True # Otherwise predict time in state
NORMALIZE_DATA = False # normalize data
test_percentage = args.testpercentage
val_percentage = args.valpercentage
previous_states_to_keep = 3
if not PREDICT_ACTION and SPLIT_BY_USER:
raise ValueError('Cannot predict time and split by user')
X_train, X_test, X_val, y_train, y_test, y_val = process_data(df_observations_features, df_observations_labels,
REMOVE_S_AND_R, SEQUENCE_MODE, SPLIT_BY_USER, ADD_PREVIOUS_STATES, PREDICT_ACTION, NORMALIZE_DATA,
test_percentage, val_percentage, previous_states_to_keep)
# train model
if PREDICT_ACTION:
if use_gpu:
model = XGBClassifier(tree_method='gpu_hist')
else:
model = XGBClassifier()
model.fit(X_train, y_train)
# predict
y_pred = model.predict(X_test)
# evaluate
print("Accuracy:", accuracy_score(y_test, y_pred))
accuracy = accuracy_score(y_test, y_pred)
confusion_matrix_act = confusion_matrix(y_test, y_pred)
classification_report_act = classification_report(y_test, y_pred)
print("Confusion Matrix:")
print(confusion_matrix(y_test, y_pred))
print("Classification Report:")
print(classification_report(y_test, y_pred))
y_pred_proba = model.predict_proba(X_test)
y_pred_proba = y_pred_proba[:,1]
print("AUC:", roc_auc_score(y_test, y_pred_proba))
auc = roc_auc_score(y_test, y_pred_proba)
pickle.dump([accuracy, confusion_matrix_act, classification_report_act, auc], open(output_path + '/action_prediction_results.pkl', 'wb'))
model.save_model(output_path+ "/model_trained.json")
# plot calibration curve
if PREDICT_ACTION:
y_pred_proba = model.predict_proba(X_test)[:,1]
        # calibration_curve returns (fraction of positives, mean predicted probability)
        prob_true, prob_pred = calibration_curve(y_test, y_pred_proba, n_bins=10)
        # Plot perfectly calibrated
        plt.plot([0, 1], [0, 1], linestyle = '--', label = 'Ideally Calibrated')
        # Plot model's calibration curve
        plt.plot(prob_pred, prob_true, marker = '.', label = 'XGBoost')
        pickle.dump([prob_pred, prob_true], open(output_path + '/xgb_calibration_curve.pkl', 'wb'))
leg = plt.legend(loc = 'upper left')
plt.xlabel('Average Predicted Probability in each bin')
plt.ylabel('Ratio of positives')
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.savefig(output_path + '/calibration_curve.pdf',dpi=1000)
plt.clf()
if PREDICT_ACTION:
# print a curve where x axis is ration and y axis is accuracy
coverages = []
accuracies = []
aucs = []
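        # selective-prediction sweep: keep only predictions whose confidence
        # max(p, 1-p) clears the threshold, then record coverage (fraction kept)
        # against accuracy on the kept subset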
        for threshold in np.arange(0.01, 1, 0.01):
            threshold_high = threshold
            y_pred_proba = model.predict_proba(X_test)[:,1]
            y_pred_proba = np.array([max(y_pred_proba[i], 1- y_pred_proba[i]) for i in range(len(y_pred_proba))])
            y_pred = model.predict(X_test)
            y_pred_high_confidence = y_pred[y_pred_proba > threshold_high]
            y_pred_proba_high_confidence = y_pred_proba[y_pred_proba > threshold_high]
            y_test_high_confidence = y_test[y_pred_proba > threshold_high]
coverages.append(len(y_pred_high_confidence)/len(y_pred))
accuracies.append(accuracy_score(y_test_high_confidence, y_pred_high_confidence))
# pickle data
pickle.dump([coverages, accuracies], open(output_path + '/xgb_coverage_accuracy.pkl', 'wb'))
plt.plot(coverages, accuracies)
        plt.xlabel('Coverage (based on thresholding model confidence)')
plt.ylabel('Accuracy')
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# more detailed y axis
plt.savefig(output_path + '/acc_vs_coverage.pdf',dpi=1000)
plt.clf()
# learning curve
if PREDICT_ACTION:
# empty 2 d array
max_trials = 1
training_data_percentage = np.array([0.005,0.01,0.05,0.1,0.25,0.5,0.75,0.99])
accuracies = [[] for _ in range(max_trials)]
aucs = [[] for _ in range(max_trials)]
for trial in range(max_trials):
training_sizes = []
for split_percentage in training_data_percentage:
# split train data using sklearn
_, X_train_frac, _, y_train_frac = train_test_split(X_train, y_train, test_size=split_percentage)
# train model
if use_gpu:
model = XGBClassifier(tree_method='gpu_hist')
else:
model = XGBClassifier()
model.fit(X_train_frac, y_train_frac)
# predict
y_pred = model.predict(X_test)
# evaluate
accuracies[trial].append(accuracy_score(y_test, y_pred))
aucs[trial].append(roc_auc_score(y_test, model.predict_proba(X_test)[:,1]))
training_sizes.append(len(X_train_frac))
# plot with error bars and means
accuracies = np.array(accuracies)
aucs = np.array(aucs)
pickle.dump([training_data_percentage, accuracies], open(output_path + '/xgb_learning_curve.pkl', 'wb'))
plt.errorbar(training_data_percentage, [np.mean(accuracies[:,i]) for i in range(len(accuracies[0]))], yerr=[np.std(accuracies[:,i]) for i in range(len(accuracies[0]))])
        plt.xlabel('Fraction of Training Data')
plt.ylabel('Accuracy')
plt.plot(0, np.mean(y_test), 'o', label='base rate')
plt.legend()
plt.savefig(output_path + '/learning_curve.pdf',dpi=1000)
plt.clf()
if __name__ == '__main__':
    main()
| 8,716 | 41.940887 | 176 |
py
|
triple-descent-paper
|
triple-descent-paper-master/nn-numeric/main.py
|
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import scipy
from collections import defaultdict
from utils import rot
from utils import get_data, hinge_regression, hinge_classification
from model import FullyConnected
import argparse
def train_and_test(model, tr_data, te_data, crit, task, opt, epochs, checkpoints):
tr_losses = []
te_losses = []
te_accs = []
for epoch in range(epochs):
epoch_loss = 0
for (x,y) in tr_data:
x, y = x.to(device), y.to(device)
opt.zero_grad()
out = model(x)
loss = crit(out, y)
loss.backward()
epoch_loss += loss.item()/len(tr_data)
opt.step()
if epoch in checkpoints:
tr_losses.append(epoch_loss)
te_loss, te_acc = test(model, te_data, crit, task)
te_losses.append(te_loss)
te_accs.append(te_acc)
return tr_losses, te_losses, te_accs
def test(model, te_data, crit, task):
with torch.no_grad():
for (x,y) in te_data:
x, y = x.to(device), y.to(device)
out = model(x)
test_loss = crit(out, y).item()
if task=='classification':
preds = out.max(1)[1]
test_acc = preds.eq(y).sum().float()/len(y)
test_acc = test_acc.item()
else:
test_acc = 0
break
return test_loss, test_acc
def test_ensemble(models, te_data, crit, task):
with torch.no_grad():
for (x,y) in te_data:
x, y = x.to(device), y.to(device)
outs = torch.stack([model(x) for model in models])
out = outs.mean(dim=0)
test_loss = crit(out, y).item()
if task=='classification':
preds = out.max(1)[1]
test_acc = preds.eq(y).sum().float()/len(y)
test_acc = test_acc.item()
else:
test_acc = 0
break
return test_loss, test_acc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--name', default=None, type=str)
parser.add_argument('--num_seeds', default=1, type=int)
    # argparse's type=bool treats any non-empty string (e.g. 'False') as True
    parser.add_argument('--no_cuda', default=False, type=lambda s: str(s).lower() in ('true', '1'))
parser.add_argument('--task', default='classification', type=str)
parser.add_argument('--dataset', default='random', type=str)
parser.add_argument('--loss_type', default='default', type=str)
parser.add_argument('--depth', default=1, type=int)
parser.add_argument('--teacher_width', default=100, type=int)
parser.add_argument('--teacher_depth', default=2, type=int)
parser.add_argument('--width', default=20, type=int)
parser.add_argument('--activation', default='relu', type=str)
parser.add_argument('--epochs', default=1000, type=int)
parser.add_argument('--d', default=2, type=int)
parser.add_argument('--n', default=100, type=int)
parser.add_argument('--n_test', default=1000, type=int)
    parser.add_argument('--noise', default=0.1, type=float)
    # --epsilon is used by the hinge losses below but was never registered;
    # a default of 0.1 is assumed here
    parser.add_argument('--epsilon', default=0.1, type=float)
    parser.add_argument('--test_noise', default=True, type=lambda s: str(s).lower() in ('true', '1'))
parser.add_argument('--n_classes', default=None, type=int)
parser.add_argument('--lr', default=0.01, type=float)
parser.add_argument('--mom', default=0.9, type=float)
parser.add_argument('--wd', default=0., type=float)
parser.add_argument('--bs', default=1000000, type=int)
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if args.no_cuda: device='cpu'
if not args.n_classes: args.n_classes = 1 if args.task=='regression' else 2
    # the parser default is the string 'default' (truthy), so test for it explicitly
    if args.loss_type == 'default': args.loss_type = 'mse' if args.task=='regression' else 'nll'
if args.task=='classification':
if args.loss_type == 'linear_hinge':
crit = lambda x,y : hinge_classification(x,y, type='linear')
elif args.loss_type == 'quadratic_hinge':
crit = lambda x,y : hinge_classification(x,y, type='quadratic')
elif args.loss_type == 'nll':
crit = nn.CrossEntropyLoss()
else:
raise NotImplementedError
elif args.task=='regression':
if args.loss_type == 'linear_hinge':
crit = lambda x,y : hinge_regression(x,y, epsilon=args.epsilon, type='linear')
elif args.loss_type == 'quadratic_hinge':
crit = lambda x,y : hinge_regression(x,y, epsilon=args.epsilon, type='quadratic')
elif args.loss_type == 'mse':
crit = nn.MSELoss()
else:
raise NotImplementedError
    else:
        raise ValueError('unknown task: ' + args.task)
torch.manual_seed(0)
with torch.no_grad():
teacher = FullyConnected(width=args.teacher_width, n_layers=args.teacher_depth, in_dim=args.d, out_dim=args.n_classes, activation=args.activation).to(device)
bs = min(args.bs, args.n)
n_batches = int(args.n/bs)
tr_data = get_data(args.dataset, args.task, n_batches, bs, args.d, args.noise, n_classes=args.n_classes, teacher=teacher)
test_noise = args.noise if args.test_noise else 0
te_data = get_data(args.dataset, args.task, 1, args.n_test, args.d, test_noise, n_classes=args.n_classes, teacher=teacher)
tr_losses = []
te_losses = []
te_accs = []
students = []
checkpoints = np.unique(np.logspace(0,np.log10(args.epochs),20).astype(int))
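    # ~20 evaluation epochs spaced logarithmically between 1 and args.epochs,
    # deduplicated after the cast to int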
for seed in range(args.num_seeds):
torch.manual_seed(seed)
student = FullyConnected(width=args.width, n_layers=args.depth, in_dim=args.d, out_dim=args.n_classes, activation=args.activation).to(device)
opt = torch.optim.SGD(student.parameters(), lr=args.lr, momentum=args.mom, weight_decay=args.wd)
tr_loss_hist, te_loss_hist, te_acc_hist = train_and_test(student, tr_data, te_data, crit, args.task, opt, args.epochs, checkpoints)
tr_losses.append(tr_loss_hist)
te_losses.append(te_loss_hist)
te_accs.append(te_acc_hist)
students.append(student)
tr_losses, te_losses, te_accs = np.array(tr_losses), np.array(te_losses), np.array(te_accs)
tr_loss, te_loss, te_acc = np.mean(tr_losses, axis=0), np.mean(te_losses, axis=0), np.mean(te_accs, axis=0)
te_loss_ens, te_acc_ens = test_ensemble(students, te_data, crit, args.task)
dic = {'args':args, 'checkpoints':checkpoints,
'tr_loss':tr_loss, 'te_loss':te_loss, 'te_acc':te_acc,
'te_loss_ens':te_loss_ens, 'te_acc_ens':te_acc_ens}
print(dic)
torch.save(dic, args.name+'.pyT')
| 6,525 | 40.303797 | 165 |
py
|
triple-descent-paper
|
triple-descent-paper-master/nn-numeric/utils.py
|
# some useful functions
import os
import shutil
import math
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
import numpy as np
def hinge_regression(output, target, epsilon=.1, type='quadratic'):
power = 1 if type=='linear' else 2
delta = (output-target).abs()-epsilon
loss = torch.nn.functional.relu(delta)*delta.pow(power)
return loss.mean()
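# The loss is zero inside the tube |output - target| <= epsilon; outside it,
# relu(delta) gates delta**power, so the penalty grows like delta**2 for
# type='linear' (power=1) and delta**3 for type='quadratic' (power=2).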
def hinge_classification(output,target,epsilon=.5, type='quadratic'):
power = 1 if type=='linear' else 2
output_size=output.size(1)
if output_size==1:
target = 2*target.double()-1
return 0.5*(epsilon-output*target).mean()
delta = torch.zeros(output.size(0))
for i,(out,tar) in enumerate(zip(output,target)):
tar = int(tar)
delta[i] = epsilon + torch.cat((out[:tar],out[tar+1:])).max() - out[tar]
loss = 0.5 * torch.nn.functional.relu(delta).pow(power).mean()
return loss
def normalize(x):
mean = x.mean(dim=0, keepdim=True)
std = x.std(dim=0, keepdim=True)
std[std==0]=1
return (x-mean)/std
def get_data(dataset, task, n_batches, bs, d, noise, var=.5, n_classes=None, teacher=None, train=True):
n = n_batches*bs
if dataset=='random':
data = torch.randn(n, d)
else:
assert d**0.5 == int(d**0.5)
transforms = torchvision.transforms.Compose([
torchvision.transforms.Resize(int(d**0.5)),
torchvision.transforms.ToTensor()])
dataset = getattr(torchvision.datasets, dataset.upper())('~/data', train=train, download=True, transform=transforms)
data = normalize(torch.cat([dataset[mu][0] for mu in range(n)]).view(n,d))
data = data*d**0.5/data.norm(dim=-1,keepdim=True)
with torch.no_grad():
dataset = []
# Gaussian Mixture
# if task=='classification':
# for i in range(n_batches):
# vectors = torch.randn(n_classes, d)
# if n_classes==2:
# vectors[0] = torch.ones(d)
# vectors[1] = -torch.ones(d)
# labels = torch.randint(n_classes,(bs,))
# x = torch.ones(bs,d)
# y = torch.ones(bs)
# for j, label in enumerate(labels):
# x[j] = vectors[label] + var * torch.randn(d)
# y[j] = label if np.random.random()>noise else np.random.randint(n_classes)
# dataset.append((x,y.long()))
if task=='classification':
for i in range(n_batches):
x = data[i*bs:(i+1)*bs]
y = teacher(x).max(1)[1].squeeze()
for j in range(len(y)):
if np.random.random()<noise:
y[j]= np.random.randint(n_classes)
dataset.append((x,y.long()))
elif task=='regression':
for i in range(n_batches):
x = data[i*bs:(i+1)*bs]
y = teacher(x)+noise*torch.randn((bs,1))
dataset.append((x,y))
return dataset
def rot(x, th):
with torch.no_grad():
rotation = torch.eye(len(x))
rotation[:2,:2] = torch.Tensor([[np.cos(th),np.sin(th)],[-np.sin(th), np.cos(th)]])
return rotation @ x
def who_am_i():
import subprocess
whoami = subprocess.run(['whoami'], stdout=subprocess.PIPE)
whoami = whoami.stdout.decode('utf-8')
whoami = whoami.strip('\n')
return whoami
def copy_py(dst_folder):
# and copy all .py's into dst_folder
if not os.path.exists(dst_folder):
print("Folder doesn't exist!")
return
for f in os.listdir():
if f.endswith('.py'):
shutil.copy2(f, dst_folder)
class FastMNIST(torchvision.datasets.MNIST):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
self.data = self.data.unsqueeze(1).float().div(255)
self.data = self.data.sub_(self.data.mean()).div_(self.data.std())
self.data, self.targets = self.data.to(self.device), self.targets.to(self.device)
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
return img, target, index
class FastFashionMNIST(torchvision.datasets.FashionMNIST):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
self.data = self.data.unsqueeze(1).float().div(255)
self.data = self.data.sub_(self.data.mean()).div_(self.data.std())
self.data, self.targets = self.data.to(self.device), self.targets.to(self.device)
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
return img, target, index
class FastCIFAR10(torchvision.datasets.CIFAR10):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
self.data = torch.from_numpy(self.data).float().div(255)
self.data = self.data.sub_(self.data.mean()).div_(self.data.std())
self.data, self.targets = self.data.to(self.device), torch.LongTensor(self.targets).to(self.device)
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
return img, target, index
def get_pca(tr_data, te_data, input_size, normalized = True):
device = tr_data.device
tr_data.data = tr_data.data.view(tr_data.data.size(0),-1)
te_data.data = te_data.data.view(te_data.data.size(0),-1)
x = tr_data.data.cpu()
# DATA IS ALREADY NORMALIZED
# m = x.mean(0).expand_as(x)
# u,s,v = torch.svd(torch.t(x-m))
u,s,v = torch.svd(torch.t(x))
if normalized:
tr_data.data = (tr_data.data) @ u[:, :input_size].to(device) / s[:input_size].to(device) ** 0.5
te_data.data = (te_data.data) @ u[:, :input_size].to(device) / s[:input_size].to(device) ** 0.5
    else:
        # unwhitened projection; the doubled .to(device) and stray **0.5 on u in the
        # original looked like a copy-paste slip, so a plain projection is assumed here
        tr_data.data = (tr_data.data) @ u[:, :input_size].to(device)
        te_data.data = (te_data.data) @ u[:, :input_size].to(device)
return tr_data, te_data
| 6,540 | 38.167665 | 124 |
py
|
triple-descent-paper
|
triple-descent-paper-master/nn-numeric/model.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class MulConstant(nn.Module):
def __init__(self, constant=10):
super(MulConstant, self).__init__()
self.constant = constant
def forward(self, x):
return x * self.constant
def backward(self, g): # is this necessary?
return g * self.constant, None
class FullyConnected(nn.Module):
def __init__(self, sigma_0=1.4142135381698608, in_dim=28*28, width=512, n_layers=1, out_dim=10, bias=True, activation='relu', ntk_scaling=False):
super(FullyConnected, self).__init__()
self.in_dim = in_dim
self.width = width
self.n_layers = n_layers
self.bias = bias
self.out_dim = out_dim
        if activation=='linear':
            self.activation = nn.Identity()
        elif activation=='abs':
            self.activation = nn.PReLU(init=-1)
            self.activation.weight.requires_grad=False
        elif activation=='relu':
            self.activation = nn.ReLU()
        elif activation=='tanh':
            self.activation = nn.Tanh()
self.layers = []
self.layers.append(nn.Linear(self.in_dim, self.width, bias=self.bias))
if ntk_scaling:
self.layers.append(MulConstant( 1 / (self.in_dim ** 0.5)))
self.layers.append(self.activation)
for i in range(self.n_layers-1):
self.layers.append(nn.Linear(self.width, self.width, bias=self.bias))
if ntk_scaling:
self.layers.append(MulConstant( 1 / (self.width ** 0.5)))
self.layers.append(self.activation)
self.layers.append(nn.Linear(self.width, self.out_dim, bias=self.bias),)
if ntk_scaling:
self.layers.append(MulConstant( 1 / (self.width ** 0.5)))
self.net = nn.Sequential(*self.layers)
# NTK initialization
if ntk_scaling:
with torch.no_grad():
for m in self.net.modules():
if isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0, std=sigma_0)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.net(x)
return x
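# Minimal usage sketch (illustrative values): a 2-hidden-layer ReLU network
# mapping R^2 -> R^2 for a batch of 8 points:
# net = FullyConnected(in_dim=2, width=32, n_layers=2, out_dim=2)
# y = net(torch.randn(8, 2)) # y.shape == (8, 2)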
| 2,314 | 34.615385 | 149 |
py
|
triple-descent-paper
|
triple-descent-paper-master/nn-numeric/run.py
|
import os
import time
import subprocess
import itertools
import collections
import argparse
import torch
import numpy as np
from utils import copy_py, who_am_i
def create_script(params):
script = '''#!/bin/bash
#SBATCH --gres=gpu:1
#SBATCH --mem=10GB
#SBATCH --nodes=1
#SBATCH --output={name}.out
#SBATCH --job-name={name}
#SBATCH --cpus-per-task=1
ulimit -n 64000
python main.py --name {name} --epochs {epochs} --noise {noise} --n {n} --width {width} --num_seeds {num_seeds} --lr {lr} --d {d} --test_noise {test_noise} --loss_type {loss_type} --n_classes 1 --task regression --no_cuda False --depth {depth} --wd {wd} --activation {activation} --dataset {dataset}
'''.format(**params)
with open('{}.sbatch'.format(params['name']), 'w') as f:
f.write(script)
# with open('{}.params'.format(params['name']), 'wb') as f:
# torch.save(params, f)
def send_script(file):
process = subprocess.Popen(['sbatch', file], stdout=subprocess.PIPE)
if __name__ == '__main__':
exp_dir = 'r.{}'.format(int(time.time()))
os.mkdir(exp_dir)
copy_py(exp_dir)
os.chdir(exp_dir)
widths = np.unique(np.logspace(0, 2.5, 20).astype(int))
ns = np.logspace(1,5,20).astype(int)
grid = collections.OrderedDict({
'width' : widths,
'n' : ns,
        'depth': [1,2],
'wd' : [0., 0.05],
'activation' : ['tanh'],
'dataset' : ['random'],
'noise' : [0,0.5,5],
'lr' : [0.01],
'd' : [14*14],
'num_seeds' : [10],
'test_noise' : [False],
'loss_type' : ['mse'],
'epochs' : [1000],
})
def dict_product(d):
keys = d.keys()
for element in itertools.product(*d.values()):
yield dict(zip(keys, element))
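    # e.g. dict_product({'a': [1, 2], 'b': [3]}) yields
    # {'a': 1, 'b': 3} then {'a': 2, 'b': 3}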
for i, params in enumerate(dict_product(grid)):
torch.save(grid, 'params.pkl')
params['name'] = '{:06d}'.format(i)
create_script(params)
file_name = '{}.sbatch'.format(params['name'])
send_script(file_name)
| 2,016 | 27.814286 | 298 |
py
|
TSE22
|
TSE22-main/src/Script-Distro-Metrics-Projects.py
|
import requests
import csv
import datetime
#ADD YOUR CREDENTIALS TO CONTRIBUTE
credentials = [
{'user': 'TestSmell', 'token': 'YOUR CREDENTIALS'},
{'user': 'Flaky', 'token': 'YOUR CREDENTIALS'}
]
username = credentials[0]['user']
token = credentials[0]['token']
countSession = 0 #session indice on variable credentials
countRequest = 0 #init request count per repository
countCommits = 0 #init commit count per repository
def changeToken():
global credentials, countSession, countRequest, username, token
if(countSession >= (len(credentials)-1)):
countSession = 0
else:
countSession = countSession + 1
username= credentials[countSession]['user']
token= credentials[countSession]['token']
countRequest = 0
# create a re-usable session object with the user creds in-built
gh_session = requests.Session()
gh_session.auth = (username, token)
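# note: requests.get below authenticates per call via the token header, so
# changeToken() rotating the global `token` takes effect on the next request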
dates = []
ncomments = []
count = 0
# add flaky and/or non-flaky sheet path
with open('IssuesNonFlaky.csv') as f:
for line in f:
count = count+1
        url = line.strip() # strip whitespace/newline; replacing every occurrence of the last character was unsafe
        url = url.replace('https://github.com', 'https://api.github.com/repos')
print(url)
results = requests.get(url, headers={'Authorization': 'token '+token})
countRequest = countRequest+1
result = results.json()
if(results.status_code == 200):
if result['created_at'] is None or result['closed_at'] is None:
dates.append('N/D')
ncomments.append('N/D')
else:
                created_at = result['created_at']
                d1 = datetime.date(int(created_at[0:4]), int(created_at[5:7]), int(created_at[8:10]))
                closed_at = result['closed_at']
                d2 = datetime.date(int(closed_at[0:4]), int(closed_at[5:7]), int(closed_at[8:10]))
                # closed minus created, so resolution times come out positive
                time_elapsed = d2 - d1
                dates.append(time_elapsed.days)
ncomments.append(result['comments'])
#print(dates,ncomments)
if (countRequest == 100 or countRequest == 300 or countRequest == 500 or countRequest == 800 or countRequest == 930):
print('PRE-PRINT')
print(dates,ncomments)
            #rotate credentials when the request count approaches 1000
if (countRequest > 950):
changeToken()
else:
dates.append("N/A")
ncomments.append("N/A")
print(dates,ncomments)
| 3,034 | 36.469136 | 129 |
py
|
TSE22
|
TSE22-main/src/Script-IssuesNonFlaky.py
|
import requests
import json
import csv
#ADD YOUR CREDENTIALS TO CONTRIBUTE
credentials = [
{'user': 'Flaky', 'token': 'YOUR CREDENTIALS'}
]
username = credentials[0]['user']
token = credentials[0]['token']
countSession = 0 #session indice on variable credentials
countRequest = 0 #init request count per repository
countCommits = 0 #init commit count per repository
def changeToken():
global credentials, countSession, countRequest, username, token
if(countSession >= (len(credentials)-1)):
countSession = 0
else:
countSession = countSession + 1
username= credentials[countSession]['user']
token= credentials[countSession]['token']
countRequest = 0
# create a re-usable session object with the user creds in-built
gh_session = requests.Session()
gh_session.auth = (username, token)
languages=["C","Go","Python","Java","JavaScript"]
search_str="test"
label_str="bug"
state_str="closed"
# Sorts the results of your query by the number of comments, reactions, reactions-+1, reactions--1, reactions-smile, reactions-thinking_face, reactions-heart, reactions-tada, or interactions. You can also sort results by how recently the items were created or updated, Default: best match
sort_str="created"
# Determines whether the first search result returned is the highest number of matches (desc) or lowest number of matches (asc). This parameter is ignored unless you provide sort. Default: desc
order_str="asc"
pagesize_str="100"
# print(result)
numpages=10
for language_str in languages:
print("------- {language} -------".format(language=language_str))
with open('IssuesNonFlaky.csv', 'a', newline='') as file:
        fieldnames = ["URL"]
        writer = csv.DictWriter(file, fieldnames=fieldnames)
#writer.writeheader()
writer.writerow({"URL": language_str})
for i in range(1, numpages+1):
print(" Â page {} of {}".format(i, numpages))
        #rotate credentials when the request count approaches 1000
if (countRequest > 950):
changeToken()
# request a new page
repos_url='https://api.github.com/search/issues?q={query}+label:{label}+language:{language}+state:{state}+is:issue&order={order}&page={numpages}&per_page={pagesize}'.format(query=search_str,label=label_str,language=language_str,state=state_str,order=order_str,numpages=str(i),pagesize=pagesize_str)
countRequest = countRequest+1
print(repos_url)
# parse text in a json tree (object graph)
result = json.loads(gh_session.get(repos_url).text)
for item in result['items']:
print(item['html_url'])
with open('IssuesNonFlaky.csv', 'a', newline='') as file:
                fieldnames = ["URL"]
                writer = csv.DictWriter(file, fieldnames=fieldnames)
#writer.writeheader()
writer.writerow({"URL": item["html_url"]})
| 3,176 | 41.932432 | 306 |
py
|
TSE22
|
TSE22-main/src/Script-flakiness.py
|
import requests
import json
import csv
# credentials (please, don't share)
username = ' '
token = ' '
# create a re-usable session object with the user creds in-built
gh_session = requests.Session()
gh_session.auth = (username, token)
languages=["go", "python", "java", "js"]
search_str="flaky AND test"
label_str="bug"
state_str="closed"
# Sorts the results of your query by the number of comments, reactions, reactions-+1, reactions--1, reactions-smile, reactions-thinking_face, reactions-heart, reactions-tada, or interactions. You can also sort results by how recently the items were created or updated, Default: best match
sort_str="created"
# Determines whether the first search result returned is the highest number of matches (desc) or lowest number of matches (asc). This parameter is ignored unless you provide sort. Default: desc
order_str="asc"
pagesize_str="100"
# print(result)
numpages=3
for language_str in languages:
print("------- {language} -------".format(language=language_str))
with open('analise.csv', 'a', newline='') as file:
        fieldnames = ["URL"]
        writer = csv.DictWriter(file, fieldnames=fieldnames)
#writer.writeheader()
writer.writerow({"URL": language_str})
for i in range(1, numpages+1):
print(" Â page {} of {}".format(i, numpages))
# request a new page
repos_url='https://api.github.com/search/issues?q={query}+label:{label}+language:{language}+state:{state}&sort={sort}&order={order}&page={numpages}&per_page={pagesize}'.format(query=search_str,label=label_str,language=language_str,state=state_str,sort=sort_str,order=order_str,numpages=str(i),pagesize=pagesize_str)
# parse text in a json tree (object graph)
result = json.loads(gh_session.get(repos_url).text)
for item in result['items']:
print(item['html_url'])
with open('analise.csv', 'a', newline='') as file:
                fieldnames = ["URL"]
                writer = csv.DictWriter(file, fieldnames=fieldnames)
#writer.writeheader()
writer.writerow({"URL": item["html_url"]})
| 2,417 | 46.411765 | 323 |
py
|
networkm
|
networkm-master/setup.py
|
from pkg_resources import parse_version
from configparser import ConfigParser
import setuptools,re,sys
assert parse_version(setuptools.__version__)>=parse_version('36.2')
# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
cfg = config['DEFAULT']
cfg_keys = 'version description keywords author author_email'.split()
expected = cfg_keys + "lib_name user branch license status min_python audience language".split()
for o in expected: assert o in cfg, "missing expected setting: {}".format(o)
setup_cfg = {o:cfg[o] for o in cfg_keys}
if len(sys.argv)>1 and sys.argv[1]=='version':
print(setup_cfg['version'])
exit()
licenses = {
'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'),
'mit': ('MIT License', 'OSI Approved :: MIT License'),
'gpl2': ('GNU General Public License v2', 'OSI Approved :: GNU General Public License v2 (GPLv2)'),
'gpl3': ('GNU General Public License v3', 'OSI Approved :: GNU General Public License v3 (GPLv3)'),
'bsd3': ('BSD License', 'OSI Approved :: BSD License'),
}
statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
'4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
py_versions = '2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8'.split()
lic = licenses.get(cfg['license'].lower(), (cfg['license'], None))
min_python = cfg['min_python']
requirements = ['pip', 'packaging']
if cfg.get('requirements'): requirements += cfg.get('requirements','').split()
if cfg.get('pip_requirements'): requirements += cfg.get('pip_requirements','').split()
dev_requirements = (cfg.get('dev_requirements') or '').split()
long_description = open('README.md').read()
# 
for ext in ['png', 'svg']:
    long_description = re.sub(r'!\['+ext+'\]\((.*)\)', '+'/'+cfg['branch']+'/\\1)', long_description)
long_description = re.sub(r'src=\"(.*)\.'+ext+'\"', 'src=\"https://raw.githubusercontent.com/{}/{}'.format(cfg['user'],cfg['lib_name'])+'/'+cfg['branch']+'/\\1.'+ext+'\"', long_description)
setuptools.setup(
name = cfg['lib_name'],
license = lic[0],
classifiers = [
'Development Status :: ' + statuses[int(cfg['status'])],
'Intended Audience :: ' + cfg['audience'].title(),
'Natural Language :: ' + cfg['language'].title(),
] + ['Programming Language :: Python :: '+o for o in py_versions[py_versions.index(min_python):]] + (['License :: ' + lic[1] ] if lic[1] else []),
url = cfg['git_url'],
packages = setuptools.find_packages(),
include_package_data = True,
install_requires = requirements,
extras_require={ 'dev': dev_requirements },
python_requires = '>=' + cfg['min_python'],
long_description = long_description,
long_description_content_type = 'text/markdown',
zip_safe = False,
entry_points = { 'console_scripts': cfg.get('console_scripts','').split() },
**setup_cfg)
| 3,094 | 46.615385 | 193 |
py
|
networkm
|
networkm-master/networkm/model_functions.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_model_functions.ipynb (unless otherwise specified).
__all__ = ['XOR', 'boolxor', 'smoothxor', 'MPX', 'MUX', 'boolmux', 'smoothmux', 'NOT', 'boolnot', 'smoothnot', 'COPY',
'boolcopy', 'smoothcopy', 'AND', 'booland', 'smoothand', 'OR', 'boolor', 'smoothor', 'NOR', 'NAND', 'XNOR',
'boonlor', 'smoothnor', 'boonlnand', 'smoothnand', 'NXOR', 'boonlxnor', 'smoothxnor', 'BOOL', 'sigmoid',
'nextafter', 'test', 'bool_model_iter', 'bool_initial_conditions', 'setup_bool_integral', 'bool_integral',
'bool_integral_risefall', 'plot_graph', 'bool_model']
# Cell
import warnings
with warnings.catch_warnings(): #ignore warnings
warnings.simplefilter("ignore")
import networkx as nx
import numpy as np
import sidis
rng=sidis.RNG(0)
import matplotlib.pyplot as plt
import typing
from typing import Optional, Tuple, Dict, Callable, Union, Mapping, Sequence, Iterable, Hashable, List, Any
from collections import namedtuple
import numba
from numba import njit
from .graph_functions import *
# Cell
@njit
def XOR(x : List[Union[int,float]]) -> Union[int,float]:
'''
Arbitrary input XOR using recursiveness.
'''
x0=x[0]
for x1 in x[1:]:
and1=x0*(1-x1)
and2=x1*(1-x0)
x0=and1+and2-and1*and2
return x0
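# Examples: XOR reduces pairwise over its inputs, so
# XOR(np.array([1., 0., 1.])) == 0.0 and XOR(np.array([1., 0.])) == 1.0;
# fractional inputs yield smooth intermediate values.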
# Cell
XOR.mask=0
boolxor=XOR
smoothxor=XOR
# Cell
@njit
def MPX(x : List[Union[int,float]]) -> Union[int,float]:
'''
Simply returns `x`.
'''
return x[0]
# Cell
MPX.mask=1
MUX=MPX
boolmux=MPX
smoothmux=MPX
# Cell
@njit
def NOT(x : List[Union[int,float]]) -> Union[int,float]:
'''
Return conjugate of `x`.
'''
return 1-x[0]
# Cell
NOT.mask=2
boolnot=NOT
smoothnot=NOT
# Cell
@njit
def COPY(x : List[Union[int,float]]) -> Union[int,float]:
'''
Simply returns `x`.
'''
return x[0]
# Cell
COPY.mask=3
boolcopy=COPY
smoothcopy=COPY
# Cell
@njit
def AND(x : List[Union[int,float]]) -> Union[int,float]:
'''
    Return the logical AND over the entries of `x`.
'''
x0=x[0]
for x1 in x[1:]:
x0=x0*x1
return x0
# Cell
AND.mask=4
booland=AND
smoothand=AND
# Cell
@njit
def OR(x : List[Union[int,float]]) -> Union[int,float]:
'''
    Return the logical OR over the entries of `x`.
'''
x0=x[0]
for x1 in x[1:]:
x0=x0+x1-x1*x0
return x0
# Cell
OR.mask=5
boolor=OR
smoothor=OR
# Cell
@njit
def NOR(x : List[Union[int,float]]) -> Union[int,float]:
'''
    Return the logical NOR over the entries of `x` (1 - OR; see DeMorgan's Laws).
'''
x0=x[0]
for x1 in x[1:]:
x0=x0+x1-x1*x0
return 1-x0
NOR.mask=6
boonlor=NOR
smoothnor=NOR
@njit
def NAND(x : List[Union[int,float]]) -> Union[int,float]:
'''
    Return the logical NAND over the entries of `x` (1 - AND; see DeMorgan's Laws).
'''
x0=x[0]
for x1 in x[1:]:
x0=x0*x1
return 1-x0
NAND.mask=7
boonlnand=NAND
smoothnand=NAND
@njit
def XNOR(x : List[Union[int,float]]) -> Union[int,float]:
'''
    Arbitrary-input XNOR: the complement of the recursive XOR.
'''
x0=x[0]
for x1 in x[1:]:
and1=x0*(1-x1)
and2=x1*(1-x0)
x0=and1+and2-and1*and2
return 1-x0
XNOR.mask=8
NXOR=XNOR
boonlxnor=XNOR
smoothxnor=XNOR
# Cell
@njit
def BOOL(x : np.ndarray,
mask : int = 0):
'''
'x': 2-D array containing list of list of args
for the boolean function defined by `mask`.
'''
y=np.zeros(x.shape[0],dtype=x.dtype)
if mask==0:
for i in range(x.shape[0]):
y[i]=XOR(x[i])
elif mask==1:
for i in range(x.shape[0]):
y[i]=MPX(x[i])
elif mask==2:
for i in range(x.shape[0]):
y[i]=NOT(x[i])
elif mask==3:
for i in range(x.shape[0]):
y[i]=COPY(x[i])
elif mask==4:
for i in range(x.shape[0]):
y[i]=AND(x[i])
elif mask==5:
for i in range(x.shape[0]):
y[i]=OR(x[i])
elif mask==6:
for i in range(x.shape[0]):
y[i]=NOR(x[i])
elif mask==7:
for i in range(x.shape[0]):
y[i]=NAND(x[i])
elif mask==8:
for i in range(x.shape[0]):
y[i]=XNOR(x[i])
return y
# Cell
BOOL.masks={XOR:0,MPX:1,NOT:2,COPY:3,AND:4,OR:5,NOR:6,NAND:7,XNOR:8}
# Cell
nextafter=np.nextafter(0., 1.)
@njit
def sigmoid(x : Union[int,float,np.ndarray],
a : Union[int,float,np.ndarray] = np.inf):
'''
Sigmoid function; returns [1 + tanh( a*(x-0.5) )] / 2.
When `a`=np.inf, mimics np.rint function.
When `a`=0, returns constant 0.5.
    When `a`=1, varies smoothly between 0 and 1, approximating `x` near 0.5.
When `a`>1, smoothly approaches np.rint.
Faster than the np.rint and round functions,
due to the order of operations on temporary variable `A`.
Adds small value to fix edge case x=0.5
which otherwise returns np.nan when a=np.inf.
'''
A=(1+np.tanh(a*(x-0.5+nextafter)))/2
return A
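# Examples: sigmoid(np.array([0.2])) -> ~0.0 (the default a=np.inf mimics
# rounding), while sigmoid(np.array([0.2]), a=0) -> 0.5 (constant).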
# Cell
test=sigmoid(np.array([0.1*i for i in range(11)]))
# Cell
def bool_model_iter(g,return_funcs=False):
'''
Construct a list of iterables used in the calculation
of a delay-differential-equation (dde) for a network `g`.
Returns a list-of-lists containing
[node_start,node_stop,in-degree,function-mask]
for each group of nodes sharing the same function.
'''
#make sure graph has been sorted
try:
sorting=g.sorting
except:
sort_graph(g)
sorting=g.sorting
node_funcs={n:g.nodes[n]['f'] for n in g.nodes}
#group functions as {func:[list of nodes with func]}
funcs=nx.utils.groups(sidis.cast(node_funcs,dict))
funcs={k:list(v) for k,v in funcs.items()}
#obtain index array of [first_node,last_node,in_degree,function_mask]
itr=np.array([[n[0], #first node having function group
n[-1]+1, #last node +1 for indexing
g.in_degree(n[0]), #in-degree
f.mask] #mask
for f,n in funcs.items()]).astype(np.int64)
if not return_funcs:
return itr
else:
return itr,funcs
# Cell
def bool_initial_conditions(g,
init : Optional[List] = None,
hold : Optional[List] = None,
steady : bool = True
):
'''
Parse the initial conditions of the network, including the node states
and holding times (times for which the nodes are forced to the initial state).
If there are multiplexer nodes, the initial conditions are applied only to these
nodes, and the rest are allowed to freely integrate without any hold time.
If there are not multiplexer nodes, and steady is True,
the initial condition is transformed according to the boolean function of each node.
For example, a one-node network that executes the NOT of itself would have the
initial condition [0] be transformed into [1].
This mimics the real behavior of the network in hardware with multiplexers.
'''
mpx_nodes = where(g,f=MPX)
if mpx_nodes:
original_nodes = [i for i in g.nodes if i not in mpx_nodes]
if init is None: #default to 1 logic high node
init = [1]+[0 for i in mpx_nodes[1:]]
#set original nodes to 0
init=np.array([0 for i in original_nodes]+list(init))
if hold is None: #default to hold time of 1 for mpxs
hold = [1 for i in mpx_nodes]
#don't hold non-mpxs
hold = np.array([0 for i in original_nodes]+list(hold))
else: #not mpx model; need to transform challenge
if init is None: #default to 1 node logic high
init = [1]+[0 for i in range(len(g)-1)]
if steady:
#transform init by evaluating boolean func
init=np.array([g.nodes[n]['f'](np.array([init[j] \
for j in list(g.predecessors(n))])) for n in g.nodes])
#default to hold time of 1
if hold is None:
hold = np.array([1 for i in g.nodes])
else:
hold = np.array(hold)
#init=np.array([0 for i in range(len(g)-len(init))]+list(init))
#hold=np.array([0 for i in range(len(g)-len(hold))]+list(hold))
return np.array(init),np.array(hold)
# Cell
def setup_bool_integral(g : nx.MultiDiGraph,
init : Optional[List[float]] = None,
hold : Optional[List[int]] = None,
T : int = 15,
dt : float = 0.01,
noise : float = 0,
steady : bool = True
):
'''
Setup the Boolean integral with the list of initial conditions `init`
and times to hold the nodes to those values `hold`. If `steady`, will
    transform the initial conditions to their steady-state values using the
    dynamical functions `f`. `T`, `dt` determine the time and timestep
    of the integral. `noise` is the amplitude of noise on each node
randomly drawn from [0,noise].
'''
iterator = bool_model_iter(g)
T=int(T/dt)
ndata=node_data(g,'a','tau')
edata=edge_data(g,'delay')
time_delays=np.array(edata['delay']).astype(np.int64)
sigmoid_constants=np.array(ndata['a']).astype(np.longdouble)
time_constants=np.array(ndata['tau']).astype(np.longdouble)
predecessors=np.concatenate([list(g.predecessors(n)) for n in g.nodes]).astype(np.int64)
noise=rng.random(-noise,noise,shape=(T,len(g))).astype(np.longdouble)
initial_conditions,hold_times=bool_initial_conditions(g,
init=init,
hold=hold,
steady=steady)
hold_times=(hold_times/dt).astype(np.int64)
time_delays=(time_delays/dt).astype(np.int64)
return iterator,time_delays,sigmoid_constants,time_constants,predecessors,noise,\
initial_conditions,hold_times
# Cell
@sidis.timer
@njit
def bool_integral(iterator : np.ndarray,
time_delays : np.ndarray,
sigmoid_constants : np.ndarray,
time_constants : np.ndarray,
predecessors : np.ndarray,
noise : np.ndarray,
initial_conditions : np.ndarray,
hold_times : np.ndarray,
dt : float
):
'''
Jit accelerated integral for boolean networks.
See `bool_integral_setup` and `bool_model_iter`.
Loops over time and the iterator and returns the
node state `x` of shape (T,N) where N is number
of nodes. Sets the `initial_conditions` explicitly
using `hold_times` for each node.
The `time_delays` and `predecessors` are arrays
ordered by edge index in the ordered graph.
Slices are taken and the arrays are reshaped.
Then, the squeezed input states of each neighbor
are applied to the logical function of each node
using the mask lookup table defined in the Boolean
functions themselves. The Bool func output is then
squeezed again, and the derivative calculated
with the Euler method.
'''
x=np.zeros(noise.shape).astype(np.longdouble)
dx=np.zeros(x.shape[-1]).astype(np.longdouble)
for t in range(x.shape[0]-1):
edge_index=0
#force node state to initial condition
if t<max(hold_times):
for n in range(x.shape[-1]):
if hold_times[n]>=t:
x[t,n]=initial_conditions[n]
#loop over iterator, calculate derivative using euler
for i in range(len(iterator)):
n1,n2,deg,mask=iterator[i]
d=-time_delays[edge_index:edge_index+(n2-n1)*deg].reshape((n2-n1,deg))
d+=t
p=predecessors[edge_index:edge_index+(n2-n1)*deg].reshape((n2-n1,deg))
a=sigmoid_constants[n1:n2].reshape((n2-n1,1))
edge_index+=(n2-n1)*deg
y=np.zeros((n2-n1,deg)).astype(np.longdouble)
for k in range(n2-n1):
for j in range(deg):
de=d[k,j]
pr=p[k,j]
y[k,j]=x[de,pr]
y=sigmoid(x=y,a=a)
dx[n1:n2]=BOOL(y,mask)
dx=sigmoid(dx,sigmoid_constants)
dxdt=(-x[t]+dx+noise[t])/time_constants
x[t+1]=x[t]+dt*dxdt
return x
# Cell
@sidis.timer
@njit
def bool_integral_risefall(iterator : np.ndarray,
time_delays : np.ndarray,
sigmoid_constants : np.ndarray,
time_constants : np.ndarray,
predecessors : np.ndarray,
noise : np.ndarray,
initial_conditions : np.ndarray,
hold_times : np.ndarray,
dt : float
):
'''
Almost identical to `bool_integral`, with the exception
that `time_constants` is an (N,2) vector, where N is the
number of nodes, and the [:,0] entry is the rise-time,
and the [:,1] entry is the fall-time. Their difference
tau[:,1]-tau[:,0] is modulated by the sigmoid function,
and used in the calculation of the new denominator of the
derivative, which is of the form
tau_rise+(tau_fall-tau_rise)*sigmoid(x)
'''
x=np.zeros(noise.shape).astype(np.longdouble)
dx=np.zeros(x.shape[-1]).astype(np.longdouble)
for t in range(x.shape[0]-1):
edge_index=0
#force node state to initial condition
if t<max(hold_times):
for n in range(x.shape[-1]):
if hold_times[n]>=t:
x[t,n]=initial_conditions[n]
#loop over iterator, calculate derivative using euler
for i in range(len(iterator)):
n1,n2,deg,mask=iterator[i]
d=-time_delays[edge_index:edge_index+(n2-n1)*deg].reshape((n2-n1,deg))
d+=t
p=predecessors[edge_index:edge_index+(n2-n1)*deg].reshape((n2-n1,deg))
a=sigmoid_constants[n1:n2].reshape((n2-n1,1))
edge_index+=(n2-n1)*deg
y=np.zeros((n2-n1,deg)).astype(np.longdouble)
for k in range(n2-n1):
for j in range(deg):
de=d[k,j]
pr=p[k,j]
y[k,j]=x[de,pr]
y=sigmoid(x=y,a=a)
dx[n1:n2]=BOOL(y,mask)
dx=sigmoid(dx,sigmoid_constants)
tau=time_constants[:,0]+(time_constants[:,1]-time_constants[:,0])*sigmoid(x[t],
sigmoid_constants)
dxdt=(-x[t]+dx+noise[t])/tau
x[t+1]=x[t]+dt*dxdt
return x
# Cell
def plot_graph(g,x,dt):
'''
Make separate plots of the node states `x` for each in-degree grouping.
'''
node_funcs={n:g.nodes[n]['f'] for n in g.nodes}
funcs=nx.utils.groups(sidis.cast(node_funcs,dict))
funcs={k:list(v) for k,v in funcs.items()}
for f,nodes in funcs.items():
for i in nodes:
plt.plot(np.arange(x.shape[0])*dt,x[:,i])
title=f'{f.__name__} Nodes: {nodes[0]} to {nodes[-1]}'
plt.title(title)
        plt.xlabel(r'$\tau$')
plt.ylabel('Amplitude')
plt.show()
# Cell
def bool_model( g,
T : int = 15,
dt : float = 0.01,
noise :float = 0,
init : List[float] = None,
hold : List[int] = None,
a : Union[float,Tuple[callable,float,float]] = (rng.normal,20,0),
tau : Union[np.ndarray,Tuple[callable,np.ndarray,np.ndarray]] = (rng.normal,0.5,0),
f : callable = XOR,
delay : Union[float,Tuple[callable,float,float]] = (rng.random,0,0),
edge_replacements = dict(
lengths = 1,
delay = (rng.normal,0,0),
node_attrs = dict(
a = (rng.normal,20,0),
tau = (rng.normal,0.2,0),
f = MPX
),
label = lambda g,node,iterable : len(g)+iterable
),
plot : bool = True,
steady : bool = True):
'''
Model the dynamics of the graph `g` by giving the node attributes
f : logical function
a : sigmoid function
tau : time constant
and edge attributes
delay : time-delay
and parses each of these arguments if given as a tuple of a
randomization function and its args; see `parse_kwargs`.
Converts any edges with the given `edge_replacements` (see
`convert_edges` function for arguments); useful for `MPX`.
Sorts the graph in place using `sort_graph` in order to produce
an iterable with `bool_model_iter`. See also `setup_bool_integral`.
Initializes the dynamics to `init` using the hold times `hold`;
see `bool_initial_conditions`. Integrates with `bool_integral`
and `bool_integral_risefall` if `tau` is given as an array of
[rise_time,fall_time]. Returns the node state array `x`
and optionally plots the resulting dynamics with `plot_graph`.
'''
#give node/edge attrs
give_nodes(g,f=f,a=a,tau=tau)
give_edges(g,delay=delay)
#convert edges to new nodes
if edge_replacements:
convert_edges(g,edges=edge_replacements.get('edges'),
lengths=edge_replacements.get('lengths'),
node_data=edge_replacements.get('node_attrs'),
delay=edge_replacements.get('delay'))
#save new data
nodedata=node_data(g,'f','a','tau')
edgedata=edge_data(g,'delay')
#sort graph
sort_graph(g,relabel=True)
#setup integral
itr,delay,a,tau,pred,noise,init,hold=setup_bool_integral(g,
init=init,
hold=hold,
T=T,dt=dt,noise=noise,steady=steady)
if len(tau.shape)==1:
x=bool_integral(iterator=itr,
time_delays=delay,
sigmoid_constants=a,
time_constants=tau,
predecessors=pred,
noise=noise,
initial_conditions=init,
hold_times=hold,dt=dt)
else:
x=bool_integral_risefall(iterator=itr,
time_delays=delay,
sigmoid_constants=a,
time_constants=tau,
predecessors=pred,
noise=noise,
initial_conditions=init,
hold_times=hold,dt=dt)
#plot
if plot:
plot_graph(g=g,x=x,dt=dt)
return x
| 18,860 | 29.718241 | 118 |
py
|
networkm
|
networkm-master/networkm/puf_functions.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 03_hbn_puf.ipynb (unless otherwise specified).
__all__ = ['bool_diff', 'plot_lya', 'plot_mu', 'puf_statistics', 'bool_lyapunov', 'lya_fit', 'booleanize', 'analyze',
'BooleanNetworkEnsemble']
# Cell
import warnings
with warnings.catch_warnings(): #ignore warnings
warnings.simplefilter("ignore")
import networkx as nx
import numpy as np
import sidis
rng=sidis.RNG(0)
import matplotlib.pyplot as plt
import typing
from typing import Optional, Tuple, Dict, Callable, Union, Mapping, Sequence, Iterable, Hashable, List, Any
from collections import namedtuple
import einops
import numba
from numba import njit
from scipy import stats
import scipy.optimize as sciopt
import matplotlib as mpl
from .graph_functions import *
from .model_functions import *
from .network_class import *
# Cell
def bool_diff(comparisons):
'''
Compare timeseries to the 0th entry
'''
res=[]
base=comparisons[0]
for i,x in enumerate(comparisons):
        res+=[x-base]
    return res
# Cell
def plot_lya(data,lstart=0,lend=15,g=0,show=True,err=True,lambda_xy=(10,-4.5),XY=(5,3),
fname=None#r'paper/LyapunovExponent.pdf'
):
mpl.rcParams['figure.figsize'] = [XY[0],XY[1]]
T=5
log_dist=data['log_dist'][...,0]
err_dist=data['log_dist'][...,1]
lya=data['lyapunov'][...,0]
dlya=data['lyapunov'][...,1]
for i in range(log_dist.shape[0]):
avgln=log_dist[i].mean(axis=0)
sigmaln=err_dist[i].mean(axis=0)
xdata=np.arange(lstart,lend+1)
ydata=np.asarray(avgln[lstart:lend+1])
sdata=np.asarray(sigmaln[lstart:lend+1])
m=lya[i].mean(axis=0)
m_sigma=dlya[i].mean(axis=0)
b=data['intercept'][i].mean(axis=0)
if err:
plt.errorbar(np.arange(len(avgln)), np.asarray(avgln), xerr=0, yerr=np.asarray(sigmaln),
fmt="k-", elinewidth=0.2, label='Model Data')
if not err:
plt.plot(np.arange(len(avgln)), np.asarray(avgln), color="black", label='Model Data')
plt.plot(xdata,m*xdata+b,"m--",linewidth="2", label=('Linear Fit from t=0 to '+str(lend)))
#plt.legend([r'y = '+str(np.around(m,decimals=3))+'t + '+str(np.around(b,decimals=3)),r'$<ln(d(t))>$'])
plt.xlabel(r'Time ($\tau$)')
    plt.ylabel(r'$\langle \log D(t)\rangle$')
#plt.title('Fit of Lyapunov Exponent '+ r'$\lambda$ = '+str(np.around(m,decimals=3))+\
# r'$\pm$'+str(np.around(m_sigma,decimals=3)))
txt=r'$\langle\lambda_{max}\rangle$ = '+f'{np.mean(lya):.2f} $\pm$ {np.mean(dlya):.2f}'+r'$\tau$'
plt.annotate(xy=lambda_xy, text=txt, color='purple')
if fname:
plt.savefig(fname=fname,bbox_inches='tight',dpi=600)
plt.show()
# Cell
def plot_mu(data,X=7,Y=3,offset=-0.375,ncol=2,nrow=1,alpha=0.2,err_scale=0,
inter_xy=(1.5, 0.3),intra_xy=(14, 0.1),
fname=None#r'paper/MuPlot.pdf'
):
muintercolor='blue'
muintracolor='red'
subfontsize=12
import matplotlib as mpl
    mpl.rcParams['axes.titley'] = offset
#Unpack
muinters=data['mu_inter'][...,0]
muintras=data['mu_intra'][...,0]
dinters=data['mu_inter'][...,1]*err_scale
dintras=data['mu_intra'][...,1]*err_scale
deltamus=data['mu_inter'][...,0]-data['mu_intra'][...,0]
    ddeltamus=np.sqrt(np.square(data['mu_inter'][...,1])+np.square(data['mu_intra'][...,1]))*err_scale
maxmu=np.argmax(np.max(deltamus,axis=1))
minmu=np.argmin(np.max(deltamus,axis=1))
time_axis=np.arange(muinters.shape[-1])
fig, (ax1, ax2) = plt.subplots(nrow, ncol, figsize=[X, Y], sharey=True)
#ensemble
for i in range(len(muinters)):
ax1.errorbar(time_axis, muinters[i], dinters[i], color=muintercolor, alpha=alpha)
ax1.errorbar(time_axis, muintras[i], dintras[i], color=muintracolor, alpha=alpha)
#ax1.errorbar(time_axis, muinters[i], 0, color=muintercolor, alpha=alpha)
#ax1.errorbar(time_axis, muintras[i], 0, color=muintracolor, alpha=alpha)
#average
ax1.errorbar(time_axis,np.mean(muinters,axis=0),np.mean(dinters,axis=0),color=muintercolor,linewidth=2)
ax1.errorbar(time_axis,np.mean(muintras,axis=0),np.mean(dintras,axis=0),color=muintracolor,linewidth=2)
#text
#inter_x=np.argwhere(np.mean(muinters,axis=0))
ax1.annotate(xy=inter_xy, text=r'$\mu_{inter}$', color=muintercolor)
ax1.annotate(xy=intra_xy, text=r'$\mu_{intra}$', color=muintracolor)
ax1.set_xlabel(r'Time ($\tau$)')
ax1.set_ylabel(r'$\mu$')
#sub figure label
ax1.set_title('(a)', fontsize=subfontsize, loc='center')
#ensemble
for i in range(len(muinters)):
ax2.errorbar(time_axis, deltamus[i], ddeltamus[i], color='purple', alpha=alpha)
topt=np.ravel(np.argmax(deltamus[i]))
#average
ax2.errorbar(time_axis, np.mean(deltamus,axis=0), np.mean(ddeltamus,axis=0), color='purple', linewidth=2)
#text
ax2.annotate(xy=(time_axis[np.argmax(np.mean(deltamus,axis=0))],
np.max(deltamus+ddeltamus)*1.1), text=r'$t_{opt}$',ha='center')
ax2.vlines(ymax=np.max(np.mean(deltamus,axis=0)), ymin=0, x=time_axis[np.argmax(np.mean(deltamus,axis=0))],
linestyles='dashed', color='black')
ax2.set_xlabel(r'Time ($\tau$)')
ax2.set_yticks([0,0.1,0.2,0.3,0.4,0.5])
ax2.set_ylabel(r'$\Delta\mu$')
#subplot letter label
ax2.set_title('(b)', fontsize=subfontsize, loc='center')
if fname:
plt.savefig(fname=fname,bbox_inches='tight',dpi=600)
plt.show()
# Cell
def puf_statistics(responses : np.ndarray,
shape : str = 'challenge repeat time instance node'
) -> Tuple[Union[np.ndarray,float,int]]:
'''
Given an array of `responses` of a given `shape`, calculate the inter and intra
PUF statistics mu_inter and mu_intra and their standard deviations over time.
Return these as arrays over time, as well as their maximum separation mu_opt
and the time at which this occurs t_opt.
`shape` must be a permutation of the words 'challenge repeat time instance node'
and describes the input shape of the array.
'''
resp=einops.rearrange(responses,shape+
'-> challenge repeat time instance node').astype(float)
n_distinct=resp.shape[0]
n_repeat=resp.shape[1]
measure_time=resp.shape[2]
n_synth=resp.shape[3]
n=resp.shape[4]
#Number of pairwise combinations for inter and intra calculations.
l_intra=n_repeat*(n_repeat-1)/2
l_inter=n_synth*(n_synth-1)/2
#Pairwise differences of timeseries from responses used for comparison.
pdt_intra=np.zeros((n_distinct,n_synth,int(l_intra),measure_time))
pdt_inter=np.zeros((n_distinct,n_repeat,int(l_inter),measure_time))
#Loop over each pairwise combination and form fractional hamming distances at each time.
for i in range(n_distinct):
for g in range(n_synth):
l=0
for j in range(n_repeat):
for k in range(j+1,n_repeat):
pdt_intra[i,g,l]=np.sum(abs(resp[i,j,:,g,:]-resp[i,k,:,g,:]),axis=-1)/n
l+=1
for g in range(n_repeat):
l=0
for j in range(n_synth):
for k in range(j+1,n_synth):
pdt_inter[i,g,l]=np.sum(abs(resp[i,g,:,j,:]-resp[i,g,:,k,:]),axis=-1)/n
l+=1
#Compute means on a per-device and overall level.
#Intra block below. Copies along axes for quick vector calculations.
mu_intra_per_device=np.mean(pdt_intra,axis=(0,2))
mu_intra_per_device_copy=np.repeat(\
np.repeat(mu_intra_per_device[:,np.newaxis,:],\
l_intra,axis=1)[np.newaxis,:],n_distinct,axis=0)
sigma_intra_per_device= np.sqrt(np.mean((np.square(pdt_intra-mu_intra_per_device_copy)),axis=(0,2)))
mu_intra=np.mean(mu_intra_per_device,axis=0)
sigma_intra=np.mean(sigma_intra_per_device,axis=0)
#Inter block below. Copies along axes for quick vector calculations.
mu_inter_per_device=np.mean(pdt_inter,axis=(0,2))
mu_inter_per_device_copy=np.repeat(np.repeat(\
mu_inter_per_device[:,np.newaxis,:],l_inter,axis=1)[np.newaxis,:],\
n_distinct,axis=0)
sigma_inter_per_device= np.sqrt(np.mean((np.square(pdt_inter-mu_inter_per_device_copy)),axis=(0,2)))
mu_inter=np.mean(mu_inter_per_device,axis=0)
sigma_inter=np.mean(sigma_inter_per_device,axis=0)
#Find optimum measurement time and save time series.
t_opt=np.argmin(mu_intra-mu_inter)
mu_opt=mu_inter[t_opt]-mu_intra[t_opt]
return mu_inter,mu_intra,sigma_inter,sigma_intra,mu_opt,t_opt
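# Cell
# Editor's example (sketch, not part of the original API): exercising
# `puf_statistics` on random bits. The shape below -- 2 challenges, 3 repeats,
# 10 time steps, 4 instances, 8 nodes -- is an illustrative assumption.
def _example_puf_statistics():
    resp=np.random.randint(0,2,size=(2,3,10,4,8))
    mu_inter,mu_intra,s_inter,s_intra,mu_opt,t_opt=puf_statistics(
        resp,shape='challenge repeat time instance node')
    #mu_inter/mu_intra are length-10 arrays of mean fractional Hamming
    #distances over time; mu_opt is their separation at the optimal time t_opt
    return mu_opt,t_opt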
# Cell
def bool_lyapunov(responses : np.ndarray,
window : int = 5,
shape : str = 'challenge repeat time node'
) -> Tuple[np.ndarray]:
"""
Boolean timeseries Lyapunov exponent calculator.
Takes as input the time series of PUF responses
and calculates the maximum Lyapunov exponent to determine if the system is chaotic.
Args:
responses: array of PUF responses for a single instance
window: Temporal window length for comparison of Boolean distance between time series.
shape: input shape of PUF responses for a single instance
"""
T=window
resp=einops.rearrange(responses,shape+
'-> challenge repeat time node').astype(float)
nrp= resp.shape[1] #number of reps
nch= resp.shape[0] #number of challenges
measure_time=resp.shape[2]
n=resp.shape[-1]
#We loop over pairwise combinations of timeseries comparisons and compute the
#average Boolean distance within the window length.
clist=[]
for c in range(nch):
ilist=[]
for i in range(1,nrp):
tlist=[]
for t in range(measure_time-T):
d=np.sum(abs(resp[c,i,t:t+T]-resp[c,0,t:t+T]))/(n*T)
if d!=0:
first_t=t
break
else:
first_t=0
for t in range(first_t,measure_time-T):
d=np.sum(abs(resp[c,i,t:t+T]-resp[c,0,t:t+T]))/(n*T)
tlist+=[d]
if tlist!=[]:
ilist+=[tlist]
if ilist!=[]:
clist+=[ilist]
avgln=[0 for t in range(measure_time-T)]
tcounterlist=[0 for t in range(measure_time-T)]
longesttime=-1
    for c in range(len(clist)):
for i in range(len(clist[c])):
for t in range(len(clist[c][i])):
tcounterlist[t]+=1
avgln[t]+=np.log(clist[c][i][t]) if clist[c][i][t]!=0 else np.log(0.01)
longesttime=t if t>longesttime else longesttime
avgln=avgln[:longesttime+1]
tcounterlist=tcounterlist[:longesttime+1]
for t in range(len(avgln)):
avgln[t]=avgln[t]/tcounterlist[t]
sigmaln=[0 for t in range(len(avgln))]
    for c in range(len(clist)):
for i in range(len(clist[c])):
for t in range(len(clist[c][i])):
xi=np.log(clist[c][i][t]) if clist[c][i][t]!=0 else np.log(0.01)
sigmaln[t]+=(xi-avgln[t])**2
for t in range(len(avgln)):
sigmaln[t]=np.sqrt(sigmaln[t]/(tcounterlist[t]-1))
return avgln,sigmaln
# Cell
def lya_fit(avgln,sigmaln,lstart=0,lend=5,intercept=False):
"""
    Fits the average logarithm of Boolean distances calculated by `bool_lyapunov`.
    Calculates the resulting maximum Lyapunov exponent.
Args:
lstart: Start of linear fit.
lend: End of linear fit.
"""
linearstart=lstart
linearend=lend
xdata=np.arange(linearstart,linearend+1)
ydata=np.asarray(avgln[linearstart:linearend+1])
sdata=np.asarray(sigmaln[linearstart:linearend+1])
def lin(x,m,b):
return m*x+b
popt, pcov = sciopt.curve_fit(lin, xdata, ydata, sigma=sdata, absolute_sigma=True)
m=popt[0]
b=popt[1]
p_sigma = np.sqrt(np.diag(pcov))
m_sigma = p_sigma[0]
residuals = ydata- lin(xdata, m,b)
ss_res = np.sum(residuals**2)
ss_tot = np.sum((ydata-np.mean(ydata))**2)
r_squared = 1 - (ss_res / ss_tot)
lya_b=b
lya_max=m
lya_max_err=m_sigma
if not intercept:
return lya_max,lya_max_err
else:
return lya_max,lya_max_err,lya_b
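# Cell
# Editor's example (sketch, not part of the original API): fitting a synthetic
# log-distance curve ln(d(t))=0.3*t-2 should recover a slope, i.e. a maximum
# Lyapunov exponent, of ~0.3.
def _example_lya_fit():
    t=np.arange(10)
    avgln=0.3*t-2.
    sigmaln=np.full(10,0.1)
    lya_max,lya_err=lya_fit(avgln,sigmaln,lstart=0,lend=9)
    return lya_max,lya_err #~0.3 with a small uncertainty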
# Cell
@njit
def booleanize(vn, threshold=0.5):
'''
Convert the numpy array `vn` into a bitstream
according to `threshold`; values of `vn>=threshold`
will be set to `1`, and values of `vn<threshold`
    will be set to `0`. If `threshold` is passed as None,
    it defaults to the midpoint of the range of `vn`.
    '''
    if threshold is None:
        threshold=(np.max(vn)+np.min(vn))/2
B=np.zeros(vn.shape).astype(vn.dtype)
for s in np.ndindex(vn.shape):
if vn[s]>=threshold:
B[s]+=1.
return B
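# Cell
# Editor's example (sketch, not part of the original API): thresholding an
# analog waveform at the default 0.5 to recover a bitstream.
def _example_booleanize():
    vn=np.array([0.1,0.6,0.45,0.9])
    return booleanize(vn) #array([0., 1., 0., 1.])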
# Cell
def analyze(responses,
shape = 'design instance challenge repeat time node',
window = 5
):
'''
Calculates the `puf_statistics` and `boolean_lyapunov` for an array of `responses`.
'''
responses = einops.rearrange(responses,
shape+'->design instance challenge repeat time node')
classes,instances,challenges,repeats,T,N=responses.shape
boolean = booleanize(responses).astype(int)
mu_inter=np.empty((classes,T,2))
mu_intra=np.empty((classes,T,2))
delta_mu=np.empty((classes,2))
t_opt=np.empty((classes))
for s in range(responses.shape[0]):
muinter,muintra,dmuinter,dmuintra,muopt,topt=puf_statistics(
responses=boolean[s],shape='instance challenge repeat time node')
mu_inter[s,:,0]=muinter
mu_inter[s,:,1]=dmuinter
mu_intra[s,:,0]=muintra
mu_intra[s,:,1]=dmuintra
t_opt[s]=topt
delta_mu[s,0]=muopt
        delta_mu[s,1]=np.sqrt(dmuinter[topt]**2+dmuintra[topt]**2)
log_dist=np.empty((classes,instances,T,2))
lyapunov=np.empty((classes,instances,2))
intercept=np.empty((classes,instances))
for s in np.ndindex(responses.shape[:2]):
avgln,sigmaln=bool_lyapunov(boolean[s],window,shape= 'challenge repeat time node')
lya,dlya,b=lya_fit(avgln,sigmaln,lstart=0,lend=int(T/2),intercept=True)
a=np.array([avgln,sigmaln])
a=a.T
log_dist[s][:len(a)]=a
a=np.array([lya,dlya])
a=a.T
lyapunov[s]=a
intercept[s]=b
return dict(mu_inter=mu_inter,mu_intra=mu_intra,delta_mu=delta_mu,t_opt=t_opt,
log_dist=log_dist,lyapunov=lyapunov,intercept=intercept)
# Cell
class BooleanNetworkEnsemble:
def __init__(self,
classes=3,
instances=3,
challenges=3,
repeats=3,
scale=0.1,
variables=('a','tau','hold'),
steady=False,
override=False,
decimation=None,
g = (nx.random_regular_graph,3,256),
a = np.inf,
tau = (rng.normal,1,0.1),
f = NOT,
delay = 0.,
edge_replacements = None,
T = 25,
dt = 0.01,
noise = 0.,
view = 'out',
plot = False,
init = None,
hold = None,
):
self.bn_kwargs=dict(g=g,a=a,tau=tau,f=f,delay=delay,edge_replacements=edge_replacements,
T=T,dt=dt,noise=noise,view=view,plot=plot,init=init,hold=hold)
self.query_kwargs=dict(instances=instances,challenges=challenges,repeats=repeats,scale=scale,
variables=variables,steady=steady,override=override,decimation=decimation)
self.classes=[]
self.responses=[]
self(classes,**self.bn_kwargs)
self.query(**self.query_kwargs)
    def __call__(self,classes,**bn_kwargs):
        if isinstance(classes,int):
            for k,v in self.bn_kwargs.items():
                if k not in bn_kwargs:
                    bn_kwargs[k]=v
            for i in range(classes):
                #re-evaluate any delayed graph constructor per class,
                #so each design draws its own topology
                kw=dict(bn_kwargs)
                kw['g']=parse_kwargs(g=bn_kwargs['g'])['g']
                self.classes+=[BooleanNetwork(**kw)]
        else:
            self.classes+=[classes]
@sidis.timer
def query(self,**query_kwargs):
for k,v in self.query_kwargs.items():
if k not in query_kwargs:
query_kwargs[k]=v
if isinstance(self.responses,np.ndarray):
self.responses=np.ndarray.tolist(self.responses)
for b in self.classes:
self.responses+=[b.query(**query_kwargs)[...,b.dynamical_nodes]]
self.responses=np.array(self.responses)
#self.__dict__.update(analyze(self.responses))
self.data=analyze(self.responses)
def __getitem__(self,i):
return self.classes[i]
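# Cell
# Editor's example (sketch, not part of the original API): a small, fast
# ensemble -- 2 designs x 2 instances x 2 challenges x 2 repeats on an 8-node
# random regular graph. All sizes are illustrative assumptions chosen so the
# jit-compiled integration finishes quickly.
def _example_ensemble():
    bne=BooleanNetworkEnsemble(classes=2,instances=2,challenges=2,repeats=2,
                               g=(nx.random_regular_graph,3,8),T=15)
    #analysis results are stored in the `data` dict produced by `analyze`
    return sorted(bne.data) #['delta_mu', 'intercept', 'log_dist', ...]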
| 17,116 | 36.869469 | 117 |
py
|
networkm
|
networkm-master/networkm/graph_functions.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_graph_functions.ipynb (unless otherwise specified).
__all__ = ['ring', 'print_graph', 'parse_kwargs', 'give_nodes', 'give_edges', 'node_attrs', 'edge_attrs', 'node_data',
'edge_data', 'argwhere', 'kwargwhere', 'where', 'convert_edges', 'relabel_graph', 'sort_graph']
# Cell
import warnings
with warnings.catch_warnings(): #ignore warnings
warnings.simplefilter("ignore")
import networkx as nx
import numpy as np
import sidis
rng=sidis.RNG(0)
import matplotlib.pyplot as plt
import typing
from typing import Optional, Tuple, Dict, Callable, Union, Mapping, Sequence, Iterable, Hashable, List, Any
from collections import namedtuple
# Cell
def ring(N : int = 3,
left : bool = True,
right : bool = False,
loop : bool = False):
'''
Return `g`, a ring topology networkx graph with `N` nodes.
Booleans `left`, `right`, `loop` determine the directed edges.
'''
g=nx.MultiDiGraph()
e=[]
if left:
e+=[(i,(i-1)%N) for i in range(N)]
if right:
e+=[(i,(i+1)%N) for i in range(N)]
if loop:
e+=[(i,i) for i in range(N)]
g.add_nodes_from([i for i in range(N)])
g.add_edges_from(e)
return g
# Internal Cell
def table(iterable : Iterable, header : Iterable[str]):
'''
Creates a simple ASCII table from an iterable and a header.
Modified from
https://stackoverflow.com/questions/5909873/how-can-i-pretty-print-ascii-tables-with-python
'''
max_len = [len(x) for x in header]
for row in iterable:
row = [row] if type(row) not in (list, tuple) else row
for index, col in enumerate(row):
if max_len[index] < len(str(col)):
max_len[index] = len(str(col))
output = '|' + ''.join([h + ' ' * (l - len(h)) + '|' for h, l in zip(header, max_len)]) + '\n'
for row in iterable:
row = [row] if type(row) not in (list, tuple) else row
output += '|' + ''.join([str(c) + ' ' * (l - len(str(c))) + '|' for c, l in zip(row, max_len)]) + '\n'
return output
# Cell
def print_graph(g : nx.MultiDiGraph,
string=False):
'''
Print the 'node', predecessors', and 'successors' for every node in graph `g`.
The predecessors are the nodes flowing into a node,
and the successors are the nodes flowing out.
Example use:
g=ring(N=3,left=True,right=True,loop=True)
print_graph(g)
'''
data = [[n, list(g.predecessors(n)), list(g.successors(n))] for n in g.nodes]
for i in range(len(data)):
data[i][1]=', '.join([str(i) for i in data[i][1]])
data[i][2]=', '.join([str(i) for i in data[i][2]])
header=['Node', 'Predecessors', 'Successors']
if not string:
print(table(data,header))
else:
return table(data,header)
# Cell
def parse_kwargs(**kwargs):
'''
Evaluate delayed function calls by replacing
kwarg=(func,*farg,dict(**fkwarg))
with
kwarg=func(*farg,**fkwarg)
Example: kwargs = {a : (np.random.random,1)}
becomes kwargs = {a : np.random.random(1)}
each time this function is called.
Used to randomize kwarg assignment for
an exterior function, e.g setting node
and edge attributes.
'''
newkwargs={k:v for k,v in kwargs.items()}
for k,v in kwargs.items():
if type(v) is tuple and callable(v[0]):
if len(v)==1:
newkwargs[k]=v[0]()
elif len(v)==2:
if type(v[-1]) is dict:
newkwargs[k]=v[0](**v[1])
else:
newkwargs[k]=v[0](v[1])
else:
if type(v[-1]) is dict:
newkwargs[k]=v[0](*v[1:-1],**v[-1])
else:
newkwargs[k]=v[0](*v[1:])
return newkwargs
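# Cell
# Editor's example (sketch, not part of the original API): delayed calls are
# re-evaluated on every invocation, so parsing twice re-samples `tau`.
def _example_parse_kwargs():
    kwargs=dict(tau=(np.random.normal,1.0,0.1),a=np.inf)
    return parse_kwargs(**kwargs) #e.g. {'tau': 1.03..., 'a': inf}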
# Cell
def give_nodes(g : nx.MultiDiGraph,
data : Dict[Hashable,dict] = None,
nodes : Iterable = None,
**kwargs):
'''
Parse and apply any 'kwargs' to a set of 'nodes'.
If given, 'data' is a dict-of-dicts keyed by node.
The inner dict is given to the corresponding node.
'''
if nodes is None:
nodes=g.nodes
if kwargs:
[sidis.give(g.nodes[n],**parse_kwargs(**kwargs)) for n in nodes]
if data:
for k,v in data.items():
try:
g.nodes[k].update(parse_kwargs(**v))
except KeyError:
pass
# Internal Cell
def parse_edges(edges : Union[tuple,List[tuple]],
default_key : Hashable = 0
):
'''
Parse a single edge or list of edges
into a list of 3-tuples for iterating over
a MultiDiGraph, which requires keys.
'''
if type(edges) is tuple:
edges=[edges]
if type(edges) is not list:
edges=list(edges)
for i in range(len(edges)):
if len(edges[i])==4: #discard data, last entry
edges[i]=(edges[i][0],edges[i][1],edges[i][2])
if len(edges[i])==2: #include key, 3rd entry
edges[i]=(edges[i][0],edges[i][1],default_key)
return edges
# Cell
def give_edges(g : nx.MultiDiGraph,
data : Dict[Hashable,dict] = None,
edges : Iterable = None,
**kwargs):
'''
Parse and apply any 'kwargs' to a set of 'edges'.
If given, 'data' is a dict-of-dicts keyed by edge.
The inner dict is given to the corresponding edge.
'''
if edges is None:
edges=g.edges
edges = parse_edges(edges)
if kwargs:
[sidis.give(g.edges[e],**parse_kwargs(**kwargs)) for e in edges]
if data:
for k,v in data.items():
try:
g.edges[k].update(parse_kwargs(**v))
except:
pass
# Cell
def node_attrs(g):
'''
Unique node data keys.
'''
attrs=[]
for n in g.nodes:
for attr in g.nodes[n]:
attrs+=[attr]
return list(set(attrs))
# Cell
def edge_attrs(g):
'''
Unique edge data keys.
'''
attrs=[]
for e in g.edges:
for attr in g.edges[e]:
attrs+=[attr]
return list(set(attrs))
# Cell
def node_data(g,*args):
'''
Return node attributes 'args' as an array.
NOTE: The ordering of the array corresponds to the
ordering of the nodes in the graph.
'''
if not args:
args=node_attrs(g)
node_data={}
[sidis.give(node_data,str(arg),
np.squeeze(np.array([sidis.get(g.nodes[n],arg) for n in g.nodes])))
for arg in args]
return node_data
# Cell
def edge_data(g,*args):
'''
Return edge attributes 'args' as an array.
NOTE: The ordering of the array corresponds to the
ordering of the edges in the graph.
'''
if not args:
args=edge_attrs(g)
edge_data={}
[sidis.give(edge_data,str(arg), np.array([sidis.get(g.edges[e],arg) for e in g.edges]))
for arg in args]
return edge_data
# Cell
def argwhere(*args : List[np.ndarray]):
'''
Simplified version of np.argwhere for multiple arrays.
Returns list of indices where args hold.
'''
with warnings.catch_warnings(): #ignore numpy warning
warnings.simplefilter("ignore")
if not args:
return None
elif len(args)==1:
return list(np.ravel(np.argwhere(args[0])).astype(int))
else:
i=[] #indices
for arg in args:
res=list(np.ravel(np.argwhere(arg)).astype(int))
i+=[res]
if len(i)==1:
i=i[0]
if np.any(i):
return list(i)
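# Cell
# Editor's example (sketch, not part of the original API): indices where each
# condition holds, for one or for several boolean arrays.
def _example_argwhere():
    x=np.array([1,0,2,0])
    return argwhere(x>0),argwhere(x>0,x==0) #([0, 2], [[0, 2], [1, 3]])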
# Cell
def kwargwhere(g : nx.MultiDiGraph,**kwargs : Dict[str,Any]):
'''
Return the node and edges where
the kwarg equalities hold in the graph.
'''
node_k=node_attrs(g)
edge_k=edge_attrs(g)
node_i=[]
edge_i=[]
for k,v in kwargs.items():
n_i=[]
e_i=[]
if k in node_k:
for n in g.nodes:
if g.nodes[n].get(k)==v:
n_i+=[n]
node_i+=[n_i]
if k in edge_k:
for e in g.edges:
if g.edges[e].get(k)==v:
e_i+=[e]
edge_i+=[e_i]
if len(node_i)==1:
node_i=node_i[0]
if len(edge_i)==1:
edge_i=edge_i[0]
if node_i and edge_i:
return node_i,edge_i
elif node_i:
return node_i
elif edge_i:
return edge_i
# Cell
def where(g,*args,**kwargs):
'''
Combine the 'argwhere' and 'kwargwhere' functions for the graph.
'''
arg_i=argwhere(*args)
kwarg_i=kwargwhere(g,**kwargs)
if arg_i and kwarg_i:
return arg_i,kwarg_i
elif arg_i:
return arg_i
elif kwarg_i:
return kwarg_i
# Internal Cell
def parse_lengths(g : nx.MultiDiGraph,
                  edges : Union[tuple,List[tuple]],
                  lengths : Union[str,int,dict] = 1) -> Dict[tuple,int]:
    '''
    Convert `lengths` into a dict keyed by edge.
    `lengths` can be a single integer applied to every edge, a dict with
    an integer per edge, or a string giving the edge attribute holding
    the length.
    '''
    if type(lengths) is int:
        lengths={e:lengths for e in edges}
    elif type(lengths) is str:
        lengths={e:g.edges[e].get(lengths) for e in edges}
    return lengths
# Cell
def convert_edges(g : nx.MultiDiGraph,
edges : Union[None,tuple,List[tuple]] = None,
lengths : Union[str,int,dict] = 1,
node_data : dict = {},
label : callable = lambda g,node,iterable : len(g)+iterable,
**edge_data
):
'''
Converts `edges` in `g` to paths of the given `lengths`.
The new paths follow a tree structure, and each new node
inherits `node_data` and is labeled with `label`.
The tree structure finds the roots (set of starting nodes)
in the list of `edges`, and then creates trunks corresponding
to the paths of maximum length for each node. Then, branches are
added from the trunk to each of the leaves (terminal nodes),
made from new nodes equal to the lengths associated with each path.
'''
    #default to all edges
    if edges is None:
        edges=g.edges(keys=True)
    #parse args
    edges=parse_edges(edges=edges,default_key=0)
lengths=parse_lengths(g=g,edges=edges,lengths=lengths)
#unique first nodes
roots=set([e[0] for e in edges])
#max path lengths on a per-starting node basis
trunks={r:max([lengths[e] for e in g.out_edges(r,keys=True) if e in edges])
for r in roots}
#sort roots by longest trunk length to create largest trunks first
roots=sorted(roots,
key=lambda r: trunks[r],
reverse=True)
#terminal nodes for each branch
leaves={r:list(g.successors(r)) for r in roots}
#now build trunks, then create branches from trunk to edges
for r in roots:
trunk=[label(g,node=r,iterable=i) for i in range(trunks[r])]
if trunk!=[]:
nx.add_path(g,[r]+trunk,**parse_kwargs(**edge_data))
give_nodes(g,nodes=trunk,**node_data)
for edge,length in lengths.items():
if edge[0]==r: #branch from root
if length==trunks[r]: #go to leaf using trunk endpoint
branch=[trunk[-1]]+[edge[1]]
else: #create new branch from somewhere in trunk
branch=[trunk[length-1]]+[edge[1]]
nx.add_path(g,branch,**g.edges[edge]) #apply old edge data
give_nodes(g,nodes=branch[:-1],**node_data)
#trim old edges
for e in edges:
g.remove_edge(*e)
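# Cell
# Editor's example (sketch, not part of the original API): replacing every
# edge of a 3-ring with a path through one new relay node grows the graph
# from 3 nodes and 3 edges to 6 nodes and 6 edges.
def _example_convert_edges():
    g=ring(N=3)
    convert_edges(g,lengths=1)
    return len(g),g.number_of_edges() #(6, 6)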
# Cell
def relabel_graph(g : nx.MultiDiGraph,
mapping : Union[None,callable,dict] = None):
'''
Relabel nodes in place with desired 'mapping', and store the
`mapping` and `inverse_mapping` as attributes of `g`.
Can be called again without args to relabel to the original map,
which switches the `mapping` and `inverse_mapping`.
If `mapping` is None and `g` has no `mapping`,
defaults to replacing nodes with integers.
If `mapping` is None and `g` has a `mapping`, uses that.
Otherwise, `mapping` is a callable or dict keyed with old node labels
as keys and new node labels as values.
'''
if mapping is None:
if not g.__dict__.get('mapping'):
mapping={n:i for i,n in enumerate(g.nodes)}
else:
mapping=g.mapping
elif callable(mapping):
mapping=mapping(g)
inverse_mapping={v:k for k,v in mapping.items()}
def relabel_nodes(G, mapping):
H = nx.MultiDiGraph()
H.add_nodes_from(mapping.get(n, n) for n in G)
H._node.update((mapping.get(n, n), d.copy()) for n, d in G.nodes.items())
if G.is_multigraph():
new_edges = [
(mapping.get(n1, n1), mapping.get(n2, n2), k, d.copy())
for (n1, n2, k, d) in G.edges(keys=True, data=True)
]
# check for conflicting edge-keys
undirected = not G.is_directed()
seen_edges = set()
for i, (source, target, key, data) in enumerate(new_edges):
while (source, target, key) in seen_edges:
if not isinstance(key, (int, float)):
key = 0
key += 1
seen_edges.add((source, target, key))
if undirected:
seen_edges.add((target, source, key))
new_edges[i] = (source, target, key, data)
H.add_edges_from(new_edges)
else:
H.add_edges_from(
(mapping.get(n1, n1), mapping.get(n2, n2), d.copy())
for (n1, n2, d) in G.edges(data=True)
)
H.graph.update(G.graph)
return H
gnew=relabel_nodes(g,mapping)
g.__dict__.update(gnew.__dict__)
g.mapping=inverse_mapping
g.inverse_mapping=mapping
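# Cell
# Editor's example (sketch, not part of the original API): relabel string
# nodes to integers, then call again with no mapping to restore the originals
# from the stored inverse.
def _example_relabel_graph():
    g=nx.MultiDiGraph([('a','b'),('b','c')])
    relabel_graph(g) #nodes are now 0, 1, 2
    relabel_graph(g) #stored mapping restores the old labels
    return list(g.nodes) #['a', 'b', 'c']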
# Cell
def sort_graph(g : nx.MultiDiGraph,
nodes_by='in_degree', #g.in_degree, #sorting this function over nodes
node_key=lambda t:sidis.get(t,-1,-1), #last element of sorting tuple
node_args=(), #not accessing any attributes by default
nodes_ascending=True,
edges_by=None, #not generating function evals to sort
edge_key=None,#orders edges, defaults to linear comb of node sort
edge_args=(), #not accessing any edge attrs by default
edges_ascending=False,
relabel=False #relabel to integers
) -> None:
'''
Sort the graph in place by changing node and edge order.
See `sidis.sort` documentation for explanation of by, key, and args.
Default behavior is to sort nodes by in-degree, and edges by increasing node label,
after relabling nodes to integers. Stores result in 'sorting' attribute.
'''
#parse args; get node sorting attr if str
if type(nodes_by) is str:
nodes_by=sidis.get(g,nodes_by)
#if no edge key given default to ordering by linear comb of node func
if edge_key is None:
edge_key=lambda t:100*nodes_by(t[0])-10*nodes_by(t[1])
#sort nodes
node_sorting=sidis.sort(g.nodes,
*node_args,
by=nodes_by,
key=node_key,
reverse=nodes_ascending)
#sort returns tuples of (node,nodes_by(node)), so extract nodes and data
if nodes_by is None:
nodes=[(n,g.nodes[n]) for n in node_sorting]
else:
nodes=[(n[0],g.nodes[n[0]]) for n in node_sorting]
#sort edges
edge_sorting=sidis.sort(list(g.edges(keys=True)),
*edge_args,
by=edges_by,
key=edge_key,
reverse=edges_ascending)
#extract edge,data tuple
if edges_by is None:
edges=[(*e,g.edges[e]) for e in edge_sorting]
else:
edges=[(*e[0],g.edges[e[0]]) for e in edge_sorting]
#wipe graph and add new nodes/edges in order
g.clear()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
#relabel to new ranking if desired
if relabel:
mapping={n:i for i,n in enumerate([node[0] for node in nodes])}
relabel_graph(g,mapping)
new_node_sorting=[]
for node,rank in node_sorting:
new_node_sorting+=[(g.inverse_mapping[node],rank)]
node_sorting=new_node_sorting
sorting=nx.utils.groups(dict(node_sorting))
g.sorting={k:list(v) for k,v in sorting.items()}
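# Cell
# Editor's example (sketch, not part of the original API): in a 4-ring with
# self-loops every node has in-degree 2, so after sorting with relabel=True
# the `sorting` attribute groups all (integer-relabeled) nodes under rank 2.
def _example_sort_graph():
    g=ring(N=4,left=True,loop=True)
    sort_graph(g,relabel=True)
    return g.sorting #e.g. {2: [0, 1, 2, 3]}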
| 16,858 | 30.809434 | 118 |
py
|
networkm
|
networkm-master/networkm/_nbdev.py
|
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"ring": "00_graph_functions.ipynb",
"table": "00_graph_functions.ipynb",
"print_graph": "00_graph_functions.ipynb",
"parse_kwargs": "00_graph_functions.ipynb",
"give_nodes": "00_graph_functions.ipynb",
"parse_edges": "00_graph_functions.ipynb",
"give_edges": "00_graph_functions.ipynb",
"node_attrs": "00_graph_functions.ipynb",
"edge_attrs": "00_graph_functions.ipynb",
"node_data": "00_graph_functions.ipynb",
"edge_data": "00_graph_functions.ipynb",
"argwhere": "00_graph_functions.ipynb",
"kwargwhere": "00_graph_functions.ipynb",
"where": "00_graph_functions.ipynb",
"parse_lengths": "00_graph_functions.ipynb",
"convert_edges": "00_graph_functions.ipynb",
"relabel_graph": "00_graph_functions.ipynb",
"sort_graph": "00_graph_functions.ipynb",
"XOR": "01_model_functions.ipynb",
"XOR.mask": "01_model_functions.ipynb",
"boolxor": "01_model_functions.ipynb",
"smoothxor": "01_model_functions.ipynb",
"MPX": "01_model_functions.ipynb",
"MPX.mask": "01_model_functions.ipynb",
"MUX": "01_model_functions.ipynb",
"boolmux": "01_model_functions.ipynb",
"smoothmux": "01_model_functions.ipynb",
"NOT": "01_model_functions.ipynb",
"NOT.mask": "01_model_functions.ipynb",
"boolnot": "01_model_functions.ipynb",
"smoothnot": "01_model_functions.ipynb",
"COPY": "01_model_functions.ipynb",
"COPY.mask": "01_model_functions.ipynb",
"boolcopy": "01_model_functions.ipynb",
"smoothcopy": "01_model_functions.ipynb",
"AND": "01_model_functions.ipynb",
"AND.mask": "01_model_functions.ipynb",
"booland": "01_model_functions.ipynb",
"smoothand": "01_model_functions.ipynb",
"OR": "01_model_functions.ipynb",
"OR.mask": "01_model_functions.ipynb",
"boolor": "01_model_functions.ipynb",
"smoothor": "01_model_functions.ipynb",
"NOR": "01_model_functions.ipynb",
"NAND": "01_model_functions.ipynb",
"XNOR": "01_model_functions.ipynb",
"NOR.mask": "01_model_functions.ipynb",
"boonlor": "01_model_functions.ipynb",
"smoothnor": "01_model_functions.ipynb",
"NAND.mask": "01_model_functions.ipynb",
"boonlnand": "01_model_functions.ipynb",
"smoothnand": "01_model_functions.ipynb",
"XNOR.mask": "01_model_functions.ipynb",
"NXOR": "01_model_functions.ipynb",
"boonlxnor": "01_model_functions.ipynb",
"smoothxnor": "01_model_functions.ipynb",
"BOOL": "01_model_functions.ipynb",
"BOOL.masks": "01_model_functions.ipynb",
"sigmoid": "01_model_functions.ipynb",
"nextafter": "01_model_functions.ipynb",
"test": "01_model_functions.ipynb",
"bool_model_iter": "01_model_functions.ipynb",
"bool_initial_conditions": "01_model_functions.ipynb",
"setup_bool_integral": "01_model_functions.ipynb",
"bool_integral": "01_model_functions.ipynb",
"bool_integral_risefall": "01_model_functions.ipynb",
"plot_graph": "02_network_class.ipynb",
"bool_model": "01_model_functions.ipynb",
"bn_integral": "02_network_class.ipynb",
"bn_integral_risefall": "02_network_class.ipynb",
"fig_params": "02_network_class.ipynb",
"force_aspect": "02_network_class.ipynb",
"logic_colors": "02_network_class.ipynb",
"differential_labels": "02_network_class.ipynb",
"plot_timeseries": "02_network_class.ipynb",
"perturb": "02_network_class.ipynb",
"stack": "02_network_class.ipynb",
"perturb_init": "02_network_class.ipynb",
"BooleanNetwork": "02_network_class.ipynb",
"ring_bn": "02_network_class.ipynb",
"random_bn": "02_network_class.ipynb",
"mpx_edges": "02_network_class.ipynb",
"delay_edges": "02_network_class.ipynb",
"plot_comparison": "02_network_class.ipynb",
"bool_diff": "03_hbn_puf.ipynb",
"plot_lya": "03_hbn_puf.ipynb",
"plot_mu": "03_hbn_puf.ipynb",
"puf_statistics": "03_hbn_puf.ipynb",
"bool_lyapunov": "03_hbn_puf.ipynb",
"lya_fit": "03_hbn_puf.ipynb",
"booleanize": "03_hbn_puf.ipynb",
"analyze": "03_hbn_puf.ipynb",
"BooleanNetworkEnsemble": "03_hbn_puf.ipynb"}
modules = ["graph_functions.py",
"model_functions.py",
"network_class.py",
"puf_functions.py"]
doc_url = "https://Noeloikeau.github.io/networkm/"
git_url = "https://github.com/Noeloikeau/networkm/tree/master/"
def custom_doc_links(name): return None
| 4,961 | 44.522936 | 63 |
py
|
networkm
|
networkm-master/networkm/__init__.py
|
__version__ = "0.0.1"
import warnings
with warnings.catch_warnings(): #ignore warnings
warnings.simplefilter("ignore")
import networkx as nx
import numpy as np
import sidis
rng=sidis.RNG(0)
import matplotlib.pyplot as plt
import typing
from typing import Optional, Tuple, Dict, Callable, Union, Mapping, Sequence, Iterable, Hashable, List, Any
from collections import namedtuple
from networkm.graph_functions import *
from networkm.model_functions import *
from networkm.network_class import *
from networkm.puf_functions import *
| 593 | 30.263158 | 111 |
py
|
networkm
|
networkm-master/networkm/network_class.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 02_network_class.ipynb (unless otherwise specified).
__all__ = ['bn_integral', 'bn_integral_risefall', 'fig_params', 'force_aspect', 'logic_colors', 'plot_graph',
'differential_labels', 'plot_timeseries', 'perturb', 'stack', 'perturb_init', 'BooleanNetwork', 'ring_bn',
'random_bn', 'mpx_edges', 'delay_edges', 'plot_comparison']
# Cell
import warnings
with warnings.catch_warnings(): #ignore warnings
warnings.simplefilter("ignore")
import networkx as nx
import numpy as np
import sidis
rng=sidis.RNG(0)
import matplotlib.pyplot as plt
import typing
from typing import Optional, Tuple, Dict, Callable, Union, Mapping, Sequence, Iterable, Hashable, List, Any
from collections import namedtuple
import numba
from numba import njit
from .graph_functions import *
from .model_functions import *
import matplotlib as mpl
# Cell
@njit
def bn_integral(iterator,
time_delays,
sigmoid_constants,
time_constants,
predecessors,
initial_condition_matrix,
hold_times,
dt,
T,
noise_scale,
decimation,
repeats=1
):
'''
Jit accelerated integration 'BooleanNetwork' class. Nearly identical to
`bool_integral` with a loop over repeated `initial_conditions` and without
a noise argument, in favor of generating noise internal to the function.
'''
    C,N=initial_condition_matrix.shape #challenges,nodes
responses = np.zeros((C,repeats,int(T/decimation),N))
for c,r in np.ndindex(C,repeats): #diff inits
initial_conditions=initial_condition_matrix[c]
x=np.zeros((T,N)).astype(np.longdouble)
dx=np.zeros(x.shape[-1]).astype(np.longdouble)
for t in range(x.shape[0]-1):
noise=np.empty(x.shape[1])
for n in range(x.shape[1]):
noise[n]=np.random.random()*noise_scale
#noise=noise*noise_scale
edge_index=0
if t<max(hold_times):
for n in range(x.shape[-1]):
if hold_times[n]>=t:
x[t,n]=initial_conditions[n]
for i in range(len(iterator)):
n1,n2,deg,mask=iterator[i]
d=-time_delays[edge_index:edge_index+(n2-n1)*deg].reshape((n2-n1,deg))
d+=t
p=predecessors[edge_index:edge_index+(n2-n1)*deg].reshape((n2-n1,deg))
a=sigmoid_constants[n1:n2].reshape((n2-n1,1))
edge_index+=(n2-n1)*deg
y=np.zeros((n2-n1,deg))#.astype(np.longdouble)
for k in range(n2-n1):
for j in range(deg):
de=d[k,j]
pr=p[k,j]
y[k,j]=x[de,pr]
y=sigmoid(x=y,a=a)
dx[n1:n2]=BOOL(y,mask)
dx=sigmoid(dx,sigmoid_constants)
dxdt=(-x[t]+dx+noise)/time_constants
x[t+1]=x[t]+dt*dxdt
responses[c,r]=x[::decimation]
return responses
# Cell
@njit
def bn_integral_risefall(iterator,
time_delays,
sigmoid_constants,
time_constants,
predecessors,
initial_condition_matrix,
hold_times,
dt,
T,
noise_scale,
decimation,
repeats=1
):
'''
Jit accelerated integration 'BooleanNetwork' class. Nearly identical to
`bool_integral_risefall` with a loop over repeated `initial_conditions` and without
a noise argument, in favor of generating noise internal to the function.
'''
    C,N=initial_condition_matrix.shape #challenges,nodes
responses = np.zeros((C,repeats,int(T/decimation),N))
for c,r in np.ndindex(C,repeats): #diff inits
initial_conditions=initial_condition_matrix[c]
x=np.zeros((T,N)).astype(np.longdouble)
dx=np.zeros(x.shape[-1]).astype(np.longdouble)
for t in range(x.shape[0]-1):
noise=np.empty(x.shape[1])
for n in range(x.shape[1]):
noise[n]=np.random.random()*noise_scale
#noise=noise*noise_scale
edge_index=0
if t<max(hold_times):
for n in range(x.shape[-1]):
if hold_times[n]>=t:
x[t,n]=initial_conditions[n]
for i in range(len(iterator)):
n1,n2,deg,mask=iterator[i]
d=-time_delays[edge_index:edge_index+(n2-n1)*deg].reshape((n2-n1,deg))
d+=t
p=predecessors[edge_index:edge_index+(n2-n1)*deg].reshape((n2-n1,deg))
a=sigmoid_constants[n1:n2].reshape((n2-n1,1))
edge_index+=(n2-n1)*deg
y=np.zeros((n2-n1,deg))#.astype(np.longdouble)
for k in range(n2-n1):
for j in range(deg):
de=d[k,j]
pr=p[k,j]
y[k,j]=x[de,pr]
y=sigmoid(x=y,a=a)
dx[n1:n2]=BOOL(y,mask)
dx=sigmoid(dx,sigmoid_constants)
tau=time_constants[:,0]+(time_constants[:,1]-time_constants[:,0]
)*sigmoid(x[t],sigmoid_constants)
dxdt=(-x[t]+dx+noise)/tau
x[t+1]=x[t]+dt*dxdt
responses[c,r]=x[::decimation]
return responses
# Cell
def fig_params(X : float = 3.5,Y : float = 3,
hspace : float = 0.0,
offset : float = -.4,
font : str = 'Times New Roman',
fontsize : int = 12,
ticksize : int = 6,
tickwidth : int = 2,
linewidth : int = 2,
reset : bool = False,
):
'''
Changes the `rcParams` for plotting, with the option to `reset` to default.
'''
if reset:
mpl.rcParams.update(mpl.rcParamsDefault)
return
else:
#figure font Times New Roman,Helvetica, Arial, Cambria, or Symbol
mpl.rcParams['font.family'] = font
mpl.rcParams['font.size'] = fontsize
mpl.rcParams['axes.titlesize'] = fontsize
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['axes.titley'] = offset
mpl.rcParams['xtick.major.size'] = ticksize
mpl.rcParams['xtick.major.width'] = tickwidth
mpl.rcParams['xtick.minor.size'] = ticksize//2
mpl.rcParams['xtick.minor.width'] = tickwidth//2
mpl.rcParams['xtick.direction']='out'
mpl.rcParams['ytick.major.size'] = ticksize
mpl.rcParams['ytick.major.width'] = tickwidth
mpl.rcParams['ytick.minor.size'] = ticksize//2
mpl.rcParams['ytick.minor.width'] = tickwidth//2
mpl.rcParams['ytick.direction']='out'
mpl.rcParams['figure.figsize'] = [X,Y]
mpl.rcParams['figure.subplot.hspace'] = hspace
# Cell
def force_aspect(ax,aspect=1):
'''
Helper function to force the aspect of the matplotlib 'ax' axes object.
'''
try:
im = ax.get_images()
extent = im[0].get_extent()
except:
x,y=ax.get_xlim(),ax.get_ylim()
extent = [x[0],x[1],y[0],y[1]]
ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)
# Cell
logic_colors={XOR:'r',NOT:'b',MPX:'g',AND:'y',OR:'m',COPY:'c',XNOR:'k',NAND:'w'}
# Cell
def plot_graph(b,XY=(5,5),
layout=nx.kamada_kawai_layout,layout_kwargs=dict(weight='delay'),
colors=logic_colors,
ls='--',lw=0.1,head_width=0.035,color='black',shape='full'):
'''
Plot the boolean network 'b' using 'layout'.
Defaults to using the path-length cost function 'nx.kamada_kawai_layout'
which attempts to force the path between each node to be scaled by the
'weight', in this case the time-delay. Other args are for plotting arrows.
'''
#mpl.rcParams['figure.figsize'] = [XY[0],XY[1]]
try:
d=b.layout
except:
d=layout(b,**layout_kwargs)
b.layout=d
xy=np.array(list(d.values()))
fig,ax=plt.subplots(figsize=XY)
for func,nodes in b.funcs.items():
ax.scatter(xy[nodes,0],xy[nodes,1],c=[colors[b.f[n]] for n in nodes],label=func.__name__)
force_aspect(ax)
for v,(x,y) in d.items():
p=b.in_edges(v,keys=True)
for e in p:
u=e[0]
x0,y0=d[u]
dx,dy=x0-x,y0-y
ax.arrow(x,y,dx,dy,
shape=shape,
color=color,
linestyle=ls,
lw=lw,
length_includes_head=True,
head_width=head_width)
#from matplotlib.font_manager import FontProperties
#fontP = FontProperties()
#fontP.set_size(set_size)
lgd=fig.legend(loc='lower center',
#bbox_to_anchor=(0.5,1.1),
ncol=len(b.funcs))
#prop=fontP,
#fancybox=True, shadow=True)
#ax.set_title(layout.__name__)
ax.axis('off')
plt.show()
#fig_params(reset=True)
# Cell
def differential_labels(bn,nodes=None):
'''
Generate text of the differential equations for each node
'''
if nodes is None:
nodes=bn.nodes
    template=lambda i,p: r'$\tau_{i} \dot x_{i} = -x_{i}(t)+{p}{eps}$'.format(i=i,p=p,
                                        eps=r'+\epsilon' if bn.noise else '')
labels=[]
delay='delay'
f='f'
for i in nodes:
p=','.join([f'X_{e[0]}(t-{bn.edges[e][delay]:.1f})' for e in bn.in_edges(i,keys=True)])
p=f'{bn.nodes[i][f].__name__}['+p
p+=']'
labels+=[template(i=i,p=p)]
return labels
# Cell
def plot_timeseries(xs,labels,colors,t0=0,t1=-1,alpha=0.3,
loc='upper center',
bbox_to_anchor=(0.5, 1.3),
ncol=1,
set_size='small',
fname=None,
shrink=0.8,
dt=0.01,
ls='-',
xy=(3,3),
handlelength=0,
handletextpad=0,
fancybox=True,
shadow=True,
left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.1,
fontsize=10
):
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size(set_size)
clip = lambda v,t0=t0,t1=t1: np.clip(v[t0:t1],0,1)
xs = [clip(x) for x in xs]
I=len(xs)
fig,ax=plt.subplots(nrows=I,ncols=1,sharex=True, sharey=False,figsize=xy)
time_axis=np.around((np.arange(xs[0].shape[0])*dt),2)
plots=[]
for i,x in enumerate(xs):
p,=ax[i].plot(time_axis,x,color=colors[i],
ls=ls,
label=labels[i])
plots+=[p]
#ax[i].spines['right'].set_visible(False)
#ax[i].spines['top'].set_visible(False)
#ax[i].spines['bottom'].set_visible(False if i!=I-1 else True)
ax[i].xaxis.set_visible(False if i!=I-1 else True)
#ax[i].legend(loc=loc, bbox_to_anchor=bbox_to_anchor,
# ncol=ncol, fancybox=True, shadow=True,prop=fontP)
#leg = ax[i].legend(handlelength=handlelength,
# handletextpad=handletextpad, loc=loc, bbox_to_anchor=bbox_to_anchor,
# ncol=ncol, fancybox=fancybox, shadow=shadow,prop=fontP)#,mode="expand", borderaxespad=0.)
#for item in leg.legendHandles:
# item.set_visible(False)
ax[i].set_title(labels[i],fontsize=fontsize)
ax[i].set_ylabel(r'$x_{0}$'.format(i))
#force_aspect(ax[i],len(xs))
#box = ax[i].get_position()
#ax[i].set_position([box.x0, box.y0, box.width * shrink, box.height])
plt.xlabel(r'Time ($\tau$)')
#lgd=fig.legend(handles=plots,loc=loc, bbox_to_anchor=bbox_to_anchor,
# ncol=ncol, fancybox=True, shadow=True,prop=fontP)
if fname:
plt.savefig(fname=fname,bbox_inches='tight',dpi=600)
fig.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace)
plt.tight_layout()
plt.show()
# Cell
def perturb(a,scale=0.1):
'''
Helper function for randomly shuffling entries of an array 'a'
by the fraction 'scale'.
'''
return (a*(1.+rng.random(-scale/2.,scale/2.,shape=a.shape))).astype(a.dtype)
# Cell
def stack(a,times=1,axis=0):
'''
Helper function for stacking an array 'a' along its 'axis'
a certain number of 'times'.
'''
return np.repeat(np.expand_dims(a,axis),times,axis=axis).astype(a.dtype)
# Cell
def perturb_init(init):
'''
Helper function for generating all single bit flips
of a binary array 'init', e.g, init=[0,0] returns
[0,0],[1,0],[0,1].
'''
I=len(init)
inits=stack(init,axis=0,times=I+1)
for n in range(I):
inits[n+1][n]=sidis.NOT(inits[n+1][n])
return inits
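# Cell
# Editor's example (sketch, not part of the original API): `perturb_init`
# returns the seed plus one single-bit-flipped copy per bit.
def _example_perturb_init():
    return perturb_init(np.array([0,1])) #array([[0, 1], [1, 1], [0, 0]])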
# Cell
class BooleanNetwork(nx.MultiDiGraph):
'''
Model the dynamics of the graph `g` by giving the node attributes.
f : logical function.
a : sigmoid function.
tau : time constant.
and edge attributes
delay : time-delay
Each can be dict keyed by node, a single value, or tuple of callable/args choosing val;
see `parse_kwargs`. Converts any edges with the given `edge_replacements` (see
`convert_edges` function for arguments); useful for `MPX`.
Sorts the graph in place using `sort_graph` in order to produce
an iterable with `bool_model_iter`. See also `setup_bool_integral`.
    Initializes the dynamics to `init` using the hold times `hold`;
    see `bool_initial_conditions`. Integrates with `bn_integral`,
    or with `bn_integral_risefall` if `tau` is given as an array of
    [rise_time,fall_time]. Returns the node state array `x`
and optionally plots the resulting dynamics with `plot_graph`.
Includes most functions from the `graph_functions` library.
Other args:
edge_replacements: dict of replacements for edges
T: integration time
dt: integration timestep
noise: amplitude of noise fluctuation at each timestep
steady: whether to map boolean functions over initial condition first
view: whether to change default edge view (in/out)
plot: whether to automatically integrate and plot the given vals
init: default initial condition to apply
hold: default hold times for initial conditions
take_attrs: whether to override the above and absorb any attributes from `g`
'''
def __init__(self,
g : nx.MultiDiGraph = ring(N=3,right=True,left=False,loop=False),
a : Union[float,Dict[Any,float],Tuple[Union[Callable,float]]] = np.inf,
tau : Union[float,Dict[Any,float],Tuple[Union[Callable,float]]] = 1.,
f : Union[Callable,Dict[Any,float],Tuple[Union[Callable,float]]] = NOT,
delay : Union[float,Dict[Any,float],Tuple[Union[Callable,float]]] = 0.,
edge_replacements : Optional[dict] = None,
T : int = 25,
dt : Union[float,int] = 0.01,
noise : Union[float,int] = 0.,
steady : bool = False,
view : str = 'out',
plot : bool = False,
init : Union[float,Dict[Any,float],Tuple[Union[Callable,float]]] = None,
hold : Union[float,Dict[Any,float],Tuple[Union[Callable,float]]] = None,
take_attrs : bool = True
):
#super init
super().__init__(g)
#change edge view, if desired
self.view(view)
#give node/edge attrs
self.give_nodes(a=a,tau=tau,f=f)
self.give_edges(delay=delay)
#replace them with current data, if available
if take_attrs:
self.take_attrs(g)
#list original topology
self.original_nodes=list(g.nodes)
self.original_edges=list(g.edges)
#replace edges with nodes if applicable
if edge_replacements:
self.convert_edges(edge_replacements)
self.new_nodes=[n for n in self.nodes if n not in self.original_nodes]
self.new_edges=[n for n in self.edges if n not in self.original_edges]
else:
self.new_nodes=None
self.new_edges=None
#sort the graph object in place and convert node labels to integers
self.sort(relabel=True)
#create jit integrator iterator and store groups of funcs
self.iter,self.funcs=bool_model_iter(self,return_funcs=True)
#set aliases for functions as self attrs
for k,v in self.funcs.items():
setattr(self,k.__name__,v)
#set nodes to be forced for initial conds
try:
self.dynamical_nodes=self.MPX
except:
self.dynamical_nodes=self.original_nodes
#set noise and timescales
self.noise=noise
self.dt=dt
self.T=int(T/dt)
#save node/edge data as self attr arrays
self.node_data(save=True)
self.edge_data(save=True)
#ensure float type to prevent integer division
self.a=self.a.astype(float)
self.tau=self.tau.astype(float)
self.delay=self.delay.astype(float)
#if hold given as a float, treat it as a constant
if isinstance(hold,(int,float)):
hold=[hold for n in self.dynamical_nodes]
#if hold given as a tuple, treat it as a delayed func eval
elif isinstance(hold,tuple):
hold=[parse_kwargs(h=hold)['h'] for n in self.dynamical_nodes]
#if init given as a tuple, treat it as a delayed func eval
#elif isinstance(hold,tuple):
# init=[parse_kwargs(i=init)['i'] for n in self.dynamical_nodes]
#convert the initial conditions to match the dynamical nodes, steady state optional
self.init,self.hold=bool_initial_conditions(self,init=init,hold=hold,steady=steady)
self.init=np.array(self.init).astype(np.float64)
#predecessor array alias
self.predar=np.concatenate([list(self.predecessors(n)) for n in self.nodes])
#choose integral based on if different rise/falls exist
if len(self.tau.shape)!=1:
self.integral=bn_integral_risefall
else:
self.integral=bn_integral
#if plot, integrate default and show timeseries
if plot:
self.x=self.integrate()
self.plot(self.x)
def take_attrs(self,
g,
node_attrs=['a','tau','f'],
edge_attrs=['delay']
):
'''
Absorb the 'node_attrs' and 'edge_attrs' from graph 'g'.
'''
for v in g.nodes:
for a in node_attrs:
try:
self.nodes[v][a]=g.nodes[v][a]
except:
pass
for e in g.edges:
for a in edge_attrs:
try:
self.edges[e][a]=g.edges[e][a]
except:
pass
def parse_initial_condition(self,
init=None,
hold=None,
steady=False,
override=False):
'''
Store any given 'init' and 'hold' values as self attrs.
'''
if init is not None and hold is not None: #if both given, replace current
self.init,self.hold=bool_initial_conditions(self,init=init,hold=hold,steady=steady)
elif init is None and hold is not None: #replace hold
_,self.hold=bool_initial_conditions(self,init=init,hold=hold,steady=steady)
elif init is not None and hold is None:
self.init,_=bool_initial_conditions(self,init=init,hold=hold,steady=steady)
#else save current vals if both none
self.init,self.hold=self.init.astype(np.longdouble),self.hold.astype(np.int64)
def vary(self,*args,scale=0.1):
'''
Perturb the attributes given as 'args' by 'scale'.
e.g, vary('a',0.1) sets bn.a = perturb(bn.a,0.1)
'''
for a in args:
if isinstance(a,(float,int)):
scale=a
else:
setattr(self,a,perturb(getattr(self,a),scale=scale))
def random_init(self): #random initial conditions
'''
Random initial condition compatible with 'parse_initial_condition'
'''
return rng.random(0,1,asint=True,shape=(len(self.dynamical_nodes)))
def integrate(self,
init=None,
hold=None,
noise=None,
T=None,
dt=None,
steady=False,
override=False,
repeats=1,
decimation=1,
save=False #whether to save as self.x
):
'''
Integrate the given dynamics.
init: initial condition of the dynamical nodes.
can be a matrix, i.e multiple arrays of initial conditions
hold: hold times of dynamical nodes
noise: float value for max random noise at each timestep
T,dt: integration time and timestep (T gets converted to T/dt internally)
steady: whether to map the node boolean functions over the init first,
before starting the integration. For MPXs, this is ignored
override: whether to ignore processing of init and hold and use
the exact values given, e.g non-standard inits for non-dynamical nodes
repeats: how many times to repeat each init
decimation: how many timesteps to ignore in the output, e.g x[::decimation]
'''
#update self attrs
if dt is not None:
self.dt=dt
if decimation is None:
            decimation=int(1/self.dt)
if T is None:
T=self.T
else:
self.T=int(T/self.dt)
if noise is not None:
self.noise=noise
#typecast
if isinstance(hold,(int,float)):
hold=[hold for n in self.dynamical_nodes]
if init is not None and not isinstance(init,np.ndarray):
init=np.array(init)
if hold is not None and not isinstance(hold,np.ndarray):
hold=np.array(hold)
#if multiple inits given, parse each and store as matrix
if isinstance(init,np.ndarray) and len(init.shape)>1:
inits=np.zeros((init.shape[0],len(self))).astype(np.float64)
for i in range(len(init)):
self.parse_initial_condition(init=init[i],hold=hold,steady=steady,override=override)
inits[i]=self.init
self.init=inits
else:#only one init, parse if given otherwise use currently saved init/hold
if init is None and hold is None:
pass
else:
self.parse_initial_condition(init=init,hold=hold,steady=steady,override=override)
#final check that init and hold are array type
self.init=np.array(self.init)
self.hold=np.array(self.hold)
#expand dims of init for jit integrator, which accepts a matrix
self.init=self.init if len(self.init.shape)==2 else stack(self.init,times=1,axis=0)
#integrate with appropriate type of each var for jit
x=self.integral(iterator=self.iter.astype(np.int64),
time_delays=(self.delay/self.dt).astype(np.int64),
sigmoid_constants=self.a.astype(np.longdouble),
time_constants=self.tau.astype(np.longdouble),
predecessors=self.predar.astype(np.int64),
initial_condition_matrix=self.init.astype(np.longdouble),
hold_times=(self.hold/self.dt).astype(np.int64),
dt=self.dt,
T=self.T,
noise_scale=self.noise,
decimation=decimation,
repeats=repeats
)
#remove any extra dimensions and return
self.init=np.squeeze(self.init)
x=np.squeeze(x)
if not save:
return x
else:
self.x=x
def query(self,
challenges=1,
repeats=1,
instances=1,
scale=0.1,
variables=('a','tau','hold'),
noise=None,
T=None,
dt=None,
steady=False,
override=False,
decimation=1,
hold=None):
'''
Integrate the initial conditions 'challenges' 'repeats' number of times.
Do this for each 'instances', which correspond to perturbed 'variables'
mimicing the effects of process variation on e.g node time constants.
`challenges` can be an array of inits, or an integer, which specifies
the number of random inits to choose.
'''
#default decimation to inverse timestep if not given
if decimation is None:
decimation=int(1/self.dt)
#if int, choose random inits
if isinstance(challenges,int):
inits=np.array([self.random_init() for c in range(challenges)])
challenges=len(inits)
else: #use given inits
inits=challenges
challenges=len(inits)
#integration results
responses=np.zeros((instances,challenges,repeats,int(self.T/decimation),len(self)))
#vary properties of each instance and integrate
for i in range(instances):
self.vary(*variables,scale=scale)
responses[i]=self.integrate(init=inits,hold=hold,noise=noise,dt=dt,T=T,steady=steady,
override=override,repeats=repeats,decimation=decimation)
return responses
def view(self,view='in'):
'''
Change default edge view
'''
if view=='in':
self.edge_view=nx.classes.reportviews.InMultiEdgeView
elif view=='out':
self.edge_view=nx.classes.reportviews.OutMultiEdgeView
@nx.MultiDiGraph.edges.getter
def edges(self):
return self.edge_view(self)
def sort(self,**kwargs):
'''
See `sort`
'''
sort_graph(self,**kwargs)
def relabel(self,mapping=None):
'''
See `relabel`
'''
relabel_graph(self,mapping)
def node_attrs(self):
'''
See `node_attrs`
'''
return node_attrs(self)
def edge_attrs(self):
'''
See `edge_attrs`
'''
return edge_attrs(self)
def give_nodes(self,data=None,nodes=None,**kwargs):
'''
See `give_nodes`
'''
give_nodes(self,data=data,nodes=nodes,**kwargs)
def give_edges(self,data=None,edges=None,**kwargs):
'''
See `give_edges`
'''
give_edges(self,data=data,edges=edges,**kwargs)
def give_self(self,data=None,**kwargs):
'''
Give any arg dict and kwargs to self as attrs
'''
try:
sidis.give(self,**parse_kwargs(**kwargs))
except:
pass
if data:
for k,v in data.items():
try:
self.__dict__.update(parse_kwargs(**v))
except KeyError:
pass
def clear_nodes(self,*args):
'''
Remove arg entries from node dict
'''
if not args:
args=self.node_attrs()
        [[sidis.give(self.nodes[n],arg,None) for n in self.nodes] for arg in args]
def clear_edges(self,*args):
'''
Remove arg entries from edge dict
'''
if not args:
args=self.edge_attrs()
[[sidis.give(self.edges[e],arg,None) for e in self.edges] for arg in args]
def node_data(self,*args,save=False):
'''
Set node attributes as self attribute array.
'''
if not save:
return node_data(self,*args)
else:
self.give_self(**node_data(self,*args))
def edge_data(self,*args,save=False):
'''
Set edge attributes as self attribute array.
'''
if not save:
return edge_data(self,*args)
else:
self.give_self(**edge_data(self,*args))
def convert_edges(self,edge_replacements=None):
'''
see `convert_edges`; this function pipelines those
arguments for BooleanNetwork attrs
and sets defaults to multiplexing behavior
'''
if edge_replacements is not None:
label=edge_replacements.get('label')
edges=edge_replacements.get('edges')
lengths=edge_replacements.get('lengths')
a=edge_replacements.get('a')
tau=edge_replacements.get('tau')
f=edge_replacements.get('f')
delay=edge_replacements.get('delay')
if label is None:
label = lambda g,node,iterable : len(g)+iterable
if lengths is None:
lengths=1
if delay is None:
delay = 0
if a is None:
a=np.inf
if tau is None:
tau=1
if f is None:
f=MPX
convert_edges(self,edges=edges,lengths=lengths,delay=delay,label=label,
node_data=dict(a=a,f=f,tau=tau))
def where(self,*args,**kwargs):
'''
See `where`
'''
return where(self,*args,**kwargs)
def edgewhere(self,*args):
'''
`where` but args assume edges
'''
return np.array(self.edges)[self.where(*args)]
def nodewhere(self,*args):
'''
`where` but args assume nodes
'''
return np.array(self.nodes)[self.where(*args)]
def __str__(self):
'''
Yield string of the graph topology
'''
return print_graph(self,string=True)
def __repr__(self):
'''
Truncate self string as representation
'''
s=str(self)
S=super().__repr__()
spl=s.split('\n')
if len(spl)>10:
spl=spl[:5]+['...']+spl[-5:]
return S+'\n'+('\n').join(spl)
def plot(self,x=None):
'''
Make separate plots of the node states `x` for each in-degree grouping.
'''
if x is None:
try:
x=self.x
except:
self.integrate(save=True)
x=self.x
for f,nodes in self.funcs.items():
for i in nodes:
plt.plot(np.arange(x.shape[0])*self.dt,x[:,i])
title=f'{f.__name__} Nodes: {nodes[0]} to {nodes[-1]}'
plt.title(title)
plt.xlabel(r'Time $(\tau)$')
plt.ylabel('Amplitude')
plt.show()
def plot_3d(self,
x=None,
Tstart=0,
Tstop=None,
nodes=None):
'''
Plot 3-D graph of three nodes over given timespan.
Default to first 3 nodes and full timerange.
'''
if x is None:
try:
x=self.x
except:
self.integrate(save=True)
x=self.x
if nodes is None:
nodes=list(self.nodes)[:3]
if Tstop is None:
Tstop=self.T
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x[Tstart:Tstop,nodes[0]],
x[Tstart:Tstop,nodes[1]],
x[Tstart:Tstop,nodes[2]],
c=np.arange(Tstop-Tstart), cmap='YlGn')
def plot_graph(self,**kwargs):
'''
see `plot_graph`
'''
with warnings.catch_warnings(): #ignore warnings
warnings.simplefilter("ignore")
plot_graph(self,**kwargs)
def plot_timeseries(self,nodes=None,xs=None,labels=None,colors=None,**kwargs):
if nodes is None:
nodes=self.nodes
nodes=list(nodes)
if xs is None:
try:
xs=np.squeeze(self.x)
except:
self.integrate(save=True)
xs=np.squeeze(self.x)
xs=xs[:,nodes]
if labels is None:
labels=np.array(differential_labels(self))[nodes]
if colors is None:
colors=[logic_colors[self.f[n]] for n in nodes]
plot_timeseries(xs=xs.T,labels=labels,colors=colors,**kwargs)
# Cell
mpx_edges = dict(delay=0,a=np.inf,tau=0.5,f=MPX)
delay_edges = dict(f=COPY,lengths='delay',a=np.inf,tau=1,delay=0)
def ring_bn(N=3,
left=True,
right=False,
loop=False,
a=np.inf,
tau=1,
delay=0,
f=NOT,
edge_replacements=None,
dt=0.01,
T=15,
noise=0,
init=None,
hold=None,
steady=False,
plot=False,
view='out'):
'''
Factory function for producing a `ring` `BooleanNetwork`.
'''
g=ring(N=N,left=left,right=right,loop=loop)
return BooleanNetwork(g=g,a=a,tau=tau,f=f,delay=delay,edge_replacements=edge_replacements,
T=T,dt=dt,noise=noise,init=init,hold=hold,steady=steady,plot=plot,view=view)
def random_bn(N=256,
k=3,
a=np.inf,
tau=1,
delay=0,
f=NOT,
edge_replacements=None,
dt=0.01,
T=25,
noise=0,
init=None,
hold=None,
steady=False,
plot=False,
view='out',
seed=None):
'''
Factory function for producing a `nx.random_regular_graph` `BooleanNetwork`.
'''
g=nx.random_regular_graph(k,N,seed=seed)
return BooleanNetwork(g=g,a=a,tau=tau,f=f,delay=delay,edge_replacements=edge_replacements,
T=T,dt=dt,noise=noise,init=init,hold=hold,steady=steady,plot=plot,view=view)
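# Cell
# Editor's example (sketch, not part of the original API): a 3-node inverter
# ring with multiplexed edges, integrated for 5 time constants; with dt=0.01
# the returned state array has 500 rows and one column per node
# (3 ring nodes plus 3 multiplexer nodes after edge conversion).
def _example_ring_bn():
    bn=ring_bn(N=3,edge_replacements=mpx_edges,T=5)
    x=bn.integrate()
    return x.shape #(500, 6)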
# Cell
def plot_comparison(x0,x,y0,y,i=0,t0=0,t1=-1,alpha=0.3,
loc='upper center',
bbox_to_anchor=(0.475, 1.2),
ncol=2,
set_size='x-small',
shrink=1,
fname=None,
figsize=(3.5,3)
):
#plt.rcParams['figure.figsize'] = [X,Y]
z = lambda v,t0=t0,t1=t1,i=i: np.clip(v[t0:t1,i],0,1)
base=z(x0)
comparisons=[z(x),z(y0),z(y)]
colors=['black','red','blue','purple']
    labels=['noise','heterogeneity','noise & heterogeneity']
I=len(comparisons)
fig,ax=plt.subplots(I,sharex=True, sharey=True,figsize=figsize)
t=np.around((np.arange(x0.shape[0]-1)*0.01),2)
plots=[]
# Use a separate loop variable so the node index `i` captured by `z` above is not shadowed.
for row in range(I):
p,=ax[row].plot(t,comparisons[row],color=colors[row+1],ls='--',label=labels[row])
plots+=[p]
p_base,=ax[row].plot(t,base,color=colors[0],label=r'original node')
#ax[row].legend(loc='upper right',bbox_to_anchor=(1.45, 1))
X=np.abs(np.rint(base)-np.rint(comparisons[row]))
p_bool,=ax[row].plot(t,X,color='green',alpha=alpha,label=r'bit difference')
ax[row].spines['right'].set_visible(False)
ax[row].spines['top'].set_visible(False)
ax[row].spines['bottom'].set_visible(False if row!=I-1 else True)
ax[row].xaxis.set_visible(False if row!=I-1 else True)
box = ax[row].get_position()
ax[row].set_position([box.x0, box.y0, box.width * shrink, box.height])
plt.xlabel(r'Time ($\tau$)')
ax[1].set_ylabel(r'Amplitude')
plots+=[p_base,p_bool]
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size(set_size)
lgd=fig.legend(handles=plots,loc=loc, bbox_to_anchor=bbox_to_anchor,
ncol=ncol, fancybox=True, shadow=True,prop=fontP)
if fname:
plt.savefig(fname=fname,bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.tight_layout()
plt.show()
| 36576 | 33.935053 | 117 |
py
|
reco-gym
|
reco-gym-master/setup.py
|
import pathlib
from setuptools import setup, find_packages
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="recogym",
version="0.1.2.4",
description="Open-AI gym reinforcement learning environment for recommendation",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/criteo-research/reco-gym",
author="Criteo AI Lab",
license="Apache License",
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
],
packages=find_packages(exclude=("tests",)),
include_package_data=True,
install_requires=[
"absl-py",
"pandas",
"matplotlib",
"simplegeneric",
"gym",
"torch",
"tensorflow",
"numba",
"tqdm",
"datetime",
"jupyter",
"scipy",
"numpy",
"scikit-learn",
],
)
| 1,140 | 24.355556 | 84 |
py
|
reco-gym
|
reco-gym-master/quality_test.py
|
import subprocess
import tempfile
import nbformat
import unittest
class TestNotebookConsistency(unittest.TestCase):
@staticmethod
def _execute_notebook(path):
"""
Execute a Jupyter Notebook from scratch and convert it into another Jupyter Notebook.
:returns a converted Jupyter Notebook
"""
with tempfile.NamedTemporaryFile(suffix = ".ipynb") as tmp_notebook:
args = [
"jupyter", "nbconvert",
"--to", "notebook",
"--execute",
"--ExecutePreprocessor.timeout=3600",
"--output", tmp_notebook.name,
path
]
subprocess.check_call(args)
tmp_notebook.seek(0)
return nbformat.read(tmp_notebook, nbformat.current_nbformat)
@staticmethod
def _analyse_notebook(notebook):
"""
Analyse notebook cell outputs.
The function goes through all cell outputs and collects any errors and warnings.
:returns a tuple of errors (0th) and warnings (1st)
"""
errors = []
warnings = []
for cell in notebook.cells:
if 'outputs' in cell:
for output in cell['outputs']:
if output.output_type == "error":
errors.append(output)
if output.output_type == "warning":
warnings.append(output)
return errors, warnings
def test_notebooks(self, with_replacement = False):
"""
Launch Jupyter Notebook Tests
This function automates the validation of Jupyter Notebooks
by launching them from scratch and checking that no error or warning
occurs during the run.
Note #1: it is assumed that the current directory
is the same as the one where the test file is located.
Note #2: the name of the Notebook should be given without the extension `*.ipynb'.
with_replacement: when the flag is set `True' and a Jupyter Notebook
has successfully passed the tests, the Notebook is replaced
with a newly generated Notebook with all rendered data, graphs, etc.
"""
for case in {
'Getting Started',
'Compare Agents',
'Likelihood Agents',
'Inverse Propensity Score',
'Explore Exploit Evolution',
'Complex Time Behaviour',
'Pure Organic vs Bandit - Number of Online Users',
'Organic vs Likelihood',
'IPS vs Non-IPS',
'Epsilon Worse',
}:
with self.subTest(i = case):
try:
notebook = self._execute_notebook(case)
except Exception:
self.fail(f"Case has not passed: {case}")
errors, warnings = self._analyse_notebook(notebook)
self.assertEqual(errors, [], f"Case '{case}': NOK -- Errors: {errors}")
self.assertEqual(warnings, [], f"Case '{case}': NOK -- Warnings: {warnings}")
if with_replacement and len(errors) == 0 and len(warnings) == 0:
# `str(notebook)` is not valid JSON, so serialise via nbformat instead.
nbformat.write(notebook, f"{case}.new.ipynb")
print(f"Case '{case}': OK")
if __name__ == '__main__':
unittest.main()
| 3437 | 34.8125 | 93 |
py
|
reco-gym
|
reco-gym-master/sim_test.py
|
import argparse
import datetime
import glob
import os
import types
import pandas as pd
from recogym import (
competition_score,
AgentInit,
)
if __name__ == "__main__":
import tensorflow as tf2
print(f'TensorFlow V2: {tf2.__version__}')
import tensorflow.compat.v1 as tf1
    print(f'TensorFlow V1: {tf1.__version__}')
parser = argparse.ArgumentParser()
parser.add_argument('--P', type=int, default=100, help='Number of products')
parser.add_argument('--UO', type=int, default=100, help='Number of organic users to train on')
parser.add_argument('--U', type=int, default=100, help='Number of users to train on')
parser.add_argument('--Utest', type=int, default=1000, help='Number of users to test')
parser.add_argument('--seed', type=int, default=100, help='Seed')
parser.add_argument('--K', type=int, default=20, help='Number of latent factors')
parser.add_argument('--F', type=int, default=20,
help='Number of flips, how different is bandit from organic')
parser.add_argument('--log_epsilon', type=float, default=0.05,
help='Pop logging policy epsilon')
parser.add_argument('--sigma_omega', type=float, default=0.01, help='sigma_omega')
parser.add_argument('--entries_dir', type=str, default='my_entries',
help='directory with agent files; for a leaderboard of small baselines (small P), try setting it to leaderboard_entries')
# NB: argparse `type=bool` treats any non-empty string (including 'False') as
# True, so a store_true flag is used instead.
parser.add_argument('--with_cache', action='store_true',
help='Use cached training data')
args = parser.parse_args()
P, UO, U, Utest, seed, num_flips, K, sigma_omega, log_epsilon, entries_dir, with_cache = (
args.P,
args.UO,
args.U,
args.Utest,
args.seed,
args.F,
args.K,
args.sigma_omega,
args.log_epsilon,
args.entries_dir,
args.with_cache,
)
print(args)
adf = []
start = datetime.datetime.now()
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
for agent_file in glob.glob(entries_dir + '/*.py'):
print(f'Agent: {agent_file}')
try:
tmp_module = types.ModuleType('tmp_module')
exec(
open(agent_file).read(),
tmp_module.__dict__
)
if hasattr(tmp_module, 'TestAgent'):
agent_class = tmp_module.TestAgent
agent_configs = tmp_module.test_agent_args
agent_name = 'Test Agent'
else:
if hasattr(tmp_module, 'agent'):
for agent_key in tmp_module.agent.keys():
agent_class = tmp_module.agent[agent_key][AgentInit.CTOR]
agent_configs = tmp_module.agent[agent_key][AgentInit.DEF_ARGS]
agent_name = agent_key
else:
print('There is no Agent to test!')
continue
df = competition_score(
P,
UO,
U,
Utest,
seed,
K,
num_flips,
log_epsilon,
sigma_omega,
agent_class,
agent_configs,
agent_name,
with_cache
)
df = df.join(pd.DataFrame({
'entry': [agent_file]
}))
print(df)
adf.append(df)
except Exception as ex:
print(f'Agent @ "{agent_file}" failed: {str(ex)}')
out_dir = entries_dir + '_' + str(P) + '_' + str(U) + '_' + str(Utest) + '_' + str(start)
os.mkdir(out_dir)
fp = open(out_dir + '/config.txt', 'w')
fp.write(str(args))
fp.close()
leaderboard = pd.concat(adf)
leaderboard = leaderboard.sort_values(by='q0.500', ascending=False)
leaderboard.to_csv(out_dir + '/leaderboard.csv')
| 3974 | 33.267241 | 142 |
py
|
reco-gym
|
reco-gym-master/deterministic_test.py
|
from copy import deepcopy
import gym
import numpy as np
from recogym import Configuration
from recogym import env_0_args, env_1_args, test_agent
from recogym.agents import BanditCount, bandit_count_args
from recogym.agents import BanditMFSquare, bandit_mf_square_args
from recogym.agents import LogregMulticlassIpsAgent, logreg_multiclass_ips_args
from recogym.agents import LogregPolyAgent, logreg_poly_args
from recogym.agents import NnIpsAgent, nn_ips_args
from recogym.agents import OrganicCount, organic_count_args
from recogym.agents import OrganicUserEventCounterAgent, organic_user_count_args
from recogym.agents import RandomAgent, random_args
# Add a new environment here.
env_test = {
"reco-gym-v1": env_1_args,
"reco-gym-v0": env_0_args,
}
RandomSeed = 42
# Add a new agent here.
agent_test = {
'prod2vec': BanditMFSquare(Configuration(bandit_mf_square_args)),
'logistic': BanditCount(Configuration(bandit_count_args)),
'randomagent': RandomAgent(Configuration({
**random_args,
'random_seed': RandomSeed,
})),
'logreg_multiclass_ips': LogregMulticlassIpsAgent(Configuration({
**logreg_multiclass_ips_args,
'select_randomly': False,
})),
'logreg_multiclass_ips R': LogregMulticlassIpsAgent(Configuration({
**logreg_multiclass_ips_args,
'select_randomly': True,
'random_seed': RandomSeed,
})),
'organic_counter': OrganicCount(Configuration(organic_count_args)),
'organic_user_counter': OrganicUserEventCounterAgent(Configuration({
**organic_user_count_args,
'select_randomly': False,
})),
'organic_user_counter R': OrganicUserEventCounterAgent(Configuration({
**organic_user_count_args,
'select_randomly': True,
'random_seed': RandomSeed,
})),
'logreg_poly': LogregPolyAgent(Configuration({
**logreg_poly_args,
'with_ips': False,
})),
'logreg_poly_ips': LogregPolyAgent(Configuration({
**logreg_poly_args,
'with_ips': True,
})),
}
eval_size = 5
organic_size = 5
samples = 200  # Number of offline training users; set a large value to train the models properly.
def is_env_deterministic(env, users=5):
c_env = deepcopy(env)
logs_a = c_env.generate_logs(num_offline_users=users, num_organic_offline_users=users)
c_env = deepcopy(env)
logs_b = c_env.generate_logs(num_offline_users=users, num_organic_offline_users=users)
return np.mean(logs_a[~ np.isnan(logs_b.v)].v == logs_b[~ np.isnan(logs_b.v)].v) == 1.
# return logs_a.equals(logs_b)  # can return False even for seemingly identical logs; most likely the column dtypes differ.
if __name__ == "__main__":
for env_name in env_test.keys():
env = gym.make(env_name)
env.init_gym(env_test[env_name])
if is_env_deterministic(env):
print(f"{env_name} appears deterministic")
else:
print(f"{env_name} is NOT deterministic")
for agent_name in agent_test.keys():
agent = agent_test[agent_name]
a = test_agent(deepcopy(env), deepcopy(agent), samples, eval_size, organic_size)
print(f"{agent_name} runs")
b = test_agent(deepcopy(env), deepcopy(agent), samples, eval_size, organic_size)
if a == b:
print(f"{agent_name} appears deterministic")
else:
print(f"{agent_name} is NOT deterministic")
| 3417 | 34.978947 | 114 |
py
|
reco-gym
|
reco-gym-master/my_entries/test_agent.py
|
import numpy as np
from recogym import Configuration, build_agent_init, to_categorical
from recogym.agents import Agent
test_agent_args = {
'num_products': 10,
'with_ps_all': False,
}
class TestAgent(Agent):
"""Organic counter agent"""
def __init__(self, config = Configuration(test_agent_args)):
super(TestAgent, self).__init__(config)
self.co_counts = np.zeros((self.config.num_products, self.config.num_products))
self.corr = None
def act(self, observation, reward, done):
"""Make a recommendation"""
self.update_lpv(observation)
action = self.co_counts[self.last_product_viewed, :].argmax()
if self.config.with_ps_all:
ps_all = np.zeros(self.config.num_products)
ps_all[action] = 1.0
else:
ps_all = ()
return {
**super().act(observation, reward, done),
**{
'a': action,
'ps': 1.0,
'ps-a': ps_all,
},
}
def train(self, observation, action, reward, done = False):
"""Train the model in an online fashion"""
if observation.sessions():
A = to_categorical(
[session['v'] for session in observation.sessions()],
self.config.num_products
)
B = A.sum(0).reshape((self.config.num_products, 1))
self.co_counts = self.co_counts + np.matmul(B, B.T)
def update_lpv(self, observation):
"""updates the last product viewed based on the observation"""
if observation.sessions():
self.last_product_viewed = observation.sessions()[-1]['v']
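# Worked example of the co-count update in `train` (illustration only): for a
# session with organic views [1, 1, 3] and num_products = 4, `A.sum(0)` gives
# the view-count vector B = [0, 2, 0, 1]^T, and B @ B.T adds 4 to
# co_counts[1, 1], 2 to each of co_counts[1, 3] and co_counts[3, 1], and 1 to
# co_counts[3, 3].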
| 1736 | 29.473684 | 87 |
py
|
reco-gym
|
reco-gym-master/leaderboard_entries/likelihood.py
|
from recogym import build_agent_init
from recogym.agents import LogregPolyAgent, logreg_poly_args
agent = build_agent_init('Likelihood', LogregPolyAgent, {**logreg_poly_args, })
| 179 | 35 | 79 |
py
|
reco-gym
|
reco-gym-master/leaderboard_entries/likelihood_with_time.py
|
import numpy as np
from recogym import build_agent_init
from recogym.agents import LogregPolyAgent, logreg_poly_args
agent = build_agent_init(
'LikelihoodWithTime',
LogregPolyAgent,
{
**logreg_poly_args,
'weight_history_function': lambda t: np.exp(-t)
}
)
| 291 | 18.466667 | 60 |
py
|
reco-gym
|
reco-gym-master/leaderboard_entries/dump_agent.py
|
import pandas as pd
import numpy as np
from recogym import Configuration, build_agent_init
from recogym.agents import OrganicUserEventCounterAgent, organic_user_count_args
dump_agent_args = {
'agent': build_agent_init(
'OrganicUserCount',
OrganicUserEventCounterAgent,
{**organic_user_count_args}
)
}
class DumpAgent(OrganicUserEventCounterAgent):
"""
Dump Agent
This is an Agent that dumps every `train' and `act' call it receives.
It is used mostly for debugging purposes.
"""
def __init__(self, config=Configuration(dump_agent_args)):
super(DumpAgent, self).__init__(config)
self.previous_action = None
self.data = {
'case': [],
't': [],
'u': [],
'z': [],
'v': [],
'a': [],
'c': [],
'ps': [],
'ps-a': [],
'done': [],
}
def _dump(self, case, observation, action, reward, done):
def _dump_organic():
for session in observation.sessions():
self.data['case'].append(case)
self.data['t'].append(session['t'])
self.data['u'].append(session['u'])
self.data['z'].append('organic')
self.data['v'].append(session['v'])
self.data['a'].append(None)
self.data['c'].append(None)
self.data['ps'].append(None)
self.data['ps-a'].append(None)
self.data['done'].append(done)
def _dump_bandit():
if action:
self.data['case'].append(case)
self.data['t'].append(action['t'])
self.data['u'].append(action['u'])
self.data['z'].append('bandit')
self.data['v'].append(None)
self.data['a'].append(action['a'])
self.data['c'].append(reward)
self.data['ps'].append(action['ps'])
self.data['ps-a'].append(action['ps-a'])
self.data['done'].append(done)
if case == 'A':
_dump_bandit()
_dump_organic()
else:
_dump_organic()
_dump_bandit()
def train(self, observation, action, reward, done=False):
self._dump('T', observation, action, reward, done)
self.config.agent.train(observation, action, reward, done)
def act(self, observation, reward, done):
"""Make a recommendation"""
self._dump('A', observation, self.previous_action, reward, done)
if done:
return None
else:
action = self.config.agent.act(observation, reward, done)
self.previous_action = action
return action
def reset(self):
super().reset()
self.config.agent.reset()
self.previous_action = None
def dump(self):
self.data['t'] = np.array(self.data['t'], dtype=np.float32)
self.data['u'] = pd.array(self.data['u'], dtype=pd.UInt16Dtype())
self.data['v'] = pd.array(self.data['v'], dtype=pd.UInt16Dtype())
self.data['a'] = pd.array(self.data['a'], dtype=pd.UInt16Dtype())
self.data['c'] = np.array(self.data['c'], dtype=np.float32)
return pd.DataFrame().from_dict(self.data)
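# A minimal usage sketch (illustration only; assumes a configured recogym env):
#
#     >>> dumper = DumpAgent()
#     >>> env.generate_logs(num_offline_users=10, agent=dumper)
#     >>> df = dumper.dump()   # one row per organic view / bandit event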
agent = build_agent_init('DumpAgent', DumpAgent, {**dump_agent_args})
| 3404 | 31.740385 | 80 |
py
|
reco-gym
|
reco-gym-master/leaderboard_entries/reweighted_likelihood.py
|
from recogym import build_agent_init
from recogym.agents import LogregPolyAgent, logreg_poly_args
agent = build_agent_init('Re-weighted', LogregPolyAgent, {**logreg_poly_args, 'with_ips': True, })
| 198 | 38.8 | 98 |
py
|
reco-gym
|
reco-gym-master/leaderboard_entries/bandit_count.py
|
from recogym import build_agent_init
from recogym.agents import BanditCount, bandit_count_args
agent = build_agent_init('BanditCount', BanditCount, {**bandit_count_args})
| 172 | 33.6 | 75 |
py
|
reco-gym
|
reco-gym-master/leaderboard_entries/pytorch_CB_log.py
|
from recogym import build_agent_init
from recogym.agents import PyTorchMLRAgent, pytorch_mlr_args
pytorch_mlr_args['n_epochs'] = 30
pytorch_mlr_args['learning_rate'] = 0.01
pytorch_mlr_args['logIPS'] = True
agent = build_agent_init('PyTorchMLRAgent', PyTorchMLRAgent, {**pytorch_mlr_args})
| 292 | 31.555556 | 82 |
py
|
reco-gym
|
reco-gym-master/leaderboard_entries/context_bandit.py
|
from recogym import build_agent_init
from recogym.agents import LogregMulticlassIpsAgent, logreg_multiclass_ips_args
agent = build_agent_init('Contextual Bandit', LogregMulticlassIpsAgent,
{**logreg_multiclass_ips_args, })
| 249 | 40.666667 | 79 |
py
|
reco-gym
|
reco-gym-master/leaderboard_entries/organic_user_count.py
|
from recogym import build_agent_init
from recogym.agents.organic_user_count import organic_user_count_args, OrganicUserEventCounterAgent
agent = build_agent_init('OrganicUserCount', OrganicUserEventCounterAgent, {**organic_user_count_args})
| 242 | 47.6 | 103 |
py
|
reco-gym
|
reco-gym-master/leaderboard_entries/organic_count.py
|
from recogym import build_agent_init
from recogym.agents import OrganicCount, organic_count_args
agent = build_agent_init('OrganicCount', OrganicCount, {**organic_count_args})
| 177 | 34.6 | 78 |
py
|
reco-gym
|
reco-gym-master/leaderboard_entries/pytorch_CB.py
|
from recogym import build_agent_init
from recogym.agents import PyTorchMLRAgent, pytorch_mlr_args
pytorch_mlr_args['n_epochs'] = 30
pytorch_mlr_args['learning_rate'] = 0.01
agent = build_agent_init('PyTorchMLRAgent', PyTorchMLRAgent, {**pytorch_mlr_args})
| 257 | 35.857143 | 82 |
py
|
reco-gym
|
reco-gym-master/leaderboard_entries/rand_agent.py
|
from recogym import build_agent_init
from recogym.agents import RandomAgent, random_args
agent = build_agent_init('RandomAgent', RandomAgent, {**random_args})
| 160 | 31.2 | 69 |
py
|
reco-gym
|
reco-gym-master/leaderboard_entries/pytorch_likelihood.py
|
from recogym import build_agent_init
from recogym.agents import PyTorchMLRAgent, pytorch_mlr_args
pytorch_mlr_args['n_epochs'] = 30
pytorch_mlr_args['learning_rate'] = 0.01
pytorch_mlr_args['ll_IPS'] = False
pytorch_mlr_args['alpha'] = 1.0
agent = build_agent_init('PyTorchMLRAgent', PyTorchMLRAgent, {**pytorch_mlr_args})
| 326 | 35.333333 | 82 |
py
|
reco-gym
|
reco-gym-master/leaderboard_entries/bandit_mf_square.py
|
from recogym import build_agent_init
from recogym.agents import BanditMFSquare, bandit_mf_square_args
agent = build_agent_init('BanditMFsquare', BanditMFSquare, {**bandit_mf_square_args})
| 189 | 37 | 85 |
py
|
reco-gym
|
reco-gym-master/recogym/competition.py
|
import datetime
import gym
import pandas as pd
from recogym import (
Configuration,
env_1_args,
gather_agent_stats,
build_agent_init,
AgentStats
)
from recogym.agents import OrganicUserEventCounterAgent, organic_user_count_args
def competition_score(
num_products: int,
num_organic_users_to_train: int,
num_users_to_train: int,
num_users_to_score: int,
random_seed: int,
latent_factor: int,
num_flips: int,
log_epsilon: float,
sigma_omega: float,
agent_class,
agent_configs,
agent_name: str,
with_cache: bool,
):
training_data_samples = tuple([num_users_to_train])
testing_data_samples = num_users_to_score
stat_epochs = 1
stat_epochs_new_random_seed = True
std_env_args = {
**env_1_args,
'random_seed': random_seed,
'num_products': num_products,
'K': latent_factor,
'sigma_omega': sigma_omega,
'number_of_flips': num_flips
}
env = gym.make('reco-gym-v1')
agent_stats = gather_agent_stats(
env,
std_env_args,
{
'agent': OrganicUserEventCounterAgent(Configuration({
**organic_user_count_args,
**std_env_args,
'select_randomly': True,
'epsilon': log_epsilon,
'num_products': num_products,
})),
},
{
**build_agent_init(
agent_name,
agent_class,
{
**agent_configs,
'num_products': num_products,
}
),
},
training_data_samples,
testing_data_samples,
stat_epochs,
stat_epochs_new_random_seed,
num_organic_users_to_train,
with_cache
)
time_start = datetime.datetime.now()
q0_025 = []
q0_500 = []
q0_975 = []
for agent_name in agent_stats[AgentStats.AGENTS]:
agent_values = agent_stats[AgentStats.AGENTS][agent_name]
q0_025.append(agent_values[AgentStats.Q0_025][0])
q0_500.append(agent_values[AgentStats.Q0_500][0])
q0_975.append(agent_values[AgentStats.Q0_975][0])
time_end = datetime.datetime.now()
seconds = (time_end - time_start).total_seconds()
return pd.DataFrame(
{
'q0.025': q0_025,
'q0.500': q0_500,
'q0.975': q0_975,
'time': [seconds],
}
)
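# The returned frame has one row per agent with the Beta-posterior CTR
# quantiles (q0.025 / q0.500 / q0.975) plus the wall-clock evaluation time in
# seconds; sim_test.py concatenates these rows into the leaderboard.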
| 2469 | 24.204082 | 80 |
py
|
reco-gym
|
reco-gym-master/recogym/constants.py
|
from enum import Enum
class AgentStats(Enum):
"""
Agent Statistics
"""
# Confidence Interval.
Q0_025 = 0
Q0_500 = 1
Q0_975 = 2
# Number of Samples (Users) in a Training Data.
SAMPLES = 3
AGENTS = 4
SUCCESSES = 5 # Number of clicks.
FAILURES = 6 # Number of non-clicks.
class AgentInit(Enum):
"""
Abstract data for Agent Initialisation.
"""
CTOR = 0 # Agent Constructor.
DEF_ARGS = 1 # Default Agent Arguments.
class TrainingApproach(Enum):
"""
Training Approach of Evolution of Environment (Explore/Exploit approach).
"""
ALL_DATA = 0 # All training data should be used (accumulated).
SLIDING_WINDOW_ALL_DATA = 1 # Fixed amount of training data (sliding window).
ALL_EXPLORATION_DATA = 2 # All training data obtained during Exploration (accumulated).
SLIDING_WINDOW_EXPLORATION_DATA = 3 # Fixed amount of training data obtained during Exploration.
MOST_VALUABLE = 4 # The most valuable training data.
LAST_STEP = 5 # All data BUT obtained only during the last step (both Explore and Exploit).
class EvolutionCase(Enum):
"""
Evolution Stats Data.
"""
SUCCESS = 0
FAILURE = 1
ACTIONS = 2
SUCCESS_GREEDY = 3
FAILURE_GREEDY = 4
class RoiMetrics(Enum):
"""
Return on Investment Data.
"""
ROI_MEAN = 0
ROI_0_025 = 1
ROI_0_975 = 2
ROI_SUCCESS = 3
ROI_FAILURE = 4
| 1447 | 22.354839 | 101 |
py
|
reco-gym
|
reco-gym-master/recogym/evaluate_agent.py
|
import multiprocessing
import time
from copy import deepcopy
from multiprocessing import Pool
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import beta
import recogym
from recogym import (
AgentInit,
AgentStats,
Configuration,
EvolutionCase,
RoiMetrics,
TrainingApproach
)
from recogym.agents import EpsilonGreedy, epsilon_greedy_args
from .envs.context import DefaultContext
from .envs.observation import Observation
from .envs.session import OrganicSessions
EpsilonDelta = .02
EpsilonSteps = 6 # Including epsilon = 0.0.
EpsilonPrecision = 2
EvolutionEpsilons = (0.00, 0.01, 0.02, 0.03, 0.05, 0.08)
GraphCTRMin = 0.009
GraphCTRMax = 0.021
# from Keras
def to_categorical(y, num_classes=None, dtype='float32'):
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=dtype)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
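# A minimal sketch of what `to_categorical` produces (illustration only):
#
#     >>> to_categorical([0, 2, 1], num_classes=3)
#     array([[1., 0., 0.],
#            [0., 0., 1.],
#            [0., 1., 0.]], dtype=float32)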
def evaluate_agent(
env,
agent,
num_initial_train_users=100,
num_step_users=1000,
num_steps=10,
training_approach=TrainingApproach.ALL_DATA,
sliding_window_samples=10000):
initial_agent = deepcopy(agent)
unique_user_id = 0
for u in range(num_initial_train_users):
env.reset(unique_user_id + u)
agent.reset()
new_observation, reward, done, _ = env.step(None)
while True:
old_observation = new_observation
action, new_observation, reward, done, _ = env.step_offline(
new_observation, reward, False
)
agent.train(old_observation, action, reward, done)
if done:
break
unique_user_id += num_initial_train_users
rewards = {
EvolutionCase.SUCCESS: [],
EvolutionCase.SUCCESS_GREEDY: [],
EvolutionCase.FAILURE: [],
EvolutionCase.FAILURE_GREEDY: [],
EvolutionCase.ACTIONS: dict()
}
training_agent = deepcopy(agent)
samples = 0
for action_id in range(env.config.num_products):
rewards[EvolutionCase.ACTIONS][action_id] = [0]
for step in range(num_steps):
successes = 0
successes_greedy = 0
failures = 0
failures_greedy = 0
for u in range(num_step_users):
env.reset(unique_user_id + u)
agent.reset()
new_observation, reward, done, _ = env.step(None)
while not done:
old_observation = new_observation
action = agent.act(old_observation, reward, done)
new_observation, reward, done, info = env.step(action['a'])
samples += 1
should_update_training_data = False
if training_approach == TrainingApproach.ALL_DATA or training_approach == TrainingApproach.LAST_STEP:
should_update_training_data = True
elif training_approach == TrainingApproach.SLIDING_WINDOW_ALL_DATA:
should_update_training_data = samples % sliding_window_samples == 0
elif training_approach == TrainingApproach.ALL_EXPLORATION_DATA:
should_update_training_data = not action['greedy']
elif training_approach == TrainingApproach.SLIDING_WINDOW_EXPLORATION_DATA:
should_update_training_data = (not action[
'greedy']) and samples % sliding_window_samples == 0
else:
assert False, f"Unknown Training Approach: {training_approach}"
if should_update_training_data:
training_agent.train(old_observation, action, reward, done)
if reward:
successes += 1
if 'greedy' in action and action['greedy']:
successes_greedy += 1
rewards[EvolutionCase.ACTIONS][action['a']][-1] += 1
else:
if 'greedy' in action and action['greedy']:
failures_greedy += 1
failures += 1
unique_user_id += num_step_users
agent = training_agent
for action_id in range(env.config.num_products):
rewards[EvolutionCase.ACTIONS][action_id].append(0)
if training_approach == TrainingApproach.LAST_STEP:
training_agent = deepcopy(initial_agent)
else:
training_agent = deepcopy(agent)
rewards[EvolutionCase.SUCCESS].append(successes)
rewards[EvolutionCase.SUCCESS_GREEDY].append(successes_greedy)
rewards[EvolutionCase.FAILURE].append(failures)
rewards[EvolutionCase.FAILURE_GREEDY].append(failures_greedy)
return rewards
def build_agent_init(agent_key, ctor, def_args):
return {
agent_key: {
AgentInit.CTOR: ctor,
AgentInit.DEF_ARGS: def_args,
}
}
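# For example (illustration only; `RandomAgent` / `random_args` stand in for
# any agent constructor and its default arguments):
#
#     build_agent_init('Random', RandomAgent, random_args) returns
#     {'Random': {AgentInit.CTOR: RandomAgent, AgentInit.DEF_ARGS: random_args}}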
def _collect_stats(args):
"""
Function that is executed in a separate process.
:param args: arguments of the process to be executed.
:return: a vector of CTR for these confidence values:
0th: Q0.500
1st: Q0.025
2nd: Q0.975
"""
start = time.time()
print(f"START: Num of Users: {args['num_offline_users']}")
stats = recogym.test_agent(
deepcopy(args['env']),
deepcopy(args['agent']),
args['num_offline_users'],
args['num_online_users'],
args['num_organic_offline_users'],
args['num_epochs'],
args['epoch_with_random_reset'],
args['with_cache'],
)
print(f"END: Num of Offline Users: {args['num_offline_users']} ({time.time() - start}s)")
return stats
def gather_agent_stats(
env,
env_args,
extra_env_args,
agents_init_data,
user_samples=(100, 1000, 2000, 3000, 5000, 8000, 10000, 13000, 14000, 15000),
num_online_users: int = 15000,
num_epochs: int = 1,
epoch_with_random_reset: bool = False,
num_organic_offline_users: int = 100,
with_cache: bool = False
):
"""
The function that gathers Agent statistics by evaluating Agent performance
under different Environment conditions.
:param env: the Environment where some changes should be introduced and where Agent stats should
be gathered.
:param env_args: Environment arguments (default ones).
:param extra_env_args: extra Environment conditions those alter default values.
:param agents_init_data: Agent initialisation data.
This is a dictionary that has the following structure:
{
'<Agent Name>': {
AgentInit.CTOR: <Constructor>,
AgentInit.DEF_ARG: <Default Arguments>,
}
}
:param user_samples: Number of Offline Users i.e. Users used to train a Model.
:param num_online_users: Number of Online Users i.e. Users used to validate a Model.
:param num_epochs: how many different epochs should be tried to gather stats?
:param epoch_with_random_reset: should the Random Seed be reset at each new epoch?
:param num_organic_offline_users: how many Organic only users should be used for training.
:param with_cache: is the cache used for training data or not?
:return: a dictionary with stats
{
AgentStats.SAMPLES: [<vector of training offline users used to train a model>]
AgentStats.AGENTS: {
'<Agent Name>': {
AgentStats.Q0_025: [],
AgentStats.Q0_500: [],
AgentStats.Q0_975: [],
}
}
}
"""
new_env_args = {
**env_args,
**extra_env_args,
}
new_env = deepcopy(env)
new_env.init_gym(new_env_args)
agents = build_agents(agents_init_data, new_env_args)
agent_stats = {
AgentStats.SAMPLES: user_samples,
AgentStats.AGENTS: dict(),
}
for agent_key in agents:
print(f"Agent: {agent_key}")
stats = {
AgentStats.Q0_025: [],
AgentStats.Q0_500: [],
AgentStats.Q0_975: [],
}
with Pool(processes=multiprocessing.cpu_count()) as pool:
argss = [
{
'env': new_env,
'agent': agents[agent_key],
'num_offline_users': num_offline_users,
'num_online_users': num_online_users,
'num_organic_offline_users': num_organic_offline_users,
'num_epochs': num_epochs,
'epoch_with_random_reset': epoch_with_random_reset,
'with_cache': with_cache
}
for num_offline_users in user_samples
]
for result in (
[_collect_stats(args) for args in argss]
if num_epochs == 1 else
pool.map(_collect_stats, argss)
):
stats[AgentStats.Q0_025].append(result[1])
stats[AgentStats.Q0_500].append(result[0])
stats[AgentStats.Q0_975].append(result[2])
agent_stats[AgentStats.AGENTS][agent_key] = stats
return agent_stats
def build_agents(agents_init_data, new_env_args):
agents = dict()
for agent_key in agents_init_data:
agent_init_data = agents_init_data[agent_key]
ctor = agent_init_data[AgentInit.CTOR]
def_args = agent_init_data[AgentInit.DEF_ARGS]
agents[agent_key] = ctor(
Configuration({
**def_args,
**new_env_args,
})
)
return agents
def generate_epsilons(epsilon_step=EpsilonDelta, iterations=EpsilonSteps):
# The arguments are kept for backward compatibility; the experiments use the
# fixed epsilon grid defined in `EvolutionEpsilons`.
return list(EvolutionEpsilons)
def format_epsilon(epsilon):
return ("{0:." + f"{EpsilonPrecision}" + "f}").format(round(epsilon, EpsilonPrecision))
def _collect_evolution_stats(args):
"""
Function that is executed in a separate process.
:param args: arguments of the process to be executed.
:return: a dictionary of Success/Failures of applying an Agent.
"""
start = time.time()
epsilon = args['epsilon']
epsilon_key = format_epsilon(epsilon)
print(f"START: ε = {epsilon_key}")
num_evolution_steps = args['num_evolution_steps']
rewards = recogym.evaluate_agent(
deepcopy(args['env']),
args['agent'],
args['num_initial_train_users'],
args['num_step_users'],
num_evolution_steps,
args['training_approach']
)
assert (len(rewards[EvolutionCase.SUCCESS]) == len(rewards[EvolutionCase.FAILURE]))
assert (len(rewards[EvolutionCase.SUCCESS]) == num_evolution_steps)
print(f"END: ε = {epsilon_key} ({time.time() - start}s)")
return {
epsilon_key: {
EvolutionCase.SUCCESS: rewards[EvolutionCase.SUCCESS],
EvolutionCase.SUCCESS_GREEDY: rewards[EvolutionCase.SUCCESS_GREEDY],
EvolutionCase.FAILURE: rewards[EvolutionCase.FAILURE],
EvolutionCase.FAILURE_GREEDY: rewards[EvolutionCase.FAILURE_GREEDY],
EvolutionCase.ACTIONS: rewards[EvolutionCase.ACTIONS]
}
}
def gather_exploration_stats(
env,
env_args,
extra_env_args,
agents_init_data,
training_approach,
num_initial_train_users=1000,
num_step_users=1000,
epsilons=EvolutionEpsilons,
num_evolution_steps=6
):
"""
A helper function that collects data regarding Agents evolution
under different values of epsilon for Epsilon-Greedy Selection Policy.
:param env: The Environment where evolution should be applied;
every time when a new step of the evolution is applied, the Environment is deeply copied
thus the Environment does not interfere with evolution steps.
:param env_args: Environment arguments (default ones).
:param extra_env_args: extra Environment conditions those alter default values.
:param agents_init_data: Agent initialisation data.
This is a dictionary that has the following structure:
{
'<Agent Name>': {
AgentInit.CTOR: <Constructor>,
AgentInit.DEF_ARG: <Default Arguments>,
}
}
:param training_approach: A training approach applied in verification;
for more details look at the `TrainingApproach' enum.
:param num_initial_train_users: how many users' data should be used
to train an initial model BEFORE evolution steps.
:param num_step_users: how many users' data should be used
at each evolution step.
:param epsilons: a list of epsilon values.
:param num_evolution_steps: how many evolution steps should be applied
for an Agent with Epsilon-Greedy Selection Policy.
:return a dictionary of Agent evolution statistics in the form:
{
'Agent Name': {
'Epsilon Values': {
EvolutionCase.SUCCESS: [an array of clicks (for each ith step of evolution)]
EvolutionCase.FAILURE: [an array of failure to draw a click (for each ith step of evolution)]
}
}
}
"""
# A dictionary that stores all data of Agent evolution statistics.
# Key is Agent Name, value is statistics.
agent_evolution_stats = dict()
new_env_args = {
**env_args,
**extra_env_args,
}
new_env = deepcopy(env)
new_env.init_gym(new_env_args)
agents = build_agents(agents_init_data, new_env_args)
for agent_key in agents:
print(f"Agent: {agent_key}")
agent_stats = dict()
with Pool(processes=multiprocessing.cpu_count()) as pool:
for result in pool.map(
_collect_evolution_stats,
[
{
'epsilon': epsilon,
'env': new_env,
'agent': EpsilonGreedy(
Configuration({
**epsilon_greedy_args,
**new_env_args,
'epsilon': epsilon,
}),
deepcopy(agents[agent_key])
),
'num_initial_train_users': num_initial_train_users,
'num_step_users': num_step_users,
'num_evolution_steps': num_evolution_steps,
'training_approach': training_approach,
}
for epsilon in epsilons
]
):
agent_stats = {
**agent_stats,
**result,
}
agent_evolution_stats[agent_key] = agent_stats
return agent_evolution_stats
def plot_agent_stats(agent_stats):
_, ax = plt.subplots(
1,
1,
figsize=(16, 8)
)
user_samples = agent_stats[AgentStats.SAMPLES]
for agent_key in agent_stats[AgentStats.AGENTS]:
stats = agent_stats[AgentStats.AGENTS][agent_key]
ax.fill_between(
user_samples,
stats[AgentStats.Q0_975],
stats[AgentStats.Q0_025],
alpha=.05
)
ax.plot(user_samples, stats[AgentStats.Q0_500])
ax.set_xlabel('Samples #')
ax.set_ylabel('CTR')
ax.legend([
"$C^{CTR}_{0.5}$: " + f"{agent_key}" for agent_key in agent_stats[AgentStats.AGENTS]
])
plt.show()
def plot_evolution_stats(
agent_evolution_stats,
max_agents_per_row=2,
epsilons=EvolutionEpsilons,
plot_min=GraphCTRMin,
plot_max=GraphCTRMax
):
figs, axs = plt.subplots(
int(len(agent_evolution_stats) / max_agents_per_row),
max_agents_per_row,
figsize=(16, 10),
squeeze=False
)
labels = [("$\epsilon=$" + format_epsilon(epsilon)) for epsilon in epsilons]
for (ix, agent_key) in enumerate(agent_evolution_stats):
ax = axs[int(ix / max_agents_per_row), int(ix % max_agents_per_row)]
agent_evolution_stat = agent_evolution_stats[agent_key]
ctr_means = []
for epsilon in epsilons:
epsilon_key = format_epsilon(epsilon)
evolution_stat = agent_evolution_stat[epsilon_key]
steps = []
ms = []
q0_025 = []
q0_975 = []
assert (len(evolution_stat[EvolutionCase.SUCCESS]) == len(
evolution_stat[EvolutionCase.FAILURE]))
for step in range(len(evolution_stat[EvolutionCase.SUCCESS])):
steps.append(step)
successes = evolution_stat[EvolutionCase.SUCCESS][step]
failures = evolution_stat[EvolutionCase.FAILURE][step]
ms.append(beta.ppf(0.5, successes + 1, failures + 1))
q0_025.append(beta.ppf(0.025, successes + 1, failures + 1))
q0_975.append(beta.ppf(0.975, successes + 1, failures + 1))
ctr_means.append(np.mean(ms))
ax.fill_between(
range(len(steps)),
q0_975,
q0_025,
alpha=.05
)
ax.plot(steps, ms)
ctr_means_mean = np.mean(ctr_means)
ctr_means_div = np.sqrt(np.var(ctr_means))
ax.set_title(
f"Agent: {agent_key}\n"
+ "$\hat{Q}^{CTR}_{0.5}="
+ "{0:.5f}".format(round(ctr_means_mean, 5))
+ "$, "
+ "$\hat{\sigma}^{CTR}_{0.5}="
+ "{0:.5f}".format(round(ctr_means_div, 5))
+ "$"
)
ax.legend(labels)
ax.set_ylabel('CTR')
ax.set_ylim([plot_min, plot_max])
plt.subplots_adjust(hspace=.5)
plt.show()
def plot_heat_actions(
agent_evolution_stats,
epsilons=EvolutionEpsilons
):
max_epsilons_per_row = len(epsilons)
the_first_agent = next(iter(agent_evolution_stats.values()))
epsilon_steps = len(the_first_agent)
rows = int(len(agent_evolution_stats) * epsilon_steps / max_epsilons_per_row)
figs, axs = plt.subplots(
int(len(agent_evolution_stats) * epsilon_steps / max_epsilons_per_row),
max_epsilons_per_row,
figsize=(16, 4 * rows),
squeeze=False
)
for (ix, agent_key) in enumerate(agent_evolution_stats):
agent_evolution_stat = agent_evolution_stats[agent_key]
for (jx, epsilon_key) in enumerate(agent_evolution_stat):
flat_index = ix * epsilon_steps + jx
ax = axs[int(flat_index / max_epsilons_per_row), int(flat_index % max_epsilons_per_row)]
evolution_stat = agent_evolution_stat[epsilon_key]
action_stats = evolution_stat[EvolutionCase.ACTIONS]
total_actions = len(action_stats)
heat_data = []
for kx in range(total_actions):
heat_data.append(action_stats[kx])
heat_data = np.array(heat_data)
im = ax.imshow(heat_data)
ax.set_yticks(np.arange(total_actions))
ax.set_yticklabels([f"{action_id}" for action_id in range(total_actions)])
ax.set_title(f"Agent: {agent_key}\n$\epsilon=${epsilon_key}")
_ = ax.figure.colorbar(im, ax=ax)
plt.show()
def plot_roi(
agent_evolution_stats,
epsilons=EvolutionEpsilons,
max_agents_per_row=2
):
"""
A helper function that calculates Return on Investment (ROI) for applying the Epsilon-Greedy Selection Policy.
:param agent_evolution_stats: statistic about Agent evolution collected in `build_exploration_data'.
:param epsilons: a list of epsilon values.
:param max_agents_per_row: how many graphs should be drawn per row
:return: a dictionary of Agent ROI after applying Epsilon-Greedy Selection Strategy in the following form:
{
'Agent Name': {
'Epsilon Value': {
Metrics.ROI: [an array of ROIs for each ith step (starting from 1st step)]
}
}
}
"""
figs, axs = plt.subplots(
int(len(agent_evolution_stats) / max_agents_per_row),
max_agents_per_row,
figsize=(16, 8),
squeeze=False
)
labels = [("$\epsilon=$" + format_epsilon(epsilon)) for epsilon in epsilons if epsilon != 0.0]
agent_roi_stats = dict()
for (ix, agent_key) in enumerate(agent_evolution_stats):
ax = axs[int(ix / max_agents_per_row), int(ix % max_agents_per_row)]
agent_stat = agent_evolution_stats[agent_key]
zero_epsilon_key = format_epsilon(0)
zero_epsilon = agent_stat[zero_epsilon_key]
zero_success_evolutions = zero_epsilon[EvolutionCase.SUCCESS]
zero_failure_evolutions = zero_epsilon[EvolutionCase.FAILURE]
assert (len(zero_success_evolutions))
agent_stats = dict()
roi_mean_means = []
for epsilon in epsilons:
if zero_epsilon_key == format_epsilon(epsilon):
continue
epsilon_key = format_epsilon(epsilon)
agent_stats[epsilon_key] = {
RoiMetrics.ROI_0_025: [],
RoiMetrics.ROI_MEAN: [],
RoiMetrics.ROI_0_975: [],
}
epsilon_evolutions = agent_stat[epsilon_key]
success_greedy_evolutions = epsilon_evolutions[EvolutionCase.SUCCESS_GREEDY]
failure_greedy_evolutions = epsilon_evolutions[EvolutionCase.FAILURE_GREEDY]
assert (len(success_greedy_evolutions) == len(failure_greedy_evolutions))
assert (len(zero_success_evolutions) == len(success_greedy_evolutions))
steps = []
roi_means = []
for step in range(1, len(epsilon_evolutions[EvolutionCase.SUCCESS])):
previous_zero_successes = zero_success_evolutions[step - 1]
previous_zero_failures = zero_failure_evolutions[step - 1]
current_zero_successes = zero_success_evolutions[step]
current_zero_failures = zero_failure_evolutions[step]
current_epsilon_greedy_successes = success_greedy_evolutions[step]
current_epsilon_greedy_failures = failure_greedy_evolutions[step]
def roi_with_confidence_interval(
epsilon,
previous_zero_successes,
previous_zero_failures,
current_zero_successes,
current_zero_failures,
current_epsilon_greedy_successes,
current_epsilon_greedy_failures
):
def roi_formulae(
epsilon,
previous_zero,
current_zero,
current_epsilon_greedy
):
current_gain = current_epsilon_greedy / (1 - epsilon) - current_zero
roi = current_gain / (epsilon * previous_zero)
return roi
return {
RoiMetrics.ROI_SUCCESS: roi_formulae(
epsilon,
previous_zero_successes,
current_zero_successes,
current_epsilon_greedy_successes
),
RoiMetrics.ROI_FAILURE: roi_formulae(
epsilon,
previous_zero_failures,
current_zero_failures,
current_epsilon_greedy_failures
)
}
roi_mean = roi_with_confidence_interval(
epsilon,
previous_zero_successes,
previous_zero_failures,
current_zero_successes,
current_zero_failures,
current_epsilon_greedy_successes,
current_epsilon_greedy_failures
)[RoiMetrics.ROI_SUCCESS]
agent_stats[epsilon_key][RoiMetrics.ROI_MEAN].append(roi_mean)
roi_means.append(roi_mean)
steps.append(step)
roi_mean_means.append(np.mean(roi_means))
ax.plot(steps, roi_means)
roi_means_mean = np.mean(roi_mean_means)
roi_means_div = np.sqrt(np.var(roi_mean_means))
ax.set_title(
"$ROI_{t+1}$ of Agent: " + f"'{agent_key}'\n"
+ "$\hat{\mu}_{ROI}="
+ "{0:.5f}".format(round(roi_means_mean, 5))
+ "$, "
+ "$\hat{\sigma}_{ROI}="
+ "{0:.5f}".format(round(roi_means_div, 5))
+ "$"
)
ax.legend(labels, loc=10)
ax.set_ylabel('ROI')
agent_roi_stats[agent_key] = agent_stats
plt.subplots_adjust(hspace=.5)
plt.show()
return agent_roi_stats
def verify_agents(env, number_of_users, agents):
stat = {
'Agent': [],
'0.025': [],
'0.500': [],
'0.975': [],
}
for agent_id in agents:
stat['Agent'].append(agent_id)
data = deepcopy(env).generate_logs(number_of_users, agents[agent_id])
bandits = data[data['z'] == 'bandit']
successes = bandits[bandits['c'] == 1].shape[0]
failures = bandits[bandits['c'] == 0].shape[0]
stat['0.025'].append(beta.ppf(0.025, successes + 1, failures + 1))
stat['0.500'].append(beta.ppf(0.500, successes + 1, failures + 1))
stat['0.975'].append(beta.ppf(0.975, successes + 1, failures + 1))
return pd.DataFrame().from_dict(stat)
def evaluate_IPS(agent, reco_log):
ee = []
for u in range(max(reco_log.u)):
t = np.array(reco_log[reco_log['u'] == u].t)
v = np.array(reco_log[reco_log['u'] == u].v)
a = np.array(reco_log[reco_log['u'] == u].a)
c = np.array(reco_log[reco_log['u'] == u].c)
z = list(reco_log[reco_log['u'] == u].z)
ps = np.array(reco_log[reco_log['u'] == u].ps)
jj = 0
session = OrganicSessions()
agent.reset()
while True:
if jj >= len(z):
break
if z[jj] == 'organic':
session.next(DefaultContext(t[jj], u), int(v[jj]))
else:
prob_policy = agent.act(Observation(DefaultContext(t[jj], u), session), 0, False)[
'ps-a']
if prob_policy!=():
ee.append(c[jj] * prob_policy[int(a[jj])] / ps[jj])
session = OrganicSessions()
jj += 1
return ee
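# Each entry of `ee` above is a per-decision IPS term c_i * pi(a_i | x_i) / ps_i,
# so np.mean(ee) is the IPS estimate of the target policy's CTR (as used in
# `verify_agents_IPS` below), with standard error np.std(ee) / sqrt(len(ee)).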
def evaluate_SNIPS(agent, reco_log):
rewards = []
p_ratio = []
for u in range(max(reco_log.u)):
t = np.array(reco_log[reco_log['u'] == u].t)
v = np.array(reco_log[reco_log['u'] == u].v)
a = np.array(reco_log[reco_log['u'] == u].a)
c = np.array(reco_log[reco_log['u'] == u].c)
z = list(reco_log[reco_log['u'] == u].z)
ps = np.array(reco_log[reco_log['u'] == u].ps)
jj = 0
session = OrganicSessions()
agent.reset()
while True:
if jj >= len(z):
break
if z[jj] == 'organic':
session.next(DefaultContext(t[jj], u), int(v[jj]))
else:
prob_policy = agent.act(Observation(DefaultContext(t[jj], u), session), 0, False)[
'ps-a']
rewards.append(c[jj])
p_ratio.append(prob_policy[int(a[jj])] / ps[jj])
session = OrganicSessions()
jj += 1
return rewards, p_ratio
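# SNIPS normalises by the summed propensity ratios rather than the sample size:
#     V_SNIPS = sum_i c_i * w_i / sum_i w_i,  where w_i = pi(a_i | x_i) / ps_i,
# trading a small bias for (usually much) lower variance than plain IPS.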
def verify_agents_IPS(reco_log, agents):
stat = {
'Agent': [],
'0.025': [],
'0.500': [],
'0.975': [],
}
for agent_id in agents:
ee = evaluate_IPS(agents[agent_id], reco_log)
mean_ee = np.mean(ee)
se_ee = np.std(ee) / np.sqrt(len(ee))
stat['Agent'].append(agent_id)
stat['0.025'].append(mean_ee - 2 * se_ee)
stat['0.500'].append(mean_ee)
stat['0.975'].append(mean_ee + 2 * se_ee)
return pd.DataFrame().from_dict(stat)
def verify_agents_SNIPS(reco_log, agents):
stat = {
'Agent': [],
'0.025': [],
'0.500': [],
'0.975': [],
}
for agent_id in agents:
rewards, p_ratio = evaluate_SNIPS(agents[agent_id], reco_log)
ee = np.asarray(rewards) * np.asarray(p_ratio)
mean_ee = np.sum(ee) / np.sum(p_ratio)
se_ee = np.std(ee) / np.sqrt(len(ee))
stat['Agent'].append(agent_id)
stat['0.025'].append(mean_ee - 2 * se_ee)
stat['0.500'].append(mean_ee)
stat['0.975'].append(mean_ee + 2 * se_ee)
return pd.DataFrame().from_dict(stat)
def evaluate_recall_at_k(agent, reco_log, k=5):
hits = []
for u in range(max(reco_log.u)):
t = np.array(reco_log[reco_log['u'] == u].t)
v = np.array(reco_log[reco_log['u'] == u].v)
a = np.array(reco_log[reco_log['u'] == u].a)
c = np.array(reco_log[reco_log['u'] == u].c)
z = list(reco_log[reco_log['u'] == u].z)
ps = np.array(reco_log[reco_log['u'] == u].ps)
jj = 0
session = OrganicSessions()
agent.reset()
while True:
if jj >= len(z):
break
if z[jj] == 'organic':
session.next(DefaultContext(t[jj], u), int(v[jj]))
else:
prob_policy = agent.act(Observation(DefaultContext(t[jj], u), session), 0, False)[
'ps-a']
# Does the next session exist?
if (jj + 1) < len(z):
# Is the next session organic?
if z[jj + 1] == 'organic':
# Was there no click for this bandit event?
if not c[jj]:
# Generate a top-K from the probability distribution over all actions
top_k = set(np.argpartition(prob_policy, -k)[-k:])
# Is the next seen item in the top-K?
if v[jj + 1] in top_k:
hits.append(1)
else:
hits.append(0)
session = OrganicSessions()
jj += 1
return hits
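# Note on the top-K step above (illustration only):
#     np.argpartition([0.1, 0.5, 0.2, 0.9], -2)[-2:] yields the indices {1, 3}
# of the two largest entries (in arbitrary order) in O(P) time, which is why
# it is used here instead of a full sort.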
def verify_agents_recall_at_k(reco_log, agents, k=5):
stat = {
'Agent': [],
'0.025': [],
'0.500': [],
'0.975': [],
}
for agent_id in agents:
hits = evaluate_recall_at_k(agents[agent_id], reco_log, k=k)
mean_hits = np.mean(hits)
se_hits = np.std(hits) / np.sqrt(len(hits))
stat['Agent'].append(agent_id)
stat['0.025'].append(mean_hits - 2 * se_hits)
stat['0.500'].append(mean_hits)
stat['0.975'].append(mean_hits + 2 * se_hits)
return pd.DataFrame().from_dict(stat)
def plot_verify_agents(result):
fig, ax = plt.subplots()
ax.set_title('CTR Estimate for Different Agents')
plt.errorbar(result['Agent'],
result['0.500'],
yerr=(result['0.500'] - result['0.025'],
result['0.975'] - result['0.500']),
fmt='o',
capsize=4)
plt.xticks(result['Agent'], result['Agent'], rotation='vertical')
return fig
| 32069 | 33.78308 | 117 |
py
|
reco-gym
|
reco-gym-master/recogym/bench_agents.py
|
import datetime
import hashlib
import json
import os
import pickle
import time
from copy import deepcopy
from pathlib import Path
import numpy as np
from scipy.stats.distributions import beta
from tqdm import trange, tqdm
from recogym import AgentStats, DefaultContext, Observation
from recogym.envs.session import OrganicSessions
CACHE_DIR = os.path.join(os.path.join(str(Path.home()), '.reco-gym'), 'cache')
def _cache_file_name(env, num_organic_offline_users: int, num_offline_users: int) -> str:
unique_config_data = (
(
env.config.K,
(
str(type(env.config.agent)),
),
env.config.change_omega_for_bandits,
env.config.normalize_beta,
env.config.num_clusters,
env.config.num_products,
env.config.num_users,
env.config.number_of_flips,
env.config.phi_var,
env.config.prob_bandit_to_organic,
env.config.prob_leave_bandit,
env.config.prob_leave_organic,
env.config.prob_organic_to_bandit,
env.config.random_seed,
env.config.sigma_mu_organic,
env.config.sigma_omega,
env.config.sigma_omega_initial,
env.config.with_ps_all,
),
num_organic_offline_users,
num_offline_users
)
return f'{hashlib.sha1(json.dumps(unique_config_data).encode()).hexdigest()}.pkl'
def _cached_data(env, num_organic_offline_users: int, num_offline_users: int) -> str:
cache_file_name = _cache_file_name(env, num_organic_offline_users, num_offline_users)
file_path = os.path.join(CACHE_DIR, cache_file_name)
if not os.path.exists(CACHE_DIR):
os.makedirs(CACHE_DIR)
if os.path.exists(file_path):
with open(file_path, 'rb') as file:
data = pickle.load(file, fix_imports=False)
else:
data = env.generate_logs(num_offline_users=num_offline_users,
num_organic_offline_users=num_organic_offline_users)
with open(file_path, 'wb') as file:
pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL, fix_imports=False)
return data
def _collect_stats(args):
env = args['env']
agent = args['agent']
num_offline_users = args['num_offline_users']
num_online_users = args['num_online_users']
num_organic_offline_users = args['num_organic_offline_users']
epoch_with_random_reset = args['epoch_with_random_reset']
epoch = args['epoch']
with_cache = args['with_cache']
print(f"START: Agent Training #{epoch}")
unique_user_id = 0
new_agent = deepcopy(agent)
print(f"START: Agent Training @ Epoch #{epoch}")
start = time.time()
if epoch_with_random_reset:
train_env = deepcopy(env)
train_env.reset_random_seed(epoch)
else:
train_env = env
if with_cache:
data = _cached_data(train_env, num_organic_offline_users, num_offline_users)
def _train(observation, session, action, reward, time, done):
if observation:
assert session is not None
else:
observation = Observation(DefaultContext(time, current_user), session)
new_agent.train(observation, action, reward, done)
return None, OrganicSessions(), None, None
current_session = OrganicSessions()
last_observation = None
last_action = None
last_reward = None
last_time = None
current_user = None
with tqdm(total=data.shape[0], desc='Offline Logs') as pbar:
for _, row in data.iterrows():
pbar.update()
t, u, z, v, a, c, ps, ps_a = row.values
if current_user is None:
current_user = u
if current_user != u:
last_observation, current_session, last_action, last_reward = _train(
last_observation,
current_session,
last_action,
last_reward,
last_time,
True
)
current_user = u
if last_action:
last_observation, current_session, last_action, last_reward = _train(
last_observation,
current_session,
last_action,
last_reward,
last_time,
False
)
if z == 'organic':
assert (not np.isnan(v))
assert (np.isnan(a))
assert (np.isnan(c))
current_session.next(DefaultContext(t, u), np.int16(v))
else:
last_observation = Observation(DefaultContext(t, u), current_session)
current_session = OrganicSessions()
assert (np.isnan(v))
assert (not np.isnan(a))
last_action = {
't': t,
'u': u,
'a': np.int16(a),
'ps': ps,
'ps-a': ps_a,
}
assert (not np.isnan(c))
last_reward = c
last_time = t
_train(
last_observation,
current_session,
last_action,
last_reward,
last_time,
True
)
else:
# Offline Organic Training.
for _ in trange(num_organic_offline_users, desc='Organic Users'):
train_env.reset(unique_user_id)
unique_user_id += 1
new_observation, _, _, _ = train_env.step(None)
new_agent.train(new_observation, None, None, True)
# Offline Organic and Bandit Training.
for _ in trange(num_offline_users, desc='Users'):
train_env.reset(unique_user_id)
unique_user_id += 1
new_observation, reward, done, _ = train_env.step(None)  # (observation, reward, done, info)
while not done:
old_observation = new_observation
action, new_observation, reward, done, _ = train_env.step_offline(
old_observation, reward, done
)
new_agent.train(old_observation, action, reward, False)
old_observation = new_observation
action, _, reward, done, _ = train_env.step_offline(
old_observation, reward, done
)
new_agent.train(old_observation, action, reward, True)
print(f"END: Agent Training @ Epoch #{epoch} ({time.time() - start}s)")
# Online Testing.
print(f"START: Agent Evaluating @ Epoch #{epoch}")
start = time.time()
if epoch_with_random_reset:
eval_env = deepcopy(env)
eval_env.reset_random_seed(epoch)
else:
eval_env = env
stat_data = eval_env.generate_logs(num_offline_users=num_online_users, agent=new_agent)
rewards = stat_data[~np.isnan(stat_data['a'])]['c']
successes = np.sum(rewards)
failures = rewards.shape[0] - successes
print(f"END: Agent Evaluating @ Epoch #{epoch} ({time.time() - start}s)")
return {
AgentStats.SUCCESSES: successes,
AgentStats.FAILURES: failures,
}
def test_agent(
env,
agent,
num_offline_users=1000,
num_online_users=100,
num_organic_offline_users=0,
num_epochs=1,
epoch_with_random_reset=False,
with_cache=False
):
successes = 0
failures = 0
argss = [
{
'env': env,
'agent': agent,
'num_offline_users': num_offline_users,
'num_online_users': num_online_users,
'num_organic_offline_users': num_organic_offline_users,
'epoch_with_random_reset': epoch_with_random_reset,
'epoch': epoch,
'with_cache': with_cache,
}
for epoch in range(num_epochs)
]
for result in [_collect_stats(args) for args in argss]:
successes += result[AgentStats.SUCCESSES]
failures += result[AgentStats.FAILURES]
return (
beta.ppf(0.500, successes + 1, failures + 1),
beta.ppf(0.025, successes + 1, failures + 1),
beta.ppf(0.975, successes + 1, failures + 1)
)
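# The returned triple is a Beta-posterior CTR estimate: with a uniform
# Beta(1, 1) prior, s clicks out of s + f bandit events give a Beta(s + 1, f + 1)
# posterior. For example (illustration only), 20 clicks in 1000 events yield a
# median CTR of roughly 0.021 with a 95% credible interval of roughly
# [0.013, 0.031].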
| 8516 | 32.797619 | 91 |
py
|
reco-gym
|
reco-gym-master/recogym/__init__.py
|
from .envs import env_0_args, env_1_args
from .envs import Observation
from .envs import Configuration
from .envs import Session
from .envs import Context, DefaultContext
from .constants import (
AgentStats,
AgentInit,
EvolutionCase,
TrainingApproach,
RoiMetrics
)
from .bench_agents import test_agent
from .evaluate_agent import (
evaluate_agent,
build_agent_init,
gather_agent_stats,
plot_agent_stats,
gather_exploration_stats,
plot_evolution_stats,
plot_heat_actions,
plot_roi,
verify_agents,
verify_agents_IPS,
to_categorical
)
from .competition import competition_score
from .envs.features.time.default_time_generator import DefaultTimeGenerator
from .envs.features.time.normal_time_generator import NormalTimeGenerator
from gym.envs.registration import register
register(
id = 'reco-gym-v0',
entry_point = 'recogym.envs:RecoEnv0'
)
register(
id = 'reco-gym-v1',
entry_point = 'recogym.envs:RecoEnv1'
)
| 996 | 20.212766 | 75 |
py
|
reco-gym
|
reco-gym-master/recogym/envs/reco_env_v0.py
|
from numpy import array, sqrt, kron, eye, ones
from .abstract import AbstractEnv, f, env_args
# Default arguments for toy environment ------------------------------------
env_0_args = env_args
# Users are grouped into distinct clusters to prevent mixing.
env_args['num_clusters'] = 2
# Variance of the difference between organic and bandit.
env_args['phi_var'] = 0.1
# Environment definition ----------------------------------------------------
class RecoEnv0(AbstractEnv):
def __init__(self):
super(RecoEnv0, self).__init__()
def set_static_params(self):
# State transition Matrix between Organic, Bandit, Leave
self.state_transition = array([
[0, self.config.prob_organic_to_bandit, self.config.prob_leave_organic],
[self.config.prob_bandit_to_organic, 0, self.config.prob_leave_organic],
[0.0, 0.0, 1.]
])
self.state_transition[0, 0] = 1 - sum(self.state_transition[0, :])
self.state_transition[1, 1] = 1 - sum(self.state_transition[1, :])
# Organic Transition Matrix
cluster_ratio = int(self.config.num_products / self.config.num_clusters)
ones_mat = ones((cluster_ratio, cluster_ratio))
T = kron(eye(self.config.num_clusters), ones_mat)
T = T / kron(T.sum(1), ones((self.config.num_products, 1))).T
self.product_transition = T
# creating click probability matrix
self.phi = self.rng.normal(
scale=sqrt(self.config.phi_var),
size=(self.config.num_products, self.config.num_products)
)
self.click_probs = f(self.config.num_products / 5. * (T + T.T) + self.phi)
self.initial_product_probs = \
ones((self.config.num_products)) / self.config.num_products
def reset(self, user_id=0):
super().reset(user_id)
# Current Organic product viewed, choose from initial probabilities
self.product_view = self.rng.choice(
self.config.num_products, p=self.initial_product_probs
)
def update_state(self):
"""Update Markov state between `organic`, `bandit`, or `stop`"""
self.state = self.rng.choice(3, p=self.state_transition[self.state, :])
def draw_click(self, recommendation):
p = self.click_probs[recommendation, self.product_view]
return self.rng.binomial(1, p)
def update_product_view(self):
probs = self.product_transition[self.product_view, :]
self.product_view = self.rng.choice(self.config.num_products, p=probs)
| 2553 | 35.485714 | 84 |
py
|
reco-gym
|
reco-gym-master/recogym/envs/context.py
|
class Context:
def time(self):
raise NotImplementedError
def user(self):
raise NotImplementedError
class DefaultContext(Context):
def __init__(self, current_time, current_user_id):
super(DefaultContext, self).__init__()
self.current_time = current_time
self.current_user_id = current_user_id
def time(self):
return self.current_time
def user(self):
return self.current_user_id
| 450 | 19.5 | 54 |
py
|
reco-gym
|
reco-gym-master/recogym/envs/reco_env_v1.py
|
# Omega is the user's latent representation of interests - vector of size K
# omega is initialised for each new user in reset
# omega is updated at every timestep via a Gaussian random walk scaled by the time delta
#
# Gamma is the latent representation of organic products (matrix P by K)
# softmax(Gamma omega) is the next item probabilities (organic)
# Beta is the latent representation of response to actions (matrix P by K)
# sigmoid(beta omega) is the ctr for each action
import numpy as np
from numba import njit
from .abstract import AbstractEnv, env_args, organic
# Default arguments for toy environment ------------------------------------
# inherit most arguments from abstract class
env_1_args = {
**env_args,
**{
'K': 5,
'sigma_omega_initial': 1,
'sigma_omega': 0.1,
'number_of_flips': 0,
'sigma_mu_organic': 3,
'change_omega_for_bandits': False,
'normalize_beta': False
}
}
@njit(nogil=True)
def sig(x):
return 1.0 / (1.0 + np.exp(-x))
# Maps behaviour onto ctr - behaviour has real support, while ctr lies on [0, 1].
@njit(nogil=True)
def ff(xx, aa=5, bb=2, cc=0.3, dd=2, ee=6):
# Magic numbers give a reasonable ctr of around 2%.
return sig(aa * sig(bb * sig(cc * xx) - dd) - ee)
# Environment definition.
class RecoEnv1(AbstractEnv):
def __init__(self):
super(RecoEnv1, self).__init__()
self.cached_state_seed = None
def set_static_params(self):
# Initialise the state transition matrix which is 3 by 3
# high level transitions between organic, bandit and leave.
self.state_transition = np.array([
[0, self.config.prob_organic_to_bandit, self.config.prob_leave_organic],
[self.config.prob_bandit_to_organic, 0, self.config.prob_leave_organic],
[0.0, 0.0, 1.]
])
self.state_transition[0, 0] = 1 - sum(self.state_transition[0, :])
self.state_transition[1, 1] = 1 - sum(self.state_transition[1, :])
# Initialise Gamma for all products (Organic).
self.Gamma = self.rng.normal(
size=(self.config.num_products, self.config.K)
)
# Initialise mu_organic.
self.mu_organic = self.rng.normal(
0, self.config.sigma_mu_organic,
size=(self.config.num_products, 1)
)
# Initialise beta, mu_bandit for all products (Bandit).
self.generate_beta(self.config.number_of_flips)
# Create a new user.
def reset(self, user_id=0):
super().reset(user_id)
self.omega = self.rng.normal(
0, self.config.sigma_omega_initial, size=(self.config.K, 1)
)
# Update user state to one of (organic, bandit, leave) and their omega (latent factor).
def update_state(self):
old_state = self.state
self.state = self.rng.choice(3, p=self.state_transition[self.state, :])
assert (hasattr(self, 'time_generator'))
old_time = self.current_time
self.current_time = self.time_generator.new_time()
time_delta = self.current_time - old_time
omega_k = 1 if time_delta == 0 else time_delta
# And update omega.
if self.config.change_omega_for_bandits or self.state == organic:
self.omega = self.rng.normal(
self.omega,
self.config.sigma_omega * omega_k, size=(self.config.K, 1)
)
self.context_switch = old_state != self.state
# Sample a click as response to recommendation when user in bandit state
# click ~ Bernoulli().
def draw_click(self, recommendation):
# Personalised CTR for every recommended product.
if self.config.change_omega_for_bandits or self.context_switch:
self.cached_state_seed = (
self.beta @ self.omega + self.mu_bandit
).ravel()
assert self.cached_state_seed is not None
ctr = ff(self.cached_state_seed)
click = self.rng.choice(
[0, 1],
p=[1 - ctr[recommendation], ctr[recommendation]]
)
return click
# Sample the next organic product view.
def update_product_view(self):
log_uprob = (self.Gamma @ self.omega + self.mu_organic).ravel()
log_uprob = log_uprob - max(log_uprob)
uprob = np.exp(log_uprob)
self.product_view = np.int16(
self.rng.choice(
self.config.num_products,
p=uprob / uprob.sum()
)
)
def normalize_beta(self):
self.beta = self.beta / np.sqrt((self.beta ** 2).sum(1)[:, np.newaxis])
def generate_beta(self, number_of_flips):
"""Create Beta by flipping Gamma, but flips are between similar items only"""
if number_of_flips == 0:
self.beta = self.Gamma
self.mu_bandit = self.mu_organic
if self.config.normalize_beta:
self.normalize_beta()
return
P, K = self.Gamma.shape
index = np.arange(P)
prod_cov = self.Gamma @ self.Gamma.T
# We are always most correlated with ourselves so remove the diagonal.
prod_cov = prod_cov - np.diag(np.diag(prod_cov))
prod_cov_flat = prod_cov.flatten()
already_used = set()
flips = 0
for p in prod_cov_flat.argsort()[::-1]: # Find the most correlated entries
# Convert flat indexes to 2d indexes
ii, jj = int(p / P), np.mod(p, P)
# Do flips between the most correlated entries
# provided neither the row or col were used before.
if not (ii in already_used or jj in already_used):
index[ii] = jj # Do a flip.
index[jj] = ii
already_used.add(ii)
already_used.add(jj)
flips += 1
if flips == number_of_flips:
break
self.beta = self.Gamma[index, :]
self.mu_bandit = self.mu_organic[index, :]
if self.config.normalize_beta:
self.normalize_beta()
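if __name__ == '__main__':
    # Minimal usage sketch (not part of the original file): build the
    # environment directly and produce a small log of simulated users with
    # the default (uniform random) logging policy from AbstractEnv.
    env = RecoEnv1()
    env.init_gym(env_1_args)
    logs = env.generate_logs(num_offline_users=5)
    print(logs.head())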
| 6,063 | 33.651429 | 91 |
py
|
reco-gym
|
reco-gym-master/recogym/envs/configuration.py
|
class Configuration:
"""
Configuration
That class defines Environment Configurations used in RecoGym.
The configurations are provided as a dictionary: key = value.
The value can be ANY type i.e. a complex object, function etc.
The class is immutable i.e. once an instance of that class is created,
no configuration can be changed.
"""
def __init__(self, args):
# self.args = args
# Set all key word arguments as attributes.
for key in args:
super(Configuration, self).__setattr__(key, args[key])
Configuration.__slots__ = [key for key in args]
def __setattr__(self, key, value):
pass
def __deepcopy__(self, memodict={}):
return self
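if __name__ == '__main__':
    # Minimal sketch (not part of the original file) of the immutability
    # contract described in the docstring: assignments after construction
    # are silently ignored.
    config = Configuration({'num_products': 10})
    assert config.num_products == 10
    config.num_products = 20  # No-op: __setattr__ does nothing.
    assert config.num_products == 10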
| 742 | 28.72 | 74 |
py
|
reco-gym
|
reco-gym-master/recogym/envs/observation.py
|
class Observation:
def __init__(self, context, sessions):
self.current_context = context
self.current_sessions = sessions
def context(self):
return self.current_context
def sessions(self):
return self.current_sessions
| 264 | 23.090909 | 42 |
py
|
reco-gym
|
reco-gym-master/recogym/envs/abstract.py
|
from abc import ABC
import gym
import numpy as np
import pandas as pd
from gym.spaces import Discrete
from numpy.random.mtrand import RandomState
from scipy.special import expit as sigmoid
from tqdm import trange
from .configuration import Configuration
from .context import DefaultContext
from .features.time import DefaultTimeGenerator
from .observation import Observation
from .session import OrganicSessions
from ..agents import Agent
# Arguments shared between all environments.
env_args = {
'num_products': 10,
'num_users': 100,
'random_seed': np.random.randint(2 ** 31 - 1),
# Markov State Transition Probabilities.
'prob_leave_bandit': 0.01,
'prob_leave_organic': 0.01,
'prob_bandit_to_organic': 0.05,
'prob_organic_to_bandit': 0.25,
'normalize_beta': False,
'with_ps_all': False
}
# Static function for squashing values between 0 and 1.
def f(mat, offset=5):
"""Monotonic increasing function as described in toy.pdf."""
return sigmoid(mat - offset)
# Magic numbers for Markov states.
organic = 0
bandit = 1
stop = 2
class AbstractEnv(gym.Env, ABC):
def __init__(self):
gym.Env.__init__(self)
ABC.__init__(self)
self.first_step = True
self.config = None
self.state = None
self.current_user_id = None
self.current_time = None
self.empty_sessions = OrganicSessions()
def reset_random_seed(self, epoch=0):
# Initialize Random State.
assert (self.config.random_seed is not None)
self.rng = RandomState(self.config.random_seed + epoch)
def init_gym(self, args):
self.config = Configuration(args)
# Defining Action Space.
self.action_space = Discrete(self.config.num_products)
if 'time_generator' not in args:
self.time_generator = DefaultTimeGenerator(self.config)
else:
self.time_generator = self.config.time_generator
# Setting random seed for the first time.
self.reset_random_seed()
if 'agent' not in args:
self.agent = None
else:
self.agent = self.config.agent
# Setting any static parameters such as transition probabilities.
self.set_static_params()
# Set random seed for second time, ensures multiple epochs possible.
self.reset_random_seed()
def reset(self, user_id=0):
# Current state.
self.first_step = True
self.state = organic # Manually set first state as Organic.
self.time_generator.reset()
if self.agent:
self.agent.reset()
self.current_time = self.time_generator.new_time()
self.current_user_id = user_id
# Record number of times each product seen for static policy calculation.
self.organic_views = np.zeros(self.config.num_products)
def generate_organic_sessions(self):
# Initialize session.
session = OrganicSessions()
while self.state == organic:
# Add next product view.
self.update_product_view()
session.next(
DefaultContext(self.current_time, self.current_user_id),
self.product_view
)
# Update markov state.
self.update_state()
return session
def step(self, action_id):
"""
Parameters
----------
        action_id : int between 0 and num_products - 1 indicating which
                    product is recommended (aka which ad is shown)
Returns
-------
observation, reward, done, info : tuple
observation (tuple) :
a tuple of values (is_organic, product_view)
                is_organic - True if the Markov state is `organic`,
                             False if it is `bandit` or `stop`.
                product_view - if the Markov state is `organic` then this is an
                               int between 0 and P - 1, where P is the number
                               of products; otherwise it is None.
reward (float) :
if the previous state was
`bandit` - then reward is 1 if the user clicked on the ad
you recommended otherwise 0
`organic` - then reward is None
done (bool) :
whether it's time to reset the environment again.
An episode is over at the end of a user's timeline (all of
their organic and bandit sessions)
info (dict) :
this is unused, it's always an empty dict
"""
# No information to return.
info = {}
if self.first_step:
assert (action_id is None)
self.first_step = False
sessions = self.generate_organic_sessions()
return (
Observation(
DefaultContext(
self.current_time,
self.current_user_id
),
sessions
),
None,
self.state == stop,
info
)
assert (action_id is not None)
# Calculate reward from action.
reward = self.draw_click(action_id)
self.update_state()
if reward == 1:
self.state = organic # After a click, Organic Events always follow.
# Markov state dependent logic.
if self.state == organic:
sessions = self.generate_organic_sessions()
else:
sessions = self.empty_sessions
return (
Observation(
DefaultContext(self.current_time, self.current_user_id),
sessions
),
reward,
self.state == stop,
info
)
def step_offline(self, observation, reward, done):
"""Call step function wih the policy implemented by a particular Agent."""
if self.first_step:
action = None
else:
assert (hasattr(self, 'agent'))
assert (observation is not None)
if self.agent:
action = self.agent.act(observation, reward, done)
else:
# Select a Product randomly.
action = {
't': observation.context().time(),
'u': observation.context().user(),
'a': np.int16(self.rng.choice(self.config.num_products)),
'ps': 1.0 / self.config.num_products,
'ps-a': (
np.ones(self.config.num_products) / self.config.num_products
if self.config.with_ps_all else
()
),
}
if done:
return (
action,
Observation(
DefaultContext(self.current_time, self.current_user_id),
self.empty_sessions
),
0,
done,
None
)
else:
observation, reward, done, info = self.step(
action['a'] if action is not None else None
)
return action, observation, reward, done, info
def generate_logs(
self,
num_offline_users: int,
agent: Agent = None,
num_organic_offline_users: int = 0
):
"""
        Produce logs of applying an Agent in the Environment for the specified number of Users.
        If no Agent is provided, a default Agent that selects an Action uniformly at random is used.
"""
if agent:
old_agent = self.agent
self.agent = agent
data = {
't': [],
'u': [],
'z': [],
'v': [],
'a': [],
'c': [],
'ps': [],
'ps-a': [],
}
def _store_organic(observation):
assert (observation is not None)
assert (observation.sessions() is not None)
for session in observation.sessions():
data['t'].append(session['t'])
data['u'].append(session['u'])
data['z'].append('organic')
data['v'].append(session['v'])
data['a'].append(None)
data['c'].append(None)
data['ps'].append(None)
data['ps-a'].append(None)
def _store_bandit(action, reward):
if action:
assert (reward is not None)
data['t'].append(action['t'])
data['u'].append(action['u'])
data['z'].append('bandit')
data['v'].append(None)
data['a'].append(action['a'])
data['c'].append(reward)
data['ps'].append(action['ps'])
data['ps-a'].append(action['ps-a'] if 'ps-a' in action else ())
unique_user_id = 0
for _ in trange(num_organic_offline_users, desc='Organic Users'):
self.reset(unique_user_id)
unique_user_id += 1
observation, _, _, _ = self.step(None)
_store_organic(observation)
for _ in trange(num_offline_users, desc='Users'):
self.reset(unique_user_id)
unique_user_id += 1
observation, reward, done, _ = self.step(None)
while not done:
_store_organic(observation)
action, observation, reward, done, _ = self.step_offline(
observation, reward, done
)
_store_bandit(action, reward)
_store_organic(observation)
action, _, reward, done, _ = self.step_offline(
observation, reward, done
)
assert done, 'Done must not be changed!'
_store_bandit(action, reward)
data['t'] = np.array(data['t'], dtype=np.float32)
data['u'] = pd.array(data['u'], dtype=pd.UInt16Dtype())
data['v'] = pd.array(data['v'], dtype=pd.UInt16Dtype())
data['a'] = pd.array(data['a'], dtype=pd.UInt16Dtype())
data['c'] = np.array(data['c'], dtype=np.float32)
if agent:
self.agent = old_agent
return pd.DataFrame().from_dict(data)
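if __name__ == '__main__':
    # Minimal interaction sketch (not part of the original file) of the step()
    # protocol documented above; assumes the concrete RecoEnv1 environment and
    # its default arguments are importable as below.
    from recogym.envs.reco_env_v1 import RecoEnv1, env_1_args

    env = RecoEnv1()
    env.init_gym(env_1_args)
    env.reset(user_id=0)
    observation, reward, done, info = env.step(None)  # The first step takes no action.
    while not done:
        action_id = env.rng.choice(env.config.num_products)
        observation, reward, done, info = env.step(action_id)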
| 10,433 | 30.810976 | 101 |
py
|
reco-gym
|
reco-gym-master/recogym/envs/session.py
|
class Session(list):
"""Abstract Session class"""
    def to_strings(self, user_id, session_id):
        """Represent the session as a list of strings (one per event)."""
        user_id, session_id = str(user_id), str(session_id)
        session_type = self.get_type()
        strings = []
        # Each element is an event dict as appended in OrganicSessions.next().
        for event in self:
            columns = [user_id, session_type, session_id, event['z'], str(event['v'])]
            strings.append(','.join(columns))
        return strings
def get_type(self):
        raise NotImplementedError
class OrganicSessions(Session):
def __init__(self):
super(OrganicSessions, self).__init__()
def next(self, context, product):
self.append(
{
't': context.time(),
'u': context.user(),
'z': 'pageview',
'v': product
}
)
def get_type(self):
return 'organic'
    def get_views(self):
        # Each element is an event dict as appended in next().
        return [s['v'] for s in self if s['z'] == 'pageview']
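if __name__ == '__main__':
    # Minimal sketch (not part of the original file): record two organic page
    # views and read them back; assumes DefaultContext from .context.
    from recogym.envs.context import DefaultContext

    session = OrganicSessions()
    session.next(DefaultContext(current_time=0, current_user_id=1), product=4)
    session.next(DefaultContext(current_time=1, current_user_id=1), product=7)
    assert session.get_views() == [4, 7]
    print(session.to_strings(user_id=1, session_id=0))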
| 1,004 | 26.162162 | 78 |
py
|
reco-gym
|
reco-gym-master/recogym/envs/__init__.py
|
from .reco_env_v0 import RecoEnv0
from .reco_env_v1 import RecoEnv1
from .observation import Observation
from .configuration import Configuration
from .context import Context, DefaultContext
from .session import Session
from .reco_env_v0 import env_0_args
from .reco_env_v1 import env_1_args
| 294 | 25.818182 | 44 |
py
|
reco-gym
|
reco-gym-master/recogym/envs/features/__init__.py
|
from .time import TimeGenerator, DefaultTimeGenerator
| 54 | 26.5 | 53 |
py
|
reco-gym
|
reco-gym-master/recogym/envs/features/time/normal_time_generator.py
|
from numpy.random.mtrand import RandomState
import numpy as np
from .time_generator import TimeGenerator
class NormalTimeGenerator(TimeGenerator):
""""""
def __init__(self, config):
super(NormalTimeGenerator, self).__init__(config)
self.current_time = 0
if not hasattr(self.config, 'normal_time_mu'):
self.normal_time_mu = 0
else:
self.normal_time_mu = self.config.normal_time_mu
if not hasattr(self.config, 'normal_time_sigma'):
self.normal_time_sigma = 1
else:
self.normal_time_sigma = self.config.normal_time_sigma
self.rng = RandomState(config.random_seed)
def new_time(self):
tmp_time = self.current_time
self.current_time += np.abs(self.rng.normal(self.normal_time_mu, self.normal_time_sigma))
return tmp_time
def reset(self):
self.current_time = 0
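if __name__ == '__main__':
    # Minimal sketch (not part of the original file): event times are the
    # cumulative sums of |Normal(mu, sigma)| increments, starting at 0;
    # assumes Configuration is importable as below.
    from recogym.envs.configuration import Configuration

    generator = NormalTimeGenerator(Configuration({'random_seed': 42}))
    times = [generator.new_time() for _ in range(5)]
    assert times[0] == 0 and times == sorted(times)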
| 915 | 27.625 | 97 |
py
|
reco-gym
|
reco-gym-master/recogym/envs/features/time/time_generator.py
|
class TimeGenerator:
""""""
def __init__(self, config):
self.config = config
def new_time(self):
        raise NotImplementedError
def reset(self):
        raise NotImplementedError
| 198 | 17.090909 | 31 |
py
|
reco-gym
|
reco-gym-master/recogym/envs/features/time/default_time_generator.py
|
from .time_generator import TimeGenerator
class DefaultTimeGenerator(TimeGenerator):
""""""
def __init__(self, config):
super(DefaultTimeGenerator, self).__init__(config)
self.current_time = 0
def new_time(self):
tmp_time = self.current_time
self.current_time += 1
return tmp_time
def reset(self):
self.current_time = 0
| 388 | 21.882353 | 58 |
py
|
reco-gym
|
reco-gym-master/recogym/envs/features/time/__init__.py
|
from .time_generator import TimeGenerator
from .default_time_generator import DefaultTimeGenerator
from .normal_time_generator import NormalTimeGenerator
| 154 | 37.75 | 56 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/pytorch_mlr.py
|
import numpy as np
#import tensorflow as tf
import torch
from torch.autograd import Variable
from torch.nn import functional as F
from scipy.special import expit
from recogym.agents import (
AbstractFeatureProvider,
Model,
ModelBasedAgent,
ViewsFeaturesProvider
)
from ..envs.configuration import Configuration
pytorch_mlr_args = {
'n_epochs': 30,
'learning_rate': 0.01,
'random_seed': np.random.randint(2 ** 31 - 1),
'logIPS': False,
'variance_penalisation_strength': .0,
'clip_weights': False,
'alpha': .0,
'll_IPS': True
}
from numba import jit
from tqdm import tqdm
@jit(nopython=True)
def act_linear_model(X, W, P):
return np.argmax(X.dot(W.T).reshape(P))
class PyTorchMLRModelBuilder(AbstractFeatureProvider):
def __init__(self, config):
super(PyTorchMLRModelBuilder, self).__init__(config)
def build(self):
class PyTorchMLRFeaturesProvider(ViewsFeaturesProvider):
"""
"""
def __init__(self, config):
super(PyTorchMLRFeaturesProvider, self).__init__(config)
def features(self, observation):
base_features = super().features(observation)
return base_features.reshape(1, self.config.num_products)
class PyTorchMLRModel(Model):
"""
"""
def __init__(self, config, model):
super(PyTorchMLRModel, self).__init__(config)
self.model = model
self.W = model.weight.detach().numpy()
def act(self, observation, features):
# OWN CODE
#X = features
#P = X.shape[1]
#X = Variable(torch.Tensor(X))
#action_probs = self.model(X).detach().numpy().ravel()
#action = np.argmax(action_probs)
#ps_all = np.zeros(P)
#ps_all[action] = 1.0
#/OWN CODE
# NUMBA CODE
P = features.shape[1]
X = features.astype(np.float32)
action = act_linear_model(X,self.W,P)
ps_all = np.zeros(P)
ps_all[action] = 1.0
#/NUMBA CODE
return {
**super().act(observation, features),
**{
'a': action,
'ps': 1.0,
'ps-a': ps_all,
},
}
class MultinomialLogisticRegressionModel(torch.nn.Module):
def __init__(self, input_dim, output_dim):
super(MultinomialLogisticRegressionModel, self).__init__()
# Generate weights - initialise randomly
self.weight = torch.nn.Parameter(torch.Tensor(output_dim, input_dim))
torch.nn.init.kaiming_uniform_(self.weight, a = np.sqrt(5))
# Experimental bias
self.bias = torch.nn.Parameter(torch.Tensor(1))
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / np.sqrt(fan_in)
torch.nn.init.uniform_(self.bias, -bound, bound)
def forward(self, x):
# Compute linear transformation x.A.T
pred = F.linear(x, self.weight)
return pred
class BinomialLogisticRegressionModel(torch.nn.Module):
def __init__(self, multinomial_model):
super(BinomialLogisticRegressionModel, self).__init__()
# Weights learned through the multinomial model
self.weight = multinomial_model.weight
self.bias = multinomial_model.bias
def pairwise_dot(self, x, a):
# Not just .matmul - compute pairwise dotproduct for action/context pairs
return (x*a).sum(dim=1)
def forward(self, x, a):
# Compute linear transformation x.A.T
return torch.sigmoid(self.pairwise_dot(x, self.weight[a,:]) + self.bias) # Bias is experimental TODO
# Get data
features, actions, deltas, pss = self.train_data()
# Extract data properly
X = features
N = X.shape[0]
P = X.shape[1]
A = actions
y = deltas
# Generate model
multinomial_model = MultinomialLogisticRegressionModel(P, P)
binomial_model = BinomialLogisticRegressionModel(multinomial_model)
# Compute clipping constant as ratio between 90th and 10th percentile
# As recommended in POEM
M = np.inf
if type(self.config.clip_weights) != bool:
M = self.config.clip_weights
elif self.config.clip_weights:
M = np.percentile(pss[y != 0], 90) / np.percentile(pss[y != 0], 10)
if self.config.clip_weights:
print('Clipping with constant M:\t{0}'.format(M))
# Convert data to torch objects - only clicks for learning multinomial model
X1 = Variable(torch.Tensor(X[y != 0]))
A1 = Variable(torch.LongTensor(A[y != 0]))
w1 = torch.Tensor(pss[y != 0] ** -1)
N1 = X1.shape[0]
# Convert data to torch objects - all samples for learning binomial model
X = Variable(torch.Tensor(X))
A = Variable(torch.LongTensor(A))
w = torch.Tensor(pss ** -1)
y = Variable(torch.Tensor(y))
# Binary cross-entropy as objective for the binomial model
binary_cross_entropy = torch.nn.BCELoss(reduction = 'none')
# LBFGS for optimisation
optimiser = FullBatchLBFGS(multinomial_model.parameters())
def closure():
# Reset gradients
optimiser.zero_grad()
# Check whether we're using the multinomial model
if self.config.alpha < 1.0:
# Compute action predictions for clicks
p_a = multinomial_model(X1)
# Turn these into probabilities through softmax
p_a = F.softmax(p_a, dim = 1)
# Only keep probabilities for the actions that were taken
p_a = torch.gather(p_a, 1, A1.unsqueeze(1))
p_a = p_a.reshape(-1) # FIXME HOTFIX BY MARTIN
# (Clipped) IPS Estimate of the reward
reward = torch.clamp(p_a * w1, max = M)
# Logged version
log_reward = torch.log(torch.clamp(p_a, min = 1e-10)) * w1
# Compute the estimated reward
#reward = torch.clamp(torch.gather(F.softmax(prob_a, dim = 1), 1, A1.unsqueeze(1)) * w1, max = M)
# Log if necessary
#if self.config.logIPS:
# reward = torch.log(torch.clamp(reward, min = 1e-10))
# PyTorch likes to minimise - loss instead of reward
loss = -reward
log_loss = -log_reward
# If the variance regularisation strength is larger than zero
if self.config.variance_penalisation_strength:
# Compute the expectation of the IPS estimate
avg_weighted_loss = torch.mean(loss)
                    # Compute the (sample) standard error of the IPS estimate
                    var = torch.sqrt(torch.sum((loss - avg_weighted_loss)**2) / (N1 - 1) / N1)
# Reweight with lambda and add to the loss
if self.config.logIPS:
loss = log_loss.mean() + self.config.variance_penalisation_strength * var
else:
loss = loss.mean() + self.config.variance_penalisation_strength * var
else:
# Compute the mean over all samples as the final loss
if self.config.logIPS:
loss = log_loss.mean()
else:
loss = loss.mean()
# Check whether we're using the binomial model
if .0 < self.config.alpha:
# Let it generate predictions
prob_c = binomial_model(X, A)
# Negative log-likelihood as loss here - TODO check whether IPS reweighting here makes sense
nll = binary_cross_entropy(prob_c, y)
if self.config.ll_IPS:
nll = nll * w
nll = nll.mean() #* rescaling_factor
# A bit ugly but most efficient - check explicitly for loss combination
if self.config.alpha == .0:
pass
elif self.config.alpha == 1.0:
loss = nll
else:
loss = (1.0 - self.config.alpha) * loss + self.config.alpha * nll
return loss
        # Initial loss; populate gradients once so that the first optimiser
        # step has a valid gradient to work with.
        last_obj = closure()
        last_obj.backward()
max_epoch = 200
tol = 1e-10 # Magic number
max_patience, patience = 20, 0
checkpoints = []
#for epoch in tqdm(range(self.config.n_epochs)):
for epoch in tqdm(range(max_epoch)):
# Optimisation step
obj, _, _, _, _, _, _, _ = optimiser.step({'closure': closure, 'current_loss': last_obj, 'max_ls': 20})
# Check for convergence
#if (last_obj - obj) < tol and epoch > 10:
# patience += 1
# if patience >= max_patience:
# print('Converged after {0} iterations. Final loss: {1}'.format(epoch, obj))
# break
# else:
# #print('Remaining patient at epoch {0}...'.format(epoch))
# pass
# Save last loss
last_obj = obj
if (epoch % 25) == 0:
checkpoints.append(last_obj)
#if epoch == (max_epoch - 1):
# print('Not converged after {0} iterations. Final loss: {1}'.format(max_epoch, last_obj))
print('Checkpoints: {0}\nFinal loss: {1}'.format(checkpoints, last_obj))
return (
PyTorchMLRFeaturesProvider(self.config),
PyTorchMLRModel(self.config, multinomial_model)
)
class PyTorchMLRAgent(ModelBasedAgent):
"""
PyTorch-based multinomial logistic regression Agent.
"""
def __init__(self, config = Configuration(pytorch_mlr_args)):
super(PyTorchMLRAgent, self).__init__(
config,
PyTorchMLRModelBuilder(config)
)
# GRACIOUSLY TAKEN FROM https://github.com/hjmshi/PyTorch-LBFGS
import torch
import numpy as np
import matplotlib.pyplot as plt
from functools import reduce
from copy import deepcopy
from torch.optim import Optimizer
#%% Helper Functions for L-BFGS
def is_legal(v):
"""
Checks that tensor is not NaN or Inf.
Inputs:
v (tensor): tensor to be checked
"""
    legal = not torch.isnan(v).any() and not torch.isinf(v).any()
return legal
def polyinterp(points, x_min_bound=None, x_max_bound=None, plot=False):
"""
Gives the minimizer and minimum of the interpolating polynomial over given points
based on function and derivative information. Defaults to bisection if no critical
points are valid.
Based on polyinterp.m Matlab function in minFunc by Mark Schmidt with some slight
modifications.
Implemented by: Hao-Jun Michael Shi and Dheevatsa Mudigere
Last edited 12/6/18.
Inputs:
points (nparray): two-dimensional array with each point of form [x f g]
x_min_bound (float): minimum value that brackets minimum (default: minimum of points)
x_max_bound (float): maximum value that brackets minimum (default: maximum of points)
plot (bool): plot interpolating polynomial
Outputs:
x_sol (float): minimizer of interpolating polynomial
        F_min (float): minimum of interpolating polynomial (computed internally; only x_sol is returned)
Note:
. Set f or g to np.nan if they are unknown
"""
no_points = points.shape[0]
order = np.sum(1 - np.isnan(points[:,1:3]).astype('int')) - 1
x_min = np.min(points[:, 0])
x_max = np.max(points[:, 0])
# compute bounds of interpolation area
if(x_min_bound is None):
x_min_bound = x_min
if(x_max_bound is None):
x_max_bound = x_max
# explicit formula for quadratic interpolation
if no_points == 2 and order == 2 and plot is False:
# Solution to quadratic interpolation is given by:
# a = -(f1 - f2 - g1(x1 - x2))/(x1 - x2)^2
# x_min = x1 - g1/(2a)
# if x1 = 0, then is given by:
# x_min = - (g1*x2^2)/(2(f2 - f1 - g1*x2))
if(points[0, 0] == 0):
x_sol = -points[0, 2]*points[1, 0]**2/(2*(points[1, 1] - points[0, 1] - points[0, 2]*points[1, 0]))
else:
a = -(points[0, 1] - points[1, 1] - points[0, 2]*(points[0, 0] - points[1, 0]))/(points[0, 0] - points[1, 0])**2
x_sol = points[0, 0] - points[0, 2]/(2*a)
x_sol = np.minimum(np.maximum(x_min_bound, x_sol), x_max_bound)
# explicit formula for cubic interpolation
elif no_points == 2 and order == 3 and plot is False:
# Solution to cubic interpolation is given by:
# d1 = g1 + g2 - 3((f1 - f2)/(x1 - x2))
# d2 = sqrt(d1^2 - g1*g2)
# x_min = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2))
d1 = points[0, 2] + points[1, 2] - 3*((points[0, 1] - points[1, 1])/(points[0, 0] - points[1, 0]))
d2 = np.sqrt(d1**2 - points[0, 2]*points[1, 2])
if np.isreal(d2):
x_sol = points[1, 0] - (points[1, 0] - points[0, 0])*((points[1, 2] + d2 - d1)/(points[1, 2] - points[0, 2] + 2*d2))
x_sol = np.minimum(np.maximum(x_min_bound, x_sol), x_max_bound)
else:
x_sol = (x_max_bound + x_min_bound)/2
# solve linear system
else:
# define linear constraints
A = np.zeros((0, order+1))
b = np.zeros((0, 1))
# add linear constraints on function values
for i in range(no_points):
if not np.isnan(points[i, 1]):
constraint = np.zeros((1, order+1))
for j in range(order, -1, -1):
constraint[0, order - j] = points[i, 0]**j
A = np.append(A, constraint, 0)
b = np.append(b, points[i, 1])
# add linear constraints on gradient values
for i in range(no_points):
if not np.isnan(points[i, 2]):
constraint = np.zeros((1, order+1))
for j in range(order):
constraint[0, j] = (order-j)*points[i,0]**(order-j-1)
A = np.append(A, constraint, 0)
b = np.append(b, points[i, 2])
# check if system is solvable
if(A.shape[0] != A.shape[1] or np.linalg.matrix_rank(A) != A.shape[0]):
x_sol = (x_min_bound + x_max_bound)/2
f_min = np.Inf
else:
# solve linear system for interpolating polynomial
coeff = np.linalg.solve(A, b)
# compute critical points
dcoeff = np.zeros(order)
for i in range(len(coeff) - 1):
dcoeff[i] = coeff[i]*(order-i)
crit_pts = np.array([x_min_bound, x_max_bound])
crit_pts = np.append(crit_pts, points[:, 0])
if not np.isinf(dcoeff).any():
roots = np.roots(dcoeff)
crit_pts = np.append(crit_pts, roots)
# test critical points
f_min = np.Inf
x_sol = (x_min_bound + x_max_bound)/2 # defaults to bisection
for crit_pt in crit_pts:
if np.isreal(crit_pt) and crit_pt >= x_min_bound and crit_pt <= x_max_bound:
F_cp = np.polyval(coeff, crit_pt)
if np.isreal(F_cp) and F_cp < f_min:
x_sol = np.real(crit_pt)
f_min = np.real(F_cp)
if(plot):
plt.figure()
x = np.arange(x_min_bound, x_max_bound, (x_max_bound - x_min_bound)/10000)
f = np.polyval(coeff, x)
plt.plot(x, f)
plt.plot(x_sol, f_min, 'x')
return x_sol
#%% L-BFGS Optimizer
class LBFGS(Optimizer):
"""
Implements the L-BFGS algorithm. Compatible with multi-batch and full-overlap
L-BFGS implementations and (stochastic) Powell damping. Partly based on the
original L-BFGS implementation in PyTorch, Mark Schmidt's minFunc MATLAB code,
and Michael Overton's weak Wolfe line search MATLAB code.
Implemented by: Hao-Jun Michael Shi and Dheevatsa Mudigere
Last edited 12/6/18.
Warnings:
. Does not support per-parameter options and parameter groups.
. All parameters have to be on a single device.
Inputs:
lr (float): steplength or learning rate (default: 1)
history_size (int): update history size (default: 10)
line_search (str): designates line search to use (default: 'Wolfe')
Options:
'None': uses steplength designated in algorithm
'Armijo': uses Armijo backtracking line search
'Wolfe': uses Armijo-Wolfe bracketing line search
dtype: data type (default: torch.float)
debug (bool): debugging mode
References:
[1] Berahas, Albert S., Jorge Nocedal, and Martin Takác. "A Multi-Batch L-BFGS
Method for Machine Learning." Advances in Neural Information Processing
Systems. 2016.
[2] Bollapragada, Raghu, et al. "A Progressive Batching L-BFGS Method for Machine
Learning." International Conference on Machine Learning. 2018.
[3] Lewis, Adrian S., and Michael L. Overton. "Nonsmooth Optimization via Quasi-Newton
Methods." Mathematical Programming 141.1-2 (2013): 135-163.
[4] Liu, Dong C., and Jorge Nocedal. "On the Limited Memory BFGS Method for
Large Scale Optimization." Mathematical Programming 45.1-3 (1989): 503-528.
[5] Nocedal, Jorge. "Updating Quasi-Newton Matrices With Limited Storage."
Mathematics of Computation 35.151 (1980): 773-782.
[6] Nocedal, Jorge, and Stephen J. Wright. "Numerical Optimization." Springer New York,
2006.
[7] Schmidt, Mark. "minFunc: Unconstrained Differentiable Multivariate Optimization
in Matlab." Software available at http://www.cs.ubc.ca/~schmidtm/Software/minFunc.html
(2005).
[8] Schraudolph, Nicol N., Jin Yu, and Simon Günter. "A Stochastic Quasi-Newton
Method for Online Convex Optimization." Artificial Intelligence and Statistics.
2007.
[9] Wang, Xiao, et al. "Stochastic Quasi-Newton Methods for Nonconvex Stochastic
Optimization." SIAM Journal on Optimization 27.2 (2017): 927-956.
"""
def __init__(self, params, lr=1, history_size=10, line_search='Wolfe',
dtype=torch.float, debug=False):
# ensure inputs are valid
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0 <= history_size:
raise ValueError("Invalid history size: {}".format(history_size))
if line_search not in ['Armijo', 'Wolfe', 'None']:
raise ValueError("Invalid line search: {}".format(line_search))
defaults = dict(lr=lr, history_size=history_size, line_search=line_search,
dtype=dtype, debug=debug)
super(LBFGS, self).__init__(params, defaults)
if len(self.param_groups) != 1:
raise ValueError("L-BFGS doesn't support per-parameter options "
"(parameter groups)")
self._params = self.param_groups[0]['params']
self._numel_cache = None
state = self.state['global_state']
state.setdefault('n_iter', 0)
state.setdefault('curv_skips', 0)
state.setdefault('fail_skips', 0)
state.setdefault('H_diag',1)
state.setdefault('fail', True)
state['old_dirs'] = []
state['old_stps'] = []
def _numel(self):
if self._numel_cache is None:
self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0)
return self._numel_cache
def _gather_flat_grad(self):
views = []
for p in self._params:
if p.grad is None:
view = p.data.new(p.data.numel()).zero_()
elif p.grad.data.is_sparse:
view = p.grad.data.to_dense().view(-1)
else:
view = p.grad.data.view(-1)
views.append(view)
return torch.cat(views, 0)
def _add_update(self, step_size, update):
offset = 0
for p in self._params:
numel = p.numel()
# view as to avoid deprecated pointwise semantics
p.data.add_(step_size, update[offset:offset + numel].view_as(p.data))
offset += numel
assert offset == self._numel()
def _copy_params(self):
current_params = []
for param in self._params:
current_params.append(deepcopy(param.data))
return current_params
def _load_params(self, current_params):
i = 0
for param in self._params:
param.data[:] = current_params[i]
i += 1
def line_search(self, line_search):
"""
Switches line search option.
Inputs:
line_search (str): designates line search to use
Options:
'None': uses steplength designated in algorithm
'Armijo': uses Armijo backtracking line search
'Wolfe': uses Armijo-Wolfe bracketing line search
"""
group = self.param_groups[0]
group['line_search'] = line_search
return
def two_loop_recursion(self, vec):
"""
Performs two-loop recursion on given vector to obtain Hv.
Inputs:
vec (tensor): 1-D tensor to apply two-loop recursion to
Output:
r (tensor): matrix-vector product Hv
"""
group = self.param_groups[0]
history_size = group['history_size']
state = self.state['global_state']
old_dirs = state.get('old_dirs') # change in gradients
old_stps = state.get('old_stps') # change in iterates
H_diag = state.get('H_diag')
# compute the product of the inverse Hessian approximation and the gradient
num_old = len(old_dirs)
if 'rho' not in state:
state['rho'] = [None] * history_size
state['alpha'] = [None] * history_size
rho = state['rho']
alpha = state['alpha']
for i in range(num_old):
rho[i] = 1. / old_stps[i].dot(old_dirs[i])
q = vec
for i in range(num_old - 1, -1, -1):
alpha[i] = old_dirs[i].dot(q) * rho[i]
q.add_(-alpha[i], old_stps[i])
# multiply by initial Hessian
# r/d is the final direction
r = torch.mul(q, H_diag)
for i in range(num_old):
beta = old_stps[i].dot(r) * rho[i]
r.add_(alpha[i] - beta, old_dirs[i])
return r
def curvature_update(self, flat_grad, eps=1e-2, damping=False):
"""
Performs curvature update.
Inputs:
flat_grad (tensor): 1-D tensor of flattened gradient for computing
gradient difference with previously stored gradient
eps (float): constant for curvature pair rejection or damping (default: 1e-2)
damping (bool): flag for using Powell damping (default: False)
"""
assert len(self.param_groups) == 1
# load parameters
if(eps <= 0):
raise(ValueError('Invalid eps; must be positive.'))
group = self.param_groups[0]
history_size = group['history_size']
debug = group['debug']
# variables cached in state (for tracing)
state = self.state['global_state']
fail = state.get('fail')
# check if line search failed
if not fail:
d = state.get('d')
t = state.get('t')
old_dirs = state.get('old_dirs')
old_stps = state.get('old_stps')
H_diag = state.get('H_diag')
prev_flat_grad = state.get('prev_flat_grad')
Bs = state.get('Bs')
# compute y's
y = flat_grad.sub(prev_flat_grad)
s = d.mul(t)
sBs = s.dot(Bs)
ys = y.dot(s) # y*s
# update L-BFGS matrix
if ys > eps*sBs or damping == True:
# perform Powell damping
if damping == True and ys < eps*sBs:
if debug:
print('Applying Powell damping...')
theta = ((1-eps)*sBs)/(sBs - ys)
y = theta*y + (1-theta)*Bs
# updating memory
if len(old_dirs) == history_size:
# shift history by one (limited-memory)
old_dirs.pop(0)
old_stps.pop(0)
# store new direction/step
old_dirs.append(s)
old_stps.append(y)
# update scale of initial Hessian approximation
H_diag = ys / y.dot(y) # (y*y)
state['old_dirs'] = old_dirs
state['old_stps'] = old_stps
state['H_diag'] = H_diag
else:
# save skip
state['curv_skips'] += 1
if debug:
print('Curvature pair skipped due to failed criterion')
else:
# save skip
state['fail_skips'] += 1
if debug:
print('Line search failed; curvature pair update skipped')
return
def _step(self, p_k, g_Ok, g_Sk=None, options={}):
"""
Performs a single optimization step.
Inputs:
p_k (tensor): 1-D tensor specifying search direction
g_Ok (tensor): 1-D tensor of flattened gradient over overlap O_k used
for gradient differencing in curvature pair update
g_Sk (tensor): 1-D tensor of flattened gradient over full sample S_k
used for curvature pair damping or rejection criterion,
if None, will use g_Ok (default: None)
options (dict): contains options for performing line search
Options for Armijo backtracking line search:
'closure' (callable): reevaluates model and returns function value
'current_loss' (tensor): objective value at current iterate (default: F(x_k))
'gtd' (tensor): inner product g_Ok'd in line search (default: g_Ok'd)
'eta' (tensor): factor for decreasing steplength > 0 (default: 2)
'c1' (tensor): sufficient decrease constant in (0, 1) (default: 1e-4)
'max_ls' (int): maximum number of line search steps permitted (default: 10)
'interpolate' (bool): flag for using interpolation (default: True)
'inplace' (bool): flag for inplace operations (default: True)
'ls_debug' (bool): debugging mode for line search
Options for Wolfe line search:
'closure' (callable): reevaluates model and returns function value
'current_loss' (tensor): objective value at current iterate (default: F(x_k))
'gtd' (tensor): inner product g_Ok'd in line search (default: g_Ok'd)
'eta' (float): factor for extrapolation (default: 2)
'c1' (float): sufficient decrease constant in (0, 1) (default: 1e-4)
'c2' (float): curvature condition constant in (0, 1) (default: 0.9)
'max_ls' (int): maximum number of line search steps permitted (default: 10)
'interpolate' (bool): flag for using interpolation (default: True)
'inplace' (bool): flag for inplace operations (default: True)
'ls_debug' (bool): debugging mode for line search
Outputs (depends on line search):
. No line search:
t (float): steplength
. Armijo backtracking line search:
F_new (tensor): loss function at new iterate
t (tensor): final steplength
ls_step (int): number of backtracks
closure_eval (int): number of closure evaluations
desc_dir (bool): descent direction flag
True: p_k is descent direction with respect to the line search
function
False: p_k is not a descent direction with respect to the line
search function
fail (bool): failure flag
True: line search reached maximum number of iterations, failed
False: line search succeeded
. Wolfe line search:
F_new (tensor): loss function at new iterate
g_new (tensor): gradient at new iterate
t (float): final steplength
ls_step (int): number of backtracks
closure_eval (int): number of closure evaluations
grad_eval (int): number of gradient evaluations
desc_dir (bool): descent direction flag
True: p_k is descent direction with respect to the line search
function
False: p_k is not a descent direction with respect to the line
search function
fail (bool): failure flag
True: line search reached maximum number of iterations, failed
False: line search succeeded
Notes:
. If encountering line search failure in the deterministic setting, one
should try increasing the maximum number of line search steps max_ls.
"""
assert len(self.param_groups) == 1
# load parameter options
group = self.param_groups[0]
lr = group['lr']
line_search = group['line_search']
dtype = group['dtype']
debug = group['debug']
# variables cached in state (for tracing)
state = self.state['global_state']
d = state.get('d')
t = state.get('t')
prev_flat_grad = state.get('prev_flat_grad')
Bs = state.get('Bs')
# keep track of nb of iterations
state['n_iter'] += 1
# set search direction
d = p_k
# modify previous gradient
if prev_flat_grad is None:
prev_flat_grad = g_Ok.clone()
else:
prev_flat_grad.copy_(g_Ok)
# set initial step size
t = lr
# closure evaluation counter
closure_eval = 0
if g_Sk is None:
g_Sk = g_Ok.clone()
# perform Armijo backtracking line search
if(line_search == 'Armijo'):
# load options
if(options):
if('closure' not in options.keys()):
raise(ValueError('closure option not specified.'))
else:
closure = options['closure']
if('gtd' not in options.keys()):
gtd = g_Ok.dot(d)
else:
gtd = options['gtd']
if('current_loss' not in options.keys()):
F_k = closure()
closure_eval += 1
else:
F_k = options['current_loss']
if('eta' not in options.keys()):
eta = 2
elif(options['eta'] <= 0):
raise(ValueError('Invalid eta; must be positive.'))
else:
eta = options['eta']
if('c1' not in options.keys()):
c1 = 1e-4
elif(options['c1'] >= 1 or options['c1'] <= 0):
raise(ValueError('Invalid c1; must be strictly between 0 and 1.'))
else:
c1 = options['c1']
if('max_ls' not in options.keys()):
max_ls = 10
elif(options['max_ls'] <= 0):
raise(ValueError('Invalid max_ls; must be positive.'))
else:
max_ls = options['max_ls']
if('interpolate' not in options.keys()):
interpolate = True
else:
interpolate = options['interpolate']
if('inplace' not in options.keys()):
inplace = True
else:
inplace = options['inplace']
if('ls_debug' not in options.keys()):
ls_debug = False
else:
ls_debug = options['ls_debug']
else:
raise(ValueError('Options are not specified; need closure evaluating function.'))
# initialize values
if(interpolate):
if(torch.cuda.is_available()):
F_prev = torch.tensor(np.nan, dtype=dtype).cuda()
else:
F_prev = torch.tensor(np.nan, dtype=dtype)
ls_step = 0
t_prev = 0 # old steplength
fail = False # failure flag
# begin print for debug mode
if ls_debug:
print('==================================== Begin Armijo line search ===================================')
print('F(x): %.8e g*d: %.8e' %(F_k, gtd))
# check if search direction is descent direction
if gtd >= 0:
desc_dir = False
if debug:
print('Not a descent direction!')
else:
desc_dir = True
# store values if not in-place
if not inplace:
current_params = self._copy_params()
# update and evaluate at new point
self._add_update(t, d)
F_new = closure()
closure_eval += 1
# print info if debugging
if(ls_debug):
print('LS Step: %d t: %.8e F(x+td): %.8e F-c1*t*g*d: %.8e F(x): %.8e'
%(ls_step, t, F_new, F_k + c1*t*gtd, F_k))
# check Armijo condition
while F_new > F_k + c1*t*gtd or not is_legal(F_new):
# check if maximum number of iterations reached
if(ls_step >= max_ls):
if inplace:
self._add_update(-t, d)
else:
self._load_params(current_params)
t = 0
F_new = closure()
closure_eval += 1
fail = True
break
else:
# store current steplength
t_new = t
# compute new steplength
# if first step or not interpolating, then multiply by factor
if(ls_step == 0 or not interpolate or not is_legal(F_new)):
t = t/eta
# if second step, use function value at new point along with
# gradient and function at current iterate
elif(ls_step == 1 or not is_legal(F_prev)):
t = polyinterp(np.array([[0, F_k.item(), gtd.item()], [t_new, F_new.item(), np.nan]]))
# otherwise, use function values at new point, previous point,
# and gradient and function at current iterate
else:
t = polyinterp(np.array([[0, F_k.item(), gtd.item()], [t_new, F_new.item(), np.nan],
[t_prev, F_prev.item(), np.nan]]))
# if values are too extreme, adjust t
if(interpolate):
if(t < 1e-3*t_new):
t = 1e-3*t_new
elif(t > 0.6*t_new):
t = 0.6*t_new
# store old point
F_prev = F_new
t_prev = t_new
# update iterate and reevaluate
if inplace:
self._add_update(t-t_new, d)
else:
self._load_params(current_params)
self._add_update(t, d)
F_new = closure()
closure_eval += 1
ls_step += 1 # iterate
# print info if debugging
if(ls_debug):
print('LS Step: %d t: %.8e F(x+td): %.8e F-c1*t*g*d: %.8e F(x): %.8e'
%(ls_step, t, F_new, F_k + c1*t*gtd, F_k))
# store Bs
if Bs is None:
Bs = (g_Sk.mul(-t)).clone()
else:
Bs.copy_(g_Sk.mul(-t))
# print final steplength
if ls_debug:
print('Final Steplength:', t)
print('===================================== End Armijo line search ====================================')
state['d'] = d
state['prev_flat_grad'] = prev_flat_grad
state['t'] = t
state['Bs'] = Bs
state['fail'] = fail
return F_new, t, ls_step, closure_eval, desc_dir, fail
# perform weak Wolfe line search
elif(line_search == 'Wolfe'):
# load options
if(options):
if('closure' not in options.keys()):
raise(ValueError('closure option not specified.'))
else:
closure = options['closure']
if('current_loss' not in options.keys()):
F_k = closure()
closure_eval += 1
else:
F_k = options['current_loss']
if('gtd' not in options.keys()):
gtd = g_Ok.dot(d)
else:
gtd = options['gtd']
if('eta' not in options.keys()):
eta = 2
elif(options['eta'] <= 1):
raise(ValueError('Invalid eta; must be greater than 1.'))
else:
eta = options['eta']
if('c1' not in options.keys()):
c1 = 1e-4
elif(options['c1'] >= 1 or options['c1'] <= 0):
raise(ValueError('Invalid c1; must be strictly between 0 and 1.'))
else:
c1 = options['c1']
if('c2' not in options.keys()):
c2 = 0.9
elif(options['c2'] >= 1 or options['c2'] <= 0):
raise(ValueError('Invalid c2; must be strictly between 0 and 1.'))
elif(options['c2'] <= c1):
raise(ValueError('Invalid c2; must be strictly larger than c1.'))
else:
c2 = options['c2']
if('max_ls' not in options.keys()):
max_ls = 10
elif(options['max_ls'] <= 0):
raise(ValueError('Invalid max_ls; must be positive.'))
else:
max_ls = options['max_ls']
if('interpolate' not in options.keys()):
interpolate = True
else:
interpolate = options['interpolate']
if('inplace' not in options.keys()):
inplace = True
else:
inplace = options['inplace']
if('ls_debug' not in options.keys()):
ls_debug = False
else:
ls_debug = options['ls_debug']
else:
raise(ValueError('Options are not specified; need closure evaluating function.'))
# initialize counters
ls_step = 0
grad_eval = 0 # tracks gradient evaluations
t_prev = 0 # old steplength
# initialize bracketing variables and flag
alpha = 0
beta = float('Inf')
fail = False
# initialize values for line search
if(interpolate):
F_a = F_k
g_a = gtd
if(torch.cuda.is_available()):
F_b = torch.tensor(np.nan, dtype=dtype).cuda()
g_b = torch.tensor(np.nan, dtype=dtype).cuda()
else:
F_b = torch.tensor(np.nan, dtype=dtype)
g_b = torch.tensor(np.nan, dtype=dtype)
# begin print for debug mode
if ls_debug:
print('==================================== Begin Wolfe line search ====================================')
print('F(x): %.8e g*d: %.8e' %(F_k, gtd))
# check if search direction is descent direction
if gtd >= 0:
desc_dir = False
if debug:
print('Not a descent direction!')
else:
desc_dir = True
# store values if not in-place
if not inplace:
current_params = self._copy_params()
# update and evaluate at new point
self._add_update(t, d)
F_new = closure()
closure_eval += 1
# main loop
while True:
# check if maximum number of line search steps have been reached
if(ls_step >= max_ls):
if inplace:
self._add_update(-t, d)
else:
self._load_params(current_params)
t = 0
F_new = closure()
F_new.backward()
g_new = self._gather_flat_grad()
closure_eval += 1
grad_eval += 1
fail = True
break
# print info if debugging
if(ls_debug):
print('LS Step: %d t: %.8e alpha: %.8e beta: %.8e'
%(ls_step, t, alpha, beta))
print('Armijo: F(x+td): %.8e F-c1*t*g*d: %.8e F(x): %.8e'
%(F_new, F_k + c1*t*gtd, F_k))
# check Armijo condition
if(F_new > F_k + c1*t*gtd):
# set upper bound
beta = t
t_prev = t
# update interpolation quantities
if(interpolate):
F_b = F_new
if(torch.cuda.is_available()):
g_b = torch.tensor(np.nan, dtype=dtype).cuda()
else:
g_b = torch.tensor(np.nan, dtype=dtype)
else:
# compute gradient
F_new.backward()
g_new = self._gather_flat_grad()
grad_eval += 1
gtd_new = g_new.dot(d)
# print info if debugging
if(ls_debug):
print('Wolfe: g(x+td)*d: %.8e c2*g*d: %.8e gtd: %.8e'
%(gtd_new, c2*gtd, gtd))
# check curvature condition
if(gtd_new < c2*gtd):
# set lower bound
alpha = t
t_prev = t
# update interpolation quantities
if(interpolate):
F_a = F_new
g_a = gtd_new
else:
break
# compute new steplength
# if first step or not interpolating, then bisect or multiply by factor
if(not interpolate or not is_legal(F_b)):
if(beta == float('Inf')):
t = eta*t
else:
t = (alpha + beta)/2.0
# otherwise interpolate between a and b
else:
t = polyinterp(np.array([[alpha, F_a.item(), g_a.item()],[beta, F_b.item(), g_b.item()]]))
# if values are too extreme, adjust t
if(beta == float('Inf')):
if(t > 2*eta*t_prev):
t = 2*eta*t_prev
elif(t < eta*t_prev):
t = eta*t_prev
else:
if(t < alpha + 0.2*(beta - alpha)):
t = alpha + 0.2*(beta - alpha)
elif(t > (beta - alpha)/2.0):
t = (beta - alpha)/2.0
# if we obtain nonsensical value from interpolation
if(t <= 0):
t = (beta - alpha)/2.0
# update parameters
if inplace:
self._add_update(t - t_prev, d)
else:
self._load_params(current_params)
self._add_update(t, d)
# evaluate closure
F_new = closure()
closure_eval += 1
ls_step += 1
# store Bs
if Bs is None:
Bs = (g_Sk.mul(-t)).clone()
else:
Bs.copy_(g_Sk.mul(-t))
# print final steplength
if ls_debug:
print('Final Steplength:', t)
print('===================================== End Wolfe line search =====================================')
state['d'] = d
state['prev_flat_grad'] = prev_flat_grad
state['t'] = t
state['Bs'] = Bs
state['fail'] = fail
return F_new, g_new, t, ls_step, closure_eval, grad_eval, desc_dir, fail
else:
# perform update
self._add_update(t, d)
# store Bs
if Bs is None:
Bs = (g_Sk.mul(-t)).clone()
else:
Bs.copy_(g_Sk.mul(-t))
state['d'] = d
state['prev_flat_grad'] = prev_flat_grad
state['t'] = t
state['Bs'] = Bs
state['fail'] = False
return t
def step(self, p_k, g_Ok, g_Sk=None, options={}):
return self._step(p_k, g_Ok, g_Sk, options)
#%% Full-Batch (Deterministic) L-BFGS Optimizer (Wrapper)
class FullBatchLBFGS(LBFGS):
"""
Implements full-batch or deterministic L-BFGS algorithm. Compatible with
Powell damping. Can be used when evaluating a deterministic function and
gradient. Wraps the LBFGS optimizer. Performs the two-loop recursion,
updating, and curvature updating in a single step.
Implemented by: Hao-Jun Michael Shi and Dheevatsa Mudigere
Last edited 11/15/18.
Warnings:
. Does not support per-parameter options and parameter groups.
. All parameters have to be on a single device.
Inputs:
lr (float): steplength or learning rate (default: 1)
history_size (int): update history size (default: 10)
line_search (str): designates line search to use (default: 'Wolfe')
Options:
'None': uses steplength designated in algorithm
'Armijo': uses Armijo backtracking line search
'Wolfe': uses Armijo-Wolfe bracketing line search
dtype: data type (default: torch.float)
debug (bool): debugging mode
"""
def __init__(self, params, lr=1, history_size=10, line_search='Wolfe',
dtype=torch.float, debug=False):
super(FullBatchLBFGS, self).__init__(params, lr, history_size, line_search,
dtype, debug)
def step(self, options={}):
"""
Performs a single optimization step.
Inputs:
options (dict): contains options for performing line search
General Options:
'eps' (float): constant for curvature pair rejection or damping (default: 1e-2)
'damping' (bool): flag for using Powell damping (default: False)
Options for Armijo backtracking line search:
'closure' (callable): reevaluates model and returns function value
'current_loss' (tensor): objective value at current iterate (default: F(x_k))
'gtd' (tensor): inner product g_Ok'd in line search (default: g_Ok'd)
'eta' (tensor): factor for decreasing steplength > 0 (default: 2)
'c1' (tensor): sufficient decrease constant in (0, 1) (default: 1e-4)
'max_ls' (int): maximum number of line search steps permitted (default: 10)
'interpolate' (bool): flag for using interpolation (default: True)
'inplace' (bool): flag for inplace operations (default: True)
'ls_debug' (bool): debugging mode for line search
Options for Wolfe line search:
'closure' (callable): reevaluates model and returns function value
'current_loss' (tensor): objective value at current iterate (default: F(x_k))
'gtd' (tensor): inner product g_Ok'd in line search (default: g_Ok'd)
'eta' (float): factor for extrapolation (default: 2)
'c1' (float): sufficient decrease constant in (0, 1) (default: 1e-4)
'c2' (float): curvature condition constant in (0, 1) (default: 0.9)
'max_ls' (int): maximum number of line search steps permitted (default: 10)
'interpolate' (bool): flag for using interpolation (default: True)
'inplace' (bool): flag for inplace operations (default: True)
'ls_debug' (bool): debugging mode for line search
Outputs (depends on line search):
. No line search:
t (float): steplength
. Armijo backtracking line search:
F_new (tensor): loss function at new iterate
t (tensor): final steplength
ls_step (int): number of backtracks
closure_eval (int): number of closure evaluations
desc_dir (bool): descent direction flag
True: p_k is descent direction with respect to the line search
function
False: p_k is not a descent direction with respect to the line
search function
fail (bool): failure flag
True: line search reached maximum number of iterations, failed
False: line search succeeded
. Wolfe line search:
F_new (tensor): loss function at new iterate
g_new (tensor): gradient at new iterate
t (float): final steplength
ls_step (int): number of backtracks
closure_eval (int): number of closure evaluations
grad_eval (int): number of gradient evaluations
desc_dir (bool): descent direction flag
True: p_k is descent direction with respect to the line search
function
False: p_k is not a descent direction with respect to the line
search function
fail (bool): failure flag
True: line search reached maximum number of iterations, failed
False: line search succeeded
Notes:
. If encountering line search failure in the deterministic setting, one
should try increasing the maximum number of line search steps max_ls.
"""
# load options for damping and eps
if('damping' not in options.keys()):
damping = False
else:
damping = options['damping']
if('eps' not in options.keys()):
eps = 1e-2
else:
eps = options['eps']
# gather gradient
grad = self._gather_flat_grad()
# update curvature if after 1st iteration
state = self.state['global_state']
if(state['n_iter'] > 0):
self.curvature_update(grad, eps, damping)
# compute search direction
p = self.two_loop_recursion(-grad)
# take step
return self._step(p, grad, options=options)
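if __name__ == '__main__':
    # Minimal sketch (not part of the original file): fit a one-parameter
    # least-squares problem with FullBatchLBFGS, mirroring the training loop
    # in PyTorchMLRModelBuilder.build above.
    x = torch.nn.Parameter(torch.zeros(1))
    optimiser = FullBatchLBFGS([x])

    def closure():
        optimiser.zero_grad()
        return ((x - 3.0) ** 2).sum()

    loss = closure()
    loss.backward()  # The first step expects gradients to be populated.
    for _ in range(10):
        loss, _, _, _, _, _, _, _ = optimiser.step(
            {'closure': closure, 'current_loss': loss, 'max_ls': 20})
    assert abs(x.item() - 3.0) < 1e-3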
| 52,651 | 38.030393 | 128 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/bayesian_poly.py
|
import numpy as np
import pystan
from scipy.special import expit
from recogym import Configuration
from recogym.agents import (
AbstractFeatureProvider,
Model,
ModelBasedAgent,
ViewsFeaturesProvider
)
from recogym.agents.organic_count import to_categorical
bayesian_poly_args = {
'num_products': 10,
'random_seed': np.random.randint(2 ** 31 - 1),
'poly_degree': 2,
'max_iter': 5000,
'aa': 1.,
'bb': 1.,
'with_ps_all': False,
}
class BayesianModelBuilder(AbstractFeatureProvider):
def __init__(self, config):
super(BayesianModelBuilder, self).__init__(config)
def build(self):
class BayesianFeaturesProvider(ViewsFeaturesProvider):
"""
"""
def __init__(self, config):
super(BayesianFeaturesProvider, self).__init__(config)
def features(self, observation):
base_features = super().features(observation)
return base_features.reshape(1, self.config.num_products)
class BayesianRegressionModel(Model):
"""
"""
def __init__(self, config, Lambda):
super(BayesianRegressionModel, self).__init__(config)
self.Lambda = Lambda
def act(self, observation, features):
X = features
P = X.shape[1]
A = np.eye(P)
XA = np.kron(X, A)
action_proba = expit(np.matmul(XA, self.Lambda.T)).mean(1)
action = np.argmax(action_proba)
if self.config.with_ps_all:
ps_all = np.zeros(self.config.num_products)
ps_all[action] = 1.0
else:
ps_all = ()
return {
**super().act(observation, features),
**{
'a': action,
'ps': 1.0,
'ps-a': ps_all,
},
}
features, actions, deltas, pss = self.train_data()
X = features
N = X.shape[0]
P = X.shape[1]
A = to_categorical(actions, P)
XA = np.array([np.kron(X[n, :], A[n, :]) for n in range(N)])
y = deltas # clicks
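        # Illustrative note (not in the original file): each row of XA is the
        # Kronecker product of the view counts X[n] with the one-hot action
        # A[n], e.g. np.kron([2, 1], [0, 1]) == [0, 2, 0, 1] - one weight per
        # (view, action) pair.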
Sigma = np.kron(self.config.aa * np.eye(P) + self.config.bb,
self.config.aa * np.eye(P) + self.config.bb)
fit = pystan.stan('recogym/agents/model.stan', data = {
'N': features.shape[0],
'P': features.shape[1],
'XA': XA,
'y': deltas,
'Sigma': Sigma
}, chains = 1)
s = fit.extract()
Lambda = s['lambda']
return (
BayesianFeaturesProvider(self.config), # Poly is a bad name ..
BayesianRegressionModel(self.config, Lambda)
)
class BayesianAgent(ModelBasedAgent):
"""
Bayesian Agent.
Note: the agent utilises Stan to train a model.
"""
def __init__(self, config = Configuration(bayesian_poly_args)):
super(BayesianAgent, self).__init__(
config,
BayesianModelBuilder(config)
)
| 3,197 | 27.810811 | 75 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/bandit_count.py
|
import numpy as np
from ..envs.configuration import Configuration
from .abstract import Agent
bandit_count_args = {
'num_products': 10,
'with_ps_all': False,
}
class BanditCount(Agent):
"""
Bandit Count
    The Agent that recommends the Action with the highest smoothed click-through
    rate observed so far, conditioned on the last product viewed.
"""
def __init__(self, config = Configuration(bandit_count_args)):
super(BanditCount, self).__init__(config)
self.pulls_a = np.zeros((self.config.num_products, self.config.num_products))
self.clicks_a = np.zeros((self.config.num_products, self.config.num_products))
self.last_product_viewed = None
        # Laplace-smoothed CTR estimate: (clicks + 1) / (pulls + 2).
        self.ctr = (self.clicks_a + 1) / (self.pulls_a + 2)
def act(self, observation, reward, done):
"""Make a recommendation"""
self.update_lpv(observation)
action = self.ctr[self.last_product_viewed, :].argmax()
if self.config.with_ps_all:
ps_all = np.zeros(self.config.num_products)
ps_all[action] = 1.0
else:
ps_all = ()
return {
**super().act(observation, reward, done),
**{
                'a': action,
'ps': self.ctr[self.last_product_viewed, :][action],
'ps-a': ps_all,
},
}
def train(self, observation, action, reward, done = False):
"""Train the model in an online fashion"""
if action is not None and reward is not None:
ix = self.last_product_viewed
jx = action['a']
self.update_lpv(observation)
self.pulls_a[ix, jx] += 1
self.clicks_a[ix, jx] += reward
self.ctr[ix, jx] = (
(self.clicks_a[ix, jx] + 1) / (self.pulls_a[ix, jx] + 2)
)
def update_lpv(self, observation):
"""Updates the last product viewed based on the observation"""
if observation.sessions():
self.last_product_viewed = observation.sessions()[-1]['v']
def save(self, location):
"""Save the state of the model to disk"""
np.save(location + "pulls_a.npy", self.pulls_a)
np.save(location + "clicks_a.npy", self.clicks_a)
def load(self, location):
"""Load the model state from disk"""
self.pulls_a = np.load(location + "pulls_a.npy")
self.clicks_a = np.load(location + "clicks_a.npy")
def reset(self):
pass
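# A minimal, self-contained sanity check (added for illustration; not part of
# the original file). The CTR estimate uses add-one/add-two smoothing, so an
# arm with no data starts at 0.5 and moves towards its empirical click rate.
if __name__ == '__main__':
    agent = BanditCount()
    assert agent.ctr[0, 0] == 0.5                # (0 + 1) / (0 + 2) with no data
    clicks, pulls = 3, 8
    print((clicks + 1) / (pulls + 2))            # -> 0.4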
| 2,473 | 28.807229 | 86 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/organic_mf.py
|
import numpy as np
import torch
from ..envs.configuration import Configuration
from .abstract import Agent
# Default Arguments ----------------------------------------------------------
organic_mf_square_args = {
'num_products': 10,
'embed_dim': 5,
'mini_batch_size': 32,
'loss_function': torch.nn.CrossEntropyLoss(),
'optim_function': torch.optim.RMSprop,
'learning_rate': 0.01,
'with_ps_all': False,
}
# Model ----------------------------------------------------------------------
class OrganicMFSquare(torch.nn.Module, Agent):
"""
Organic Matrix Factorisation (Square)
The Agent that selects an Action from the model that performs
Organic Events matrix factorisation.
"""
def __init__(self, config = Configuration(organic_mf_square_args)):
torch.nn.Module.__init__(self)
Agent.__init__(self, config)
self.product_embedding = torch.nn.Embedding(
self.config.num_products, self.config.embed_dim
)
self.output_layer = torch.nn.Linear(
self.config.embed_dim, self.config.num_products
)
# Initializing optimizer type.
self.optimizer = self.config.optim_function(
self.parameters(), lr = self.config.learning_rate
)
self.last_product_viewed = None
self.curr_step = 0
self.train_data = []
self.action = None
def forward(self, product):
product = torch.Tensor([product])
a = self.product_embedding(product.long())
b = self.output_layer(a)
return b
def act(self, observation, reward, done):
with torch.no_grad():
if observation is not None and len(observation.current_sessions) > 0:
logits = self.forward(observation.current_sessions[-1]['v'])
# No exploration strategy, choose maximum logit.
self.action = logits.argmax().item()
if self.config.with_ps_all:
all_ps = np.zeros(self.config.num_products)
all_ps[self.action] = 1.0
else:
all_ps = ()
return {
**super().act(observation, reward, done),
**{
'a': self.action,
'ps': 1.0,
'ps-a': all_ps,
}
}
def update_weights(self):
"""Update weights of embedding matrices using mini batch of data"""
# Eliminate previous gradient.
self.optimizer.zero_grad()
for prods in self.train_data:
# Calculating logit of action and last product viewed.
# Loop over the number of products.
for i in range(len(prods) - 1):
logit = self.forward(prods[i]['v'])
# Converting label into Tensor.
label = torch.LongTensor([prods[i + 1]['v']])
# Calculating supervised loss.
loss = self.config.loss_function(logit, label)
loss.backward()
# Update weight parameters.
self.optimizer.step()
def train(self, observation, action, reward, done = False):
"""Method to deal with the """
# Increment step.
self.curr_step += 1
# Update weights of model once mini batch of data accumulated.
if self.curr_step % self.config.mini_batch_size == 0:
self.update_weights()
self.train_data = []
else:
if observation is not None:
data = observation.current_sessions
self.train_data.append(data)
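# Shape check (added for illustration; not part of the original file): the
# forward pass maps a product id to next-product logits via the product
# embedding followed by the linear head.
if __name__ == '__main__':
    agent = OrganicMFSquare()
    print(agent.forward(3).shape)   # -> torch.Size([1, 10])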
| 3,630 | 30.034188 | 81 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/epsilon_greedy.py
|
from numpy.random.mtrand import RandomState
import numpy as np
from .abstract import Agent
epsilon_greedy_args = {
'epsilon': 0.01,
'random_seed': np.random.randint(2 ** 31 - 1),
    # Select an Action that is guaranteed to be different from the Action
    # that would have been selected had Epsilon-Greedy Policy Selection
    # not been applied.
    'epsilon_pure_new': True,
    # In the epsilon case, try to select the worst Action (invert the propensities).
'epsilon_select_worse': False,
'with_ps_all': False,
}
class EpsilonGreedy(Agent):
def __init__(self, config, agent):
super(EpsilonGreedy, self).__init__(config)
self.agent = agent
self.rng = RandomState(self.config.random_seed)
def train(self, observation, action, reward, done = False):
self.agent.train(observation, action, reward, done)
def act(self, observation, reward, done):
greedy_action = self.agent.act(observation, reward, done)
if self.rng.choice([True, False], p = [self.config.epsilon, 1.0 - self.config.epsilon]):
if self.config.epsilon_select_worse:
product_probas = greedy_action['ps-a']
product_probas = (1.0 - product_probas) # Inversion of probabilities.
else:
product_probas = np.ones(self.config.num_products)
if self.config.epsilon_pure_new:
product_probas[greedy_action['a']] = 0.0
product_probas = product_probas / np.sum(product_probas)
epsilon_action = self.rng.choice(
self.config.num_products,
p = product_probas
)
return {
**super().act(observation, reward, done),
**{
'a': epsilon_action,
'ps': self.config.epsilon * product_probas[epsilon_action],
'ps-a': (
self.config.epsilon * product_probas
if self.config.with_ps_all else
()
),
'greedy': False,
'h0': greedy_action['a']
}
}
else:
return {
**greedy_action,
'greedy': True,
'ps': (1.0 - self.config.epsilon) * greedy_action['ps'],
'ps-a': (
(1.0 - self.config.epsilon) * greedy_action['ps-a']
if self.config.with_ps_all else
()
),
}
def reset(self):
self.agent.reset()
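# Usage sketch (added for illustration, not part of the original file). Note
# that epsilon_greedy_args itself carries no num_products, so it has to be
# merged in; the Configuration/RandomAgent imports are assumptions based on
# the package layout.
#
#   from recogym import Configuration
#   from recogym.agents import RandomAgent, random_args
#   config = Configuration({**epsilon_greedy_args, 'num_products': 10})
#   agent = EpsilonGreedy(config, RandomAgent(Configuration(random_args)))
#   decision = agent.act(observation, reward=0, done=False)
#   decision['greedy']   # False when the exploration branch fired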
| 2,607 | 33.773333 | 96 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/nn_ips.py
|
import numpy as np
import torch
import torch.optim as optim
from numpy.random.mtrand import RandomState
from torch import nn
from .abstract import AbstractFeatureProvider, ViewsFeaturesProvider, Model, ModelBasedAgent
from ..envs.configuration import Configuration
nn_ips_args = {
'num_products': 10,
'number_of_flips': 1,
'random_seed': np.random.randint(2 ** 31 - 1),
    # Select a Product randomly with the probability predicted by the Neural Network with IPS.
'select_randomly': False,
'M': 111,
'learning_rate': 0.01,
'num_epochs': 100,
'num_hidden': 20,
'lambda_val': 0.01,
'with_ps_all': False,
}
class IpsLoss(nn.Module):
"""
IPS Loss Function
"""
def __init__(self, config):
super(IpsLoss, self).__init__()
self.config = config
self.clipping = nn.Threshold(self.config.M, self.config.M)
def forward(self, hx, h0, deltas):
u = self.clipping(hx / h0) * deltas
return torch.mean(u) + self.config.lambda_val * torch.sqrt(torch.var(u) / deltas.shape[0])
class NeuralNet(nn.Module):
"""
Neural Network Model
This class implements a Neural Net model by using PyTorch.
"""
def __init__(self, config):
super(NeuralNet, self).__init__()
self.config = config
self.model = nn.Sequential(
nn.Linear(self.config.num_products, self.config.num_hidden),
nn.Sigmoid(),
nn.Linear(self.config.num_hidden, self.config.num_hidden),
nn.Sigmoid(),
nn.Linear(self.config.num_hidden, self.config.num_products),
nn.Softmax(dim = 1)
)
def forward(self, features):
return self.model.forward(features)
class NnIpsModelBuilder(AbstractFeatureProvider):
"""
Neural Net Inverse Propensity Score Model Builder
"""
def __init__(self, config):
super(NnIpsModelBuilder, self).__init__(config)
def build(self):
model = NeuralNet(self.config)
criterion = IpsLoss(self.config)
optimizer = optim.SGD(model.parameters(), lr = self.config.learning_rate)
features, actions, deltas, pss = self.train_data()
deltas = deltas[:, np.newaxis] * np.ones((1, self.config.num_products))
pss = pss[:, np.newaxis] * np.ones((1, self.config.num_products))
for epoch in range(self.config.num_epochs):
optimizer.zero_grad()
loss = criterion(
model(torch.Tensor(features)),
torch.Tensor(pss),
torch.Tensor(-1.0 * deltas)
)
loss.backward()
optimizer.step()
class TorchFeatureProvider(ViewsFeaturesProvider):
def __init__(self, config):
super(TorchFeatureProvider, self).__init__(config, is_sparse=True)
def features(self, observation):
base_features = super().features(observation).reshape(1, self.config.num_products)
return torch.Tensor(base_features)
class TorchModel(Model):
def __init__(self, config, model):
super(TorchModel, self).__init__(config)
self.model = model
if self.config.select_randomly:
self.rng = RandomState(self.config.random_seed)
def act(self, observation, features):
prob = self.model.forward(features)[0, :]
if self.config.select_randomly:
prob = prob.detach().numpy()
action = self.rng.choice(
np.array(self.config.num_products),
p = prob
)
if self.config.with_ps_all:
ps_all = prob
else:
ps_all = ()
else:
action = torch.argmax(prob).item()
if self.config.with_ps_all:
ps_all = np.zeros(self.config.num_products)
ps_all[action] = 1.0
else:
ps_all = ()
return {
**super().act(observation, features),
**{
'a': action,
'ps': prob[action].item(),
'ps-a': ps_all,
},
}
return (
TorchFeatureProvider(self.config),
TorchModel(self.config, model)
)
class NnIpsAgent(ModelBasedAgent):
"""
Neural Network Agent with IPS
"""
def __init__(self, config = Configuration(nn_ips_args)):
super(NnIpsAgent, self).__init__(
config,
NnIpsModelBuilder(config)
)
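# Tiny worked example of the IPS objective (added for illustration; not part
# of the original file): the loss is mean(clip(hx / h0) * delta) plus a
# lambda-weighted sample-variance penalty. Note that nn.Threshold(M, M) maps
# any ratio <= M to M (a floor rather than a cap), so with the default
# M = 111 both ratios below are lifted to 111.
if __name__ == '__main__':
    cfg = Configuration(nn_ips_args)
    criterion = IpsLoss(cfg)
    hx = torch.Tensor([[0.2], [0.8]])        # new-policy propensities
    h0 = torch.Tensor([[0.5], [0.5]])        # logging-policy propensities
    deltas = torch.Tensor([[-1.0], [0.0]])   # negated clicks, as in build()
    print(criterion(hx, h0, deltas).item())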
| 4,794 | 29.935484 | 98 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/organic_user_count.py
|
import numpy as np
from numpy.random.mtrand import RandomState
from .abstract import AbstractFeatureProvider, ViewsFeaturesProvider, Model, ModelBasedAgent
from ..envs.configuration import Configuration
organic_user_count_args = {
    'num_products': 10,
    'random_seed': np.random.randint(2 ** 31 - 1),
    # Select a Product randomly, with the highest probability assigned
    # to the most frequently viewed product.
    'select_randomly': True,
    # Epsilon to add to the user state -- if non-zero, this ensures the policy
    # has support over all products.
    'epsilon': .0,
    'exploit_explore': True,
    # Reverse popularity.
    'reverse_pop': False,
    # Weight History Function: how to weight each event back in time.
    'weight_history_function': None,
    'with_ps_all': False,
}
class OrganicUserEventCounterModelBuilder(AbstractFeatureProvider):
def __init__(self, config):
super(OrganicUserEventCounterModelBuilder, self).__init__(config)
def build(self):
class OrganicUserEventCounterModel(Model):
"""
            Organic Event Count Model (per User).
"""
def __init__(self, config):
super(OrganicUserEventCounterModel, self).__init__(config)
self.rng = RandomState(self.config.random_seed)
def act(self, observation, features):
features = features.flatten()
if self.config.exploit_explore:
is_explore_case = self.rng.choice(
[True, False],
p=[self.config.epsilon, 1 - self.config.epsilon]
)
if is_explore_case:
mask = features == 0
features[mask] = 1
features[~mask] = 0
action_proba = features / np.sum(features)
else:
features = self.config.epsilon + features
action_proba = features / np.sum(features)
if self.config.reverse_pop:
action_proba = 1 - action_proba
action_proba = action_proba / np.sum(action_proba)
if self.config.select_randomly:
action = self.rng.choice(self.config.num_products, p=action_proba)
if self.config.exploit_explore:
ps = (
(
self.config.epsilon
if is_explore_case else
1 - self.config.epsilon
) * action_proba[action]
)
else:
ps = action_proba[action]
if self.config.with_ps_all:
ps_all = action_proba
else:
ps_all = ()
else:
action = np.argmax(action_proba)
ps = 1.0
if self.config.with_ps_all:
ps_all = np.zeros(self.config.num_products)
ps_all[action] = 1.0
else:
ps_all = ()
return {
**super().act(observation, features),
**{
'a': action,
'ps': ps,
'ps-a': ps_all,
},
}
return (
ViewsFeaturesProvider(self.config, is_sparse=False),
OrganicUserEventCounterModel(self.config)
)
class OrganicUserEventCounterAgent(ModelBasedAgent):
"""
Organic Event Counter Agent
    The Agent that counts Organic views of Products (per User)
and selects an Action for the most frequently shown Product.
"""
def __init__(self, config=Configuration(organic_user_count_args)):
super(OrganicUserEventCounterAgent, self).__init__(
config,
OrganicUserEventCounterModelBuilder(config)
)
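# Worked micro-example of the epsilon-smoothed branch above (added for
# illustration; not part of the original file): adding epsilon to the view
# counts keeps every product's probability strictly positive.
if __name__ == '__main__':
    views = np.array([0., 3., 1.])
    smoothed = 0.5 + views
    print(smoothed / smoothed.sum())   # ~ [0.091, 0.636, 0.273]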
| 4,161 | 34.87931 | 104 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/organic_count.py
|
import numpy as np
from .abstract import Agent
from ..envs.configuration import Configuration
organic_count_args = {
'num_products': 10,
'with_ps_all': False,
}
# From Keras.
def to_categorical(y, num_classes = None, dtype = 'float32'):
"""Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
dtype: The data type expected by the input, as a string
(`float32`, `float64`, `int32`...)
# Returns
A binary matrix representation of the input. The classes axis
is placed last.
"""
y = np.array(y, dtype = 'int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype = dtype)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
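# Example (added for illustration):
#   >>> to_categorical([1, 3], num_classes=5)
#   array([[0., 1., 0., 0., 0.],
#          [0., 0., 0., 1., 0.]], dtype=float32)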
class OrganicCount(Agent):
"""
Organic Count
The Agent that selects an Action based on the most frequently viewed Product.
"""
def __init__(self, config = Configuration(organic_count_args)):
super(OrganicCount, self).__init__(config)
self.co_counts = np.zeros((self.config.num_products, self.config.num_products))
self.corr = None
def act(self, observation, reward, done):
"""Make a recommendation"""
self.update_lpv(observation)
action = self.co_counts[self.last_product_viewed, :].argmax()
if self.config.with_ps_all:
ps_all = np.zeros(self.config.num_products)
ps_all[action] = 1.0
else:
ps_all = ()
return {
**super().act(observation, reward, done),
**{
'a': self.co_counts[self.last_product_viewed, :].argmax(),
'ps': 1.0,
'ps-a': ps_all,
},
}
def train(self, observation, action, reward, done = False):
"""Train the model in an online fashion"""
if observation.sessions():
A = to_categorical(
[session['v'] for session in observation.sessions()],
self.config.num_products
)
B = A.sum(0).reshape((self.config.num_products, 1))
self.co_counts = self.co_counts + np.matmul(B, B.T)
def update_lpv(self, observation):
"""updates the last product viewed based on the observation"""
if observation.sessions():
self.last_product_viewed = observation.sessions()[-1]['v']
| 2,884 | 31.784091 | 87 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/random_agent.py
|
# Default Arguments.
import numpy as np
from numpy.random.mtrand import RandomState
from .abstract import Agent
from ..envs.configuration import Configuration
random_args = {
'num_products': 10,
'random_seed': np.random.randint(2 ** 31 - 1),
'with_ps_all': False,
}
class RandomAgent(Agent):
"""The world's simplest agent!"""
def __init__(self, config = Configuration(random_args)):
super(RandomAgent, self).__init__(config)
self.rng = RandomState(config.random_seed)
def act(self, observation, reward, done):
return {
**super().act(observation, reward, done),
**{
'a': self.rng.choice(self.config.num_products),
'ps': 1.0 / float(self.config.num_products),
'ps-a': (
np.ones(self.config.num_products) / self.config.num_products
if self.config.with_ps_all else
()
),
},
}
| 994 | 27.428571 | 80 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/bayesian_poly_vb.py
|
import numpy as np
from scipy.special import expit
from ..envs.configuration import Configuration
from . import (
AbstractFeatureProvider,
Model,
ModelBasedAgent,
ViewsFeaturesProvider
)
from .organic_count import to_categorical
bayesian_poly_args = {
'num_products': 10,
'random_seed': np.random.randint(2 ** 31 - 1),
'poly_degree': 2,
'max_iter': 5000,
'aa': 1.,
'bb': 1.,
'with_ps_all': False,
}
from scipy import rand
from numpy.linalg import inv
# Algorithm 6
# http://www.maths.usyd.edu.au/u/jormerod/JTOpapers/Ormerod10.pdf
def JJ(zeta):
return 1. / (2. * zeta) * (1. / (1 + np.exp(-zeta)) - 0.5)
# TODO replace explicit inv with linear solves
def bayesian_logistic(Psi, y, mu_beta, Sigma_beta, iter = 200):
zeta = rand(Psi.shape[0])
for _ in range(iter):
q_Sigma = inv(inv(Sigma_beta) + 2 * np.matmul(np.matmul(Psi.T, np.diag(JJ(zeta))), Psi))
q_mu = np.matmul(q_Sigma, (np.matmul(Psi.T, y - 0.5) + np.matmul(inv(Sigma_beta), mu_beta)))
zeta = np.sqrt(np.diag(np.matmul(np.matmul(Psi, q_Sigma + np.matmul(q_mu, q_mu.T)), Psi.T)))
return q_mu, q_Sigma
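# Sanity check (added for illustration): the Jaakkola-Jordan coefficient above
# tends to 1/8 as zeta -> 0 and decays for large zeta, e.g.
#   JJ(np.array([1e-6, 1.0, 5.0])) ~= [0.125, 0.1155, 0.0493]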
from scipy.stats import multivariate_normal
class BayesianModelBuilderVB(AbstractFeatureProvider):
def __init__(self, config):
super(BayesianModelBuilderVB, self).__init__(config)
def build(self):
class BayesianFeaturesProviderVB(ViewsFeaturesProvider):
"""
"""
def __init__(self, config):
super(BayesianFeaturesProviderVB, self).__init__(config)
def features(self, observation):
base_features = super().features(observation)
return base_features.reshape(1, self.config.num_products)
class BayesianRegressionModelVB(Model):
"""
"""
def __init__(self, config, Lambda):
super(BayesianRegressionModelVB, self).__init__(config)
self.Lambda = Lambda
def act(self, observation, features):
X = features
P = X.shape[1]
A = np.eye(P)
XA = np.kron(X, A)
action_proba = expit(np.matmul(XA, self.Lambda.T)).mean(1)
action = np.argmax(action_proba)
if self.config.with_ps_all:
ps_all = np.zeros(self.config.num_products)
ps_all[action] = 1.0
else:
ps_all = ()
return {
**super().act(observation, features),
**{
'a': action,
'ps': 1.0,
'ps-a': ps_all,
},
}
features, actions, deltas, pss = self.train_data()
X = features
N = X.shape[0]
P = X.shape[1]
A = to_categorical(actions, P)
XA = np.array([np.kron(X[n, :], A[n, :]) for n in range(N)])
y = deltas # clicks
Sigma = np.kron(self.config.aa * np.eye(P) + self.config.bb,
self.config.aa * np.eye(P) + self.config.bb)
q_mu, q_Sigma = bayesian_logistic(XA, y.reshape((N, 1)),
mu_beta = -6 * np.ones((P ** 2, 1)), Sigma_beta = Sigma)
Lambda = multivariate_normal.rvs(q_mu.reshape(P ** 2), q_Sigma, 1000)
# stan version of the above (seems to agree well)
# fit = pystan.stan('model.stan', data = {'N': features.shape[0], 'P': features.shape[1], 'XA': XA, 'y': y, 'Sigma': Sigma}, chains = 1)
# s = fit.extract()
# Lambda = s['lambda']
###
return (
BayesianFeaturesProviderVB(self.config), # Poly is a bad name ..
BayesianRegressionModelVB(self.config, Lambda)
)
class BayesianAgentVB(ModelBasedAgent):
"""
Bayesian Agent.
Note: the agent utilises VB to train a model.
"""
def __init__(self, config = Configuration(bayesian_poly_args)):
super(BayesianAgentVB, self).__init__(
config,
BayesianModelBuilderVB(config)
)
| 4,193 | 30.066667 | 144 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/bandit_mf.py
|
import torch
import numpy as np
from torch import nn, optim, Tensor
from ..envs.configuration import Configuration
from .abstract import Agent
# Default Arguments.
bandit_mf_square_args = {
'num_products': 10,
'embed_dim': 5,
'mini_batch_size': 32,
'loss_function': nn.BCEWithLogitsLoss(),
'optim_function': optim.RMSprop,
'learning_rate': 0.01,
'with_ps_all': False,
}
# Model.
class BanditMFSquare(nn.Module, Agent):
def __init__(self, config = Configuration(bandit_mf_square_args)):
nn.Module.__init__(self)
Agent.__init__(self, config)
self.product_embedding = nn.Embedding(
self.config.num_products, self.config.embed_dim
)
self.user_embedding = nn.Embedding(
self.config.num_products, self.config.embed_dim
)
# Initializing optimizer type.
self.optimizer = self.config.optim_function(
self.parameters(), lr = self.config.learning_rate
)
self.last_product_viewed = None
self.curr_step = 0
self.train_data = ([], [], [])
self.all_products = np.arange(self.config.num_products)
def forward(self, products, users = None):
if users is None:
users = np.full(products.shape[0], self.last_product_viewed)
a = self.product_embedding(torch.LongTensor(products))
b = self.user_embedding(torch.LongTensor(users))
return torch.sum(a * b, dim = 1)
def get_logits(self):
"""Returns vector of product recommendation logits"""
return self.forward(self.all_products)
def update_lpv(self, observation):
"""Updates the last product viewed based on the observation"""
assert (observation is not None)
assert (observation.sessions() is not None)
if observation.sessions():
self.last_product_viewed = observation.sessions()[-1]['v']
def act(self, observation, reward, done):
with torch.no_grad():
# Update last product viewed.
self.update_lpv(observation)
# Get logits for all possible actions.
logits = self.get_logits()
# No exploration strategy, choose maximum logit.
action = logits.argmax().item()
if self.config.with_ps_all:
all_ps = np.zeros(self.config.num_products)
all_ps[action] = 1.0
else:
all_ps = ()
return {
**super().act(observation, reward, done),
**{
'a': action,
'ps': logits[action],
'ps-a': all_ps,
},
}
def update_weights(self):
"""Update weights of embedding matrices using mini batch of data"""
if len(self.train_data[0]) != 0:
# Eliminate previous gradient.
self.optimizer.zero_grad()
assert len(self.train_data[0]) == len(self.train_data[1])
assert len(self.train_data[0]) == len(self.train_data[2])
lpvs, actions, rewards = self.train_data
# Calculating logit of action and last product viewed.
logit = self.forward(np.array(actions), np.array(lpvs))
# Converting reward into Tensor.
reward = Tensor(np.array(rewards))
# Calculating supervised loss.
loss = self.config.loss_function(logit, reward)
loss.backward()
# Update weight parameters.
self.optimizer.step()
def train(self, observation, action, reward, done = False):
# Update last product viewed.
self.update_lpv(observation)
# Increment step.
self.curr_step += 1
# Update weights of model once mini batch of data accumulated.
if self.curr_step % self.config.mini_batch_size == 0:
self.update_weights()
self.train_data = ([], [], [])
else:
if action is not None and reward is not None:
self.train_data[0].append(self.last_product_viewed)
self.train_data[1].append(action['a'])
self.train_data[2].append(reward)
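# Shape check (added for illustration; not part of the original file): one
# logit per product, from the dot product of product and user embeddings.
if __name__ == '__main__':
    agent = BanditMFSquare()
    agent.last_product_viewed = 0
    print(agent.get_logits().shape)   # -> torch.Size([10])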
| 4,211 | 32.165354 | 75 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/abstract.py
|
import math
import numpy as np
import pandas as pd
import scipy.sparse as sparse
from tqdm import tqdm
class Agent:
"""
This is an abstract Agent class.
The class defines an interface with methods those should be overwritten for a new Agent.
"""
def __init__(self, config):
self.config = config
def act(self, observation, reward, done):
"""An act method takes in an observation, which could either be
`None` or an Organic_Session (see recogym/session.py) and returns
        an integer between 0 and num_products indicating which product the
agent recommends"""
return {
't': observation.context().time(),
'u': observation.context().user(),
}
def train(self, observation, action, reward, done=False):
"""Use this function to update your model based on observation, action,
reward tuples"""
pass
def reset(self):
pass
class ModelBuilder:
"""
Model Builder
The class that collects data obtained during a set of sessions
    (the data are collected via a series of calls to the `train` method)
and when it is decided that there is enough data to create a Model,
the Model Builder generates BOTH the Feature Provider and the Model.
Next time when the Model is to be used,
the Feature Provider generates a Feature Set
that is suitable for the Model.
"""
def __init__(self, config):
self.config = config
self.data = None
self.reset()
def train(self, observation, action, reward, done):
"""
Train a Model
        The method should be called every time new training data become available.
These data are used to train a Model.
:param observation: Organic Sessions
:param action: an Agent action for the Observation
:param reward: reward (click/no click)
:param done:
:return: nothing
"""
assert (observation is not None)
assert (observation.sessions() is not None)
for session in observation.sessions():
self.data['t'].append(session['t'])
self.data['u'].append(session['u'])
self.data['z'].append('organic')
self.data['v'].append(session['v'])
self.data['a'].append(None)
self.data['c'].append(None)
self.data['ps'].append(None)
if action:
self.data['t'].append(action['t'])
self.data['u'].append(action['u'])
self.data['z'].append('bandit')
self.data['v'].append(None)
self.data['a'].append(action['a'])
self.data['c'].append(reward)
self.data['ps'].append(action['ps'])
def build(self):
"""
Build a Model
The function generates a tuple: (FeatureProvider, Model)
"""
        raise NotImplementedError
def reset(self):
"""
Reset Data
The method clears all previously collected training data.
"""
self.data = {
't': [],
'u': [],
'z': [],
'v': [],
'a': [],
'c': [],
'ps': [],
}
class Model:
"""
Model
"""
def __init__(self, config):
self.config = config
def act(self, observation, features):
return {
't': observation.context().time(),
'u': observation.context().user(),
}
def reset(self):
pass
class FeatureProvider:
"""
Feature Provider
The interface defines a set of methods used to:
* collect data from which should be generated a Feature Set
* generate a Feature Set suitable for a particular Model from previously collected data
"""
def __init__(self, config):
self.config = config
def observe(self, observation):
"""
Collect Observations
The data are collected for a certain user
:param observation:
:return:
"""
        raise NotImplementedError
def features(self, observation):
"""
Generate a Feature Set
:return: a Feature Set suitable for a certain Model
"""
        raise NotImplementedError
def reset(self):
"""
Reset
Clear all previously collected data.
:return: nothing
"""
        raise NotImplementedError
class AbstractFeatureProvider(ModelBuilder):
"""
Abstract Feature Provider
The Feature Provider that contains the common logic in
creation of a Feature Set that consists of:
* Views (count of Organic Events: Products views)
* Actions (Actions provided by an Agent)
* Propensity Scores: probability of selecting an Action by an Agent
* Delta (rewards: 1 -- there was a Click; 0 -- there was no)
"""
def __init__(self, config, is_sparse=False):
super(AbstractFeatureProvider, self).__init__(config)
self.is_sparse = is_sparse
def train_data(self):
data = pd.DataFrame().from_dict(self.data)
features = []
actions = []
pss = []
deltas = []
with_history = hasattr(self.config, 'weight_history_function')
for user_id in tqdm(data['u'].unique(), desc='Train Data'):
ix = 0
ixs = []
jxs = []
if with_history:
history = []
assert data[data['u'] == user_id].shape[0] <= np.iinfo(np.int16).max
for _, user_datum in data[data['u'] == user_id].iterrows():
assert (not math.isnan(user_datum['t']))
if user_datum['z'] == 'organic':
assert (math.isnan(user_datum['a']))
assert (math.isnan(user_datum['c']))
assert (not math.isnan(user_datum['v']))
view = np.int16(user_datum['v'])
if with_history:
ixs.append(np.int16(ix))
jxs.append(view)
history.append(np.int16(user_datum['t']))
ix += 1
else:
jxs.append(view)
else:
assert (user_datum['z'] == 'bandit')
assert (not math.isnan(user_datum['a']))
assert (not math.isnan(user_datum['c']))
assert (math.isnan(user_datum['v']))
action = np.int16(user_datum['a'])
delta = np.int16(user_datum['c'])
ps = user_datum['ps']
time = np.int16(user_datum['t'])
if with_history:
assert len(ixs) == len(jxs)
views = sparse.coo_matrix(
(np.ones(len(ixs), dtype=np.int16), (ixs, jxs)),
shape=(len(ixs), self.config.num_products),
dtype=np.int16
)
weights = self.config.weight_history_function(
time - np.array(history)
)
weighted_views = views.multiply(weights[:, np.newaxis])
features.append(
sparse.coo_matrix(
weighted_views.sum(axis=0, dtype=np.float32),
copy=False
)
)
else:
views = sparse.coo_matrix(
(
np.ones(len(jxs), dtype=np.int16),
(np.zeros(len(jxs)), jxs)
),
shape=(1, self.config.num_products),
dtype=np.int16
)
features.append(views)
actions.append(action)
deltas.append(delta)
pss.append(ps)
out_features = sparse.vstack(features, format='csr')
return (
(
out_features
if self.is_sparse else
                np.array(out_features.todense(), dtype=np.float64)
),
np.array(actions, dtype=np.int16),
np.array(deltas),
np.array(pss)
)
class ModelBasedAgent(Agent):
"""
Model Based Agent
This is a common implementation of the Agent that uses a certain Model when it acts.
The Agent implements all routines needed to interact with the Model, namely:
* training
* acting
"""
def __init__(self, config, model_builder):
super(ModelBasedAgent, self).__init__(config)
self.model_builder = model_builder
self.feature_provider = None
self.model = None
def train(self, observation, action, reward, done=False):
self.model_builder.train(observation, action, reward, done)
def act(self, observation, reward, done):
if self.model is None:
assert (self.feature_provider is None)
self.feature_provider, self.model = self.model_builder.build()
self.feature_provider.observe(observation)
return {
**super().act(observation, reward, done),
**self.model.act(observation, self.feature_provider.features(observation)),
}
def reset(self):
if self.model is not None:
assert (self.feature_provider is not None)
self.feature_provider.reset()
self.model.reset()
class ViewsFeaturesProvider(FeatureProvider):
"""
Views Feature Provider
    This class provides Views of Products i.e. for all Products viewed by a user so far,
    the class returns a vector where, at the index that corresponds to a Product, you will
    find the number of Views of that Product.
    E.g.:
    the number of Products is 5.
    Then, the class returns the vector [0, 3, 7, 0, 2].
    That means that the Products with these IDs were viewed the following number of times:
* 0 --> 0
* 1 --> 3
* 2 --> 7
* 3 --> 0
* 4 --> 2
"""
def __init__(self, config, is_sparse=False):
super(ViewsFeaturesProvider, self).__init__(config)
self.is_sparse = is_sparse
self.with_history = (
hasattr(self.config, 'weight_history_function')
and
self.config.weight_history_function is not None
)
self.reset()
def observe(self, observation):
assert (observation is not None)
assert (observation.sessions() is not None)
for session in observation.sessions():
view = np.int16(session['v'])
if self.with_history:
self.ixs.append(np.int16(self.ix))
self.jxs.append(view)
self.history.append(np.int16(session['t']))
self.ix += 1
else:
self.views[0, view] += 1
def features(self, observation):
if self.with_history:
time = np.int16(observation.context().time())
weights = self.config.weight_history_function(
time - np.array(self.history)
)
weighted_views = self._views().multiply(weights[:, np.newaxis])
views = sparse.coo_matrix(
weighted_views.sum(axis=0, dtype=np.float32),
copy=False
)
if self.is_sparse:
return views
else:
return np.array(views.todense())
else:
return self._views()
def reset(self):
if self.with_history:
self.ix = 0
self.ixs = []
self.jxs = []
self.history = []
else:
if self.is_sparse:
self.views = sparse.lil_matrix(
(1, self.config.num_products),
dtype=np.int16
)
else:
self.views = np.zeros(
(1, self.config.num_products),
dtype=np.int16
)
def _views(self):
if self.with_history:
assert len(self.ixs) == len(self.jxs)
return sparse.coo_matrix(
(
np.ones(len(self.ixs), dtype=np.int16),
(self.ixs, self.jxs)
),
shape=(len(self.ixs), self.config.num_products),
dtype=np.int16
)
else:
return self.views
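# Minimal illustration of the counting path in ViewsFeaturesProvider (added
# for illustration; the stub classes below are assumptions standing in for
# recogym's real Configuration and Observation objects).
if __name__ == '__main__':
    class _Config:
        num_products = 5
    class _Observation:
        def sessions(self):
            return [{'t': t, 'u': 0, 'v': v} for t, v in enumerate([1, 2, 2, 4])]
    provider = ViewsFeaturesProvider(_Config())
    provider.observe(_Observation())
    print(provider.features(_Observation()))   # -> [[0 1 2 0 1]]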
| 12,686 | 29.943902 | 96 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/__init__.py
|
from .abstract import (
Agent,
FeatureProvider,
AbstractFeatureProvider,
ViewsFeaturesProvider,
Model,
ModelBasedAgent
)
from .bayesian_poly_vb import BayesianAgentVB
from .bandit_mf import BanditMFSquare, bandit_mf_square_args
from .bandit_count import BanditCount, bandit_count_args
from .random_agent import RandomAgent, random_args
from .organic_count import OrganicCount, organic_count_args
from .organic_mf import OrganicMFSquare, organic_mf_square_args
from .logreg_ips import LogregMulticlassIpsAgent, logreg_multiclass_ips_args
from .nn_ips import NnIpsAgent, nn_ips_args
from .epsilon_greedy import EpsilonGreedy, epsilon_greedy_args
from .logreg_poly import LogregPolyAgent, logreg_poly_args
from .organic_user_count import OrganicUserEventCounterAgent, organic_user_count_args
from .pytorch_mlr import PyTorchMLRAgent, pytorch_mlr_args
| 878 | 31.555556 | 85 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/logreg_poly.py
|
import numpy as np
from numba import njit
from scipy import sparse
from sklearn.linear_model import LogisticRegression
from ..envs.configuration import Configuration
from .abstract import (AbstractFeatureProvider, Model, ModelBasedAgent, ViewsFeaturesProvider)
logreg_poly_args = {
'num_products': 10,
'random_seed': np.random.randint(2 ** 31 - 1),
'poly_degree': 2,
'with_ips': False,
# Should deltas (rewards) be used to calculate weights?
    # If delta should not be used as an IPS numerator, then `1.0` is used.
'ips_numerator_is_delta': False,
# Should clipping be used to calculate Inverse Propensity Score?
'ips_with_clipping': False,
# Clipping value that limits the value of Inverse Propensity Score.
'ips_clipping_value': 10,
'solver': 'lbfgs',
'max_iter': 5000,
'with_ps_all': False,
}
@njit(nogil = True, cache = True)
def _fast_kron_cols(data, indptr, indices, actions, num_products):
if indptr.shape[0] - 1 == actions.shape[0]:
col_size = data.shape[0]
kron_data = data
with_sliding_ix = True
else:
assert indptr.shape[0] - 1 == 1
col_size = data.shape[0] * actions.shape[0]
kron_data = np.kron(data, np.ones(num_products, dtype = np.int16))
with_sliding_ix = False
kron_rows = np.zeros(col_size)
kron_cols = np.zeros(col_size)
for ix, action in enumerate(actions):
if with_sliding_ix:
six = indptr[ix]
skix = six
eix = indptr[ix + 1]
ekix = eix
else:
six = indptr[0]
eix = indptr[1]
delta = eix - six
skix = ix * delta + six
ekix = ix * delta + eix
cols = indices[six:eix]
kron_rows[skix:ekix] = ix
kron_cols[skix:ekix] += cols + action * num_products
return kron_data, kron_rows, kron_cols
class SparsePolynomialFeatures:
def __init__(self, config: Configuration):
self.config = config
def transform(
self,
features: sparse.csr_matrix,
actions: np.ndarray
) -> sparse.csr_matrix:
kron_data, kron_rows, kron_cols = _fast_kron_cols(
features.data,
features.indptr,
features.indices,
actions,
self.config.num_products
)
assert kron_data.shape[0] == kron_rows.shape[0]
assert kron_data.shape[0] == kron_cols.shape[0]
kron = sparse.coo_matrix(
(
kron_data,
(kron_rows, kron_cols)
),
(actions.shape[0], self.config.num_products * self.config.num_products)
)
if features.shape[0] != actions.shape[0]:
features_data = np.tile(features.data, actions.shape[0])
features_cols = np.tile(features.indices, actions.shape[0])
feature_rows = np.repeat(np.arange(actions.shape[0]), features.nnz)
features = sparse.coo_matrix(
(
features_data,
(feature_rows, features_cols)
),
(actions.shape[0], self.config.num_products),
dtype = np.int16
)
actions = sparse.coo_matrix(
(
actions,
(range(actions.shape[0]), actions)
),
(actions.shape[0], self.config.num_products),
dtype = np.int16
)
assert features.shape[0] == actions.shape[0]
assert features.shape[0] == kron.shape[0]
assert features.shape[1] == self.config.num_products
assert actions.shape[1] == self.config.num_products
return sparse.hstack(
(features, actions, kron)
)
class LogregPolyModelBuilder(AbstractFeatureProvider):
def __init__(self, config, is_sparse=False):
super(LogregPolyModelBuilder, self).__init__(config, is_sparse=is_sparse)
def build(self):
class LogisticRegressionPolyFeaturesProvider(ViewsFeaturesProvider):
"""
Logistic Regression Polynomial Feature Provider.
"""
def __init__(self, config, poly):
super(LogisticRegressionPolyFeaturesProvider, self).__init__(config, is_sparse=True)
self.poly = poly
self.all_actions = np.arange(self.config.num_products)
def features(self, observation):
return self.poly.transform(
super().features(observation).tocsr(),
self.all_actions
)
class LogisticRegressionModel(Model):
"""
Logistic Regression Model
"""
def __init__(self, config, logreg):
super(LogisticRegressionModel, self).__init__(config)
self.logreg = logreg
def act(self, observation, features):
action_proba = self.logreg.predict_proba(features)[:, 1]
action = np.argmax(action_proba)
if self.config.with_ps_all:
ps_all = np.zeros(self.config.num_products)
ps_all[action] = 1.0
else:
ps_all = ()
return {
**super().act(observation, features),
**{
'a': action,
'ps': 1.0,
'ps-a': ps_all,
},
}
features, actions, deltas, pss = self.train_data()
logreg = LogisticRegression(
solver = self.config.solver,
max_iter = self.config.max_iter,
random_state = self.config.random_seed,
n_jobs = -1
)
poly = SparsePolynomialFeatures(self.config)
features_poly = poly.transform(features, actions)
if self.config.with_ips:
ips_numerator = deltas if self.config.ips_numerator_is_delta else 1.0
weights = ips_numerator / pss
if self.config.ips_with_clipping:
                weights = np.minimum(weights, self.config.ips_clipping_value)
lr = logreg.fit(features_poly, deltas, weights)
else:
lr = logreg.fit(features_poly, deltas)
print('Model was built!')
return (
LogisticRegressionPolyFeaturesProvider(self.config, poly),
LogisticRegressionModel(self.config, lr)
)
class LogregPolyAgent(ModelBasedAgent):
"""
Logistic Regression Polynomial Agent
"""
def __init__(self, config = Configuration(logreg_poly_args)):
super(LogregPolyAgent, self).__init__(
config,
LogregPolyModelBuilder(config, is_sparse=True)
)
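# Worked example of the sparse polynomial crossing above (added for
# illustration; not part of the original file): for 3 products, one view row
# crossed with every candidate action yields
# [views | one-hot action | kron(views, action)] = 3 + 3 + 9 = 15 columns.
if __name__ == '__main__':
    cfg = Configuration({'num_products': 3})
    poly = SparsePolynomialFeatures(cfg)
    views = sparse.csr_matrix(np.array([[1, 0, 2]], dtype=np.int16))
    print(poly.transform(views, np.arange(3)).shape)   # -> (3, 15)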
| 6,809 | 31.898551 | 100 |
py
|
reco-gym
|
reco-gym-master/recogym/agents/logreg_ips.py
|
from numpy.random.mtrand import RandomState
from sklearn.linear_model import LogisticRegression
from .abstract import *
from ..envs.configuration import Configuration
logreg_multiclass_ips_args = {
'num_products': 10,
'number_of_flips': 1,
'random_seed': np.random.randint(2 ** 31 - 1),
    # Select a Product randomly with the probability predicted by Multi-Class Logistic Regression.
'select_randomly': False,
'poly_degree': 2,
'solver': 'lbfgs',
'max_iter': 5000,
'with_ps_all': False,
}
class LogregMulticlassIpsModelBuilder(AbstractFeatureProvider):
"""
Logistic Regression Multiclass Model Builder
The class that provides both:
* Logistic Regression Model
* Feature Provider that builds a Feature Set suitable for the Logistic Regression Model
"""
def __init__(self, config):
super(LogregMulticlassIpsModelBuilder, self).__init__(config)
if config.select_randomly:
self.rng = RandomState(self.config.random_seed)
def build(self):
class LogregMulticlassViewsFeaturesProvider(ViewsFeaturesProvider):
"""
Logistic Regression Multiclass Feature Provider
"""
def __init__(self, config):
super(LogregMulticlassViewsFeaturesProvider, self).__init__(config, is_sparse=True)
def features(self, observation):
base_features = super().features(observation)
return base_features.reshape(1, self.config.num_products)
class LogregMulticlassModel(Model):
"""
Logistic Regression Multiclass Model
"""
def __init__(self, config, logreg):
super(LogregMulticlassModel, self).__init__(config)
self.logreg = logreg
if config.select_randomly:
self.rng = RandomState(self.config.random_seed)
def act(self, observation, features):
if self.config.select_randomly:
action_proba = self.logreg.predict_proba(features)[0, :]
action = self.rng.choice(
self.config.num_products,
p=action_proba
)
ps = action_proba[action]
if self.config.with_ps_all:
all_ps = action_proba
else:
all_ps = ()
else:
action = self.logreg.predict(features).item()
ps = 1.0
if self.config.with_ps_all:
all_ps = np.zeros(self.config.num_products)
all_ps[action] = 1.0
else:
all_ps = ()
return {
**super().act(observation, features),
**{
'a': action,
'ps': ps,
'ps-a': all_ps,
},
}
features, actions, deltas, pss = self.train_data()
weights = deltas / pss
logreg = LogisticRegression(
solver=self.config.solver,
max_iter=self.config.max_iter,
multi_class='multinomial',
random_state=self.config.random_seed
)
lr = logreg.fit(features, actions, weights)
return (
LogregMulticlassViewsFeaturesProvider(self.config),
LogregMulticlassModel(self.config, lr)
)
class LogregMulticlassIpsAgent(ModelBasedAgent):
"""
Logistic Regression Multiclass Agent (IPS version)
"""
def __init__(self, config=Configuration(logreg_multiclass_ips_args)):
super(LogregMulticlassIpsAgent, self).__init__(
config,
LogregMulticlassIpsModelBuilder(config)
)
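# Note on the weighting above (added for illustration): weights = deltas / pss
# means only clicked bandit events carry weight, each scaled by the inverse of
# its logging propensity -- e.g. a click logged with ps = 0.1 counts 10x.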
| 3,897 | 32.316239 | 102 |
py
|
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/main.py
|
import numpy as np, os, time, random
from envs_repo.constructor import EnvConstructor
from models.constructor import ModelConstructor
from core.params import Parameters
import argparse, torch
from algos.erl_trainer import ERL_Trainer
parser = argparse.ArgumentParser()
####################### COMMANDLINE - ARGUMENTS ######################
parser.add_argument('--env', type=str, help='Env Name', default='Pendulum-v0')
parser.add_argument('--seed', type=int, help='Seed', default=991)
parser.add_argument('--savetag', type=str, help='#Tag to append to savefile', default='')
parser.add_argument('--gpu_id', type=int, help='#GPU ID ', default=0)
parser.add_argument('--total_steps', type=float, help='#Total steps in the env in millions ', default=2)
parser.add_argument('--buffer', type=float, help='Buffer size in million', default=1.0)
parser.add_argument('--frameskip', type=int, help='Frameskip', default=1)
parser.add_argument('--hidden_size', type=int, help='#Hidden Layer size', default=256)
parser.add_argument('--critic_lr', type=float, help='Critic learning rate?', default=3e-4)
parser.add_argument('--actor_lr', type=float, help='Actor learning rate?', default=1e-4)
parser.add_argument('--tau', type=float, help='Tau', default=1e-3)
parser.add_argument('--gamma', type=float, help='Discount Rate', default=0.99)
parser.add_argument('--alpha', type=float, help='Alpha for Entropy term ', default=0.1)
parser.add_argument('--batchsize', type=int, help='Seed', default=512)
parser.add_argument('--reward_scale', type=float, help='Reward Scaling Multiplier', default=1.0)
parser.add_argument('--learning_start', type=int, help='Frames to wait before learning starts', default=5000)
#ALGO SPECIFIC ARGS
parser.add_argument('--popsize', type=int, help='#Policies in the population', default=10)
parser.add_argument('--rollsize', type=int, help='#Policies in rollout size', default=5)
parser.add_argument('--gradperstep', type=float, help='#Gradient step per env step', default=1.0)
parser.add_argument('--num_test', type=int, help='#Test envs to average on', default=5)
#Figure out GPU to use [Default is 0]
os.environ['CUDA_VISIBLE_DEVICES']=str(vars(parser.parse_args())['gpu_id'])
####################### Construct ARGS Class to hold all parameters ######################
args = Parameters(parser)
#Set seeds
torch.manual_seed(args.seed); np.random.seed(args.seed); random.seed(args.seed)
################################## Find and Set MDP (environment constructor) ########################
env_constructor = EnvConstructor(args.env_name, args.frameskip)
####################### Actor, Critic and ValueFunction Model Constructor ######################
model_constructor = ModelConstructor(env_constructor.state_dim, env_constructor.action_dim, args.hidden_size)
ai = ERL_Trainer(args, model_constructor, env_constructor)
ai.train(args.total_steps)
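# Example invocation (added for illustration; the flags are the ones defined
# above, and step counts are given in millions, matching Parameters):
#   python main.py --env Pendulum-v0 --seed 991 --popsize 10 --rollsize 5 \
#       --total_steps 2 --gpu_id 0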
| 2,890 | 50.625 | 110 |
py
|
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/core/utils.py
|
from torch import nn
from torch.autograd import Variable
import random, pickle, copy, argparse
import numpy as np, torch, os
from torch import distributions
import torch.nn.functional as F
class Tracker(): #Tracker
def __init__(self, save_folder, vars_string, project_string):
self.vars_string = vars_string; self.project_string = project_string
self.foldername = save_folder
self.all_tracker = [[[],0.0,[]] for _ in vars_string] #[Id of var tracked][fitnesses, avg_fitness, csv_fitnesses]
self.counter = 0
self.conv_size = 1
if not os.path.exists(self.foldername):
os.makedirs(self.foldername)
def update(self, updates, generation):
"""Add a metric observed
Parameters:
            updates (list): List of new scores for each tracked metric
generation (int): Current gen
Returns:
None
"""
self.counter += 1
for update, var in zip(updates, self.all_tracker):
            if update is None: continue
var[0].append(update)
#Constrain size of convolution
for var in self.all_tracker:
if len(var[0]) > self.conv_size: var[0].pop(0)
#Update new average
for var in self.all_tracker:
if len(var[0]) == 0: continue
var[1] = sum(var[0])/float(len(var[0]))
if self.counter % 1 == 0: # Save to csv file
for i, var in enumerate(self.all_tracker):
if len(var[0]) == 0: continue
var[2].append(np.array([generation, var[1]]))
filename = self.foldername + self.vars_string[i] + self.project_string
np.savetxt(filename, np.array(var[2]), fmt='%.3f', delimiter=',')
def weights_init_(m, lin_gain=1.0, bias_gain=0.1):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight, gain=lin_gain)
torch.nn.init.constant_(m.bias, bias_gain)
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def hard_update(target, source):
"""Hard update (clone) from target network to source
Parameters:
target (object): A pytorch model
source (object): A pytorch model
Returns:
None
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
def soft_update(target, source, tau):
"""Soft update from target network to source
Parameters:
target (object): A pytorch model
source (object): A pytorch model
tau (float): Tau parameter
Returns:
None
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
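# Numeric intuition (added): with tau = 0.1 every call blends
# w_target <- 0.9 * w_target + 0.1 * w_source, i.e. the target tracks the
# source as an exponential moving average with a horizon of ~ 1 / tau updates.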
def to_numpy(var):
"""Tensor --> numpy
Parameters:
var (tensor): tensor
Returns:
var (ndarray): ndarray
"""
return var.data.numpy()
def to_tensor(ndarray, volatile=False, requires_grad=False):
"""numpy --> Variable
Parameters:
ndarray (ndarray): ndarray
volatile (bool): create a volatile tensor?
requires_grad (bool): tensor requires gradients?
Returns:
var (variable): variable
"""
if isinstance(ndarray, list): ndarray = np.array(ndarray)
return Variable(torch.from_numpy(ndarray).float(), volatile=volatile, requires_grad=requires_grad)
def pickle_obj(filename, object):
"""Pickle object
Parameters:
filename (str): folder to dump pickled object
object (object): object to pickle
Returns:
None
"""
handle = open(filename, "wb")
pickle.dump(object, handle)
def unpickle_obj(filename):
"""Unpickle object from disk
Parameters:
filename (str): file from which to load and unpickle object
Returns:
obj (object): unpickled object
"""
with open(filename, 'rb') as f:
return pickle.load(f)
def init_weights(m):
"""Initialize weights using kaiming uniform initialization in place
Parameters:
m (nn.module): Linear module from torch.nn
Returns:
None
"""
if type(m) == nn.Linear:
nn.init.kaiming_uniform_(m.weight)
m.bias.data.fill_(0.01)
def list_mean(l):
"""compute avergae from a list
Parameters:
l (list): list
Returns:
mean (float): mean
"""
if len(l) == 0: return None
else: return sum(l)/len(l)
def pprint(l):
"""Pretty print
Parameters:
l (list/float/None): object to print
Returns:
pretty print str
"""
    if isinstance(l, list):
        if len(l) == 0: return None
        else: return ['%.2f' % e for e in l]
    else:
        if l is None: return None
        else: return '%.2f' % l
def flatten(d):
"""Recursive method to flatten a dict -->list
Parameters:
d (dict): dict
Returns:
l (list)
"""
res = [] # Result list
if isinstance(d, dict):
for key, val in sorted(d.items()):
res.extend(flatten(val))
elif isinstance(d, list):
res = d
else:
res = [d]
return res
def reverse_flatten(d, l):
"""Recursive method to unflatten a list -->dict [Reverse of flatten] in place
Parameters:
d (dict): dict
l (list): l
Returns:
None
"""
if isinstance(d, dict):
for key, _ in sorted(d.items()):
            # Float is immutable, so assign directly rather than recursing.
if isinstance(d[key], float):
d[key] = l[0]
l[:] = l[1:]
continue
reverse_flatten(d[key], l)
elif isinstance(d, list):
d[:] = l[0:len(d)]
l[:] = l[len(d):]
def load_all_models_dir(dir, model_template):
"""Load all models from a given directory onto a template
Parameters:
dir (str): directory
model_template (object): Class template to load the objects onto
Returns:
models (list): list of loaded objects
"""
list_files = os.listdir(dir)
print(list_files)
models = []
for i, fname in enumerate(list_files):
try:
model_template.load_state_dict(torch.load(dir + fname))
model_template.eval()
models.append(copy.deepcopy(model_template))
        except Exception:
print(fname, 'failed to load')
return models
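# Smoke test for the helpers above (added for illustration; not part of the
# original file).
if __name__ == '__main__':
    src, tgt = nn.Linear(2, 2), nn.Linear(2, 2)
    hard_update(tgt, src)                          # tgt is now a clone of src
    assert torch.equal(tgt.weight.data, src.weight.data)
    soft_update(tgt, src, tau=0.1)                 # no-op here since tgt == src
    print(pprint(3.14159), list_mean([1, 2, 3]))   # -> 3.14 2.0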
| 6,665 | 23.780669 | 121 |
py
|
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/core/buffer.py
|
import numpy as np
import random
import torch
from torch.multiprocessing import Manager
class Buffer():
"""Cyclic Buffer stores experience tuples from the rollouts
Parameters:
capacity (int): Maximum number of experiences to hold in cyclic buffer
"""
def __init__(self, capacity, buffer_gpu=False):
self.capacity = capacity; self.buffer_gpu = buffer_gpu; self.counter = 0
self.manager = Manager()
self.s = []; self.ns = []; self.a = []; self.r = []; self.done = []
def add(self, trajectory):
# Add ALL EXPERIENCE COLLECTED TO MEMORY concurrently
for exp in trajectory:
self.s.append(torch.Tensor(exp[0]))
self.ns.append(torch.Tensor(exp[1]))
self.a.append(torch.Tensor(exp[2]))
self.r.append(torch.Tensor(exp[3]))
self.done.append(torch.Tensor(exp[4]))
#Trim to make the buffer size < capacity
while self.__len__() > self.capacity:
self.s.pop(0); self.ns.pop(0); self.a.pop(0); self.r.pop(0); self.done.pop(0)
def __len__(self):
return len(self.s)
def sample(self, batch_size):
"""Sample a batch of experiences from memory with uniform probability
Parameters:
batch_size (int): Size of the batch to sample
Returns:
Experience (tuple): A tuple of (state, next_state, action, shaped_reward, done) each as a numpy array with shape (batch_size, :)
"""
ind = random.sample(range(len(self.s)), batch_size)
return torch.cat([self.s[i] for i in ind]),\
torch.cat([self.ns[i] for i in ind]),\
torch.cat([self.a[i] for i in ind]),\
torch.cat([self.r[i] for i in ind]),\
torch.cat([self.done[i] for i in ind])
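# Usage sketch (added for illustration; not part of the original file):
# trajectories are lists of [s, ns, a, r, done] rows, each shaped (1, dim),
# so that sample() can torch.cat them along the batch axis.
if __name__ == '__main__':
    buf = Buffer(capacity=100)
    traj = [[np.zeros((1, 3)), np.ones((1, 3)), np.zeros((1, 1)),
             np.array([[1.0]]), np.array([[0.0]])] for _ in range(4)]
    buf.add(traj)
    s, ns, a, r, done = buf.sample(2)
    print(s.shape, r.shape)   # -> torch.Size([2, 3]) torch.Size([2, 1])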
| 1,624 | 27.017241 | 135 |
py
|
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/core/runner.py
|
from core import utils as utils
import numpy as np
import torch
# Rollout evaluate an agent in a complete game
@torch.no_grad()
def rollout_worker(id, type, task_pipe, result_pipe, store_data, model_bucket, env_constructor):
env = env_constructor.make_env()
np.random.seed(id) ###make sure the random seeds across learners are different
###LOOP###
while True:
identifier = task_pipe.recv() # Wait until a signal is received to start rollout
if identifier == 'TERMINATE': exit(0) #Exit
# Get the requisite network
net = model_bucket[identifier]
fitness = 0.0
total_frame = 0
state = env.reset()
rollout_trajectory = []
state = utils.to_tensor(state)
while True: # unless done
if type == 'pg': action = net.noisy_action(state)
else: action = net.clean_action(state)
action = utils.to_numpy(action)
next_state, reward, done, info = env.step(action.flatten()) # Simulate one step in environment
next_state = utils.to_tensor(next_state)
fitness += reward
# If storing transitions
if store_data: #Skip for test set
rollout_trajectory.append([utils.to_numpy(state), utils.to_numpy(next_state),
np.float32(action), np.reshape(np.float32(np.array([reward])), (1, 1)),
np.reshape(np.float32(np.array([float(done)])), (1, 1))])
state = next_state
total_frame += 1
# DONE FLAG IS Received
if done:
break
# Send back id, fitness, total length and shaped fitness using the result pipe
result_pipe.send([identifier, fitness, total_frame, rollout_trajectory])
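# Usage sketch (added for illustration; the exact pipe wiring is an assumption
# based on the ERL trainer): the trainer spawns this worker as a
# torch.multiprocessing Process, sends a model-bucket index down task_pipe to
# trigger one episode, reads [id, fitness, frames, trajectory] back from
# result_pipe, and finally sends 'TERMINATE' to shut the worker down.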
| 1,834 | 32.981481 | 111 |
py
|
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/core/__init__.py
| 0 | 0 | 0 |
py
|
|
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/core/params.py
|
import os
from torch.utils.tensorboard import SummaryWriter
class Parameters:
def __init__(self, parser):
"""Parameter class stores all parameters for policy gradient
Parameters:
None
Returns:
None
"""
#Env args
        args = vars(parser.parse_args())
        self.env_name = args['env']
        self.frameskip = args['frameskip']
        self.total_steps = int(args['total_steps'] * 1000000)
        self.gradperstep = args['gradperstep']
        self.savetag = args['savetag']
        self.seed = args['seed']
        self.batch_size = args['batchsize']
        self.rollout_size = args['rollsize']
        self.hidden_size = args['hidden_size']
        self.critic_lr = args['critic_lr']
        self.actor_lr = args['actor_lr']
        self.tau = args['tau']
        self.gamma = args['gamma']
        self.reward_scaling = args['reward_scale']
        self.buffer_size = int(args['buffer'] * 1000000)
        self.learning_start = args['learning_start']
        self.pop_size = args['popsize']
        self.num_test = args['num_test']
self.test_frequency = 1
        self.asynch_frac = 1.0  # Asynchronicity of NeuroEvolution
#Non-Args Params
self.elite_fraction = 0.2
self.crossover_prob = 0.15
self.mutation_prob = 0.90
self.extinction_prob = 0.005 # Probability of extinction event
        self.extinction_magnituide = 0.5  # Probability of extinction for each genome, given an extinction event
self.weight_magnitude_limit = 10000000
self.mut_distribution = 1 # 1-Gaussian, 2-Laplace, 3-Uniform
self.alpha = vars(parser.parse_args())['alpha']
self.target_update_interval = 1
self.alpha_lr = 1e-3
#Save Results
self.savefolder = 'Results/Plots/'
if not os.path.exists(self.savefolder): os.makedirs(self.savefolder)
self.aux_folder = 'Results/Auxiliary/'
if not os.path.exists(self.aux_folder): os.makedirs(self.aux_folder)
self.savetag += str(self.env_name)
self.savetag += '_seed' + str(self.seed)
self.savetag += '_roll' + str(self.rollout_size)
self.savetag += '_pop' + str(self.pop_size)
self.savetag += '_alpha' + str(self.alpha)
self.writer = SummaryWriter(log_dir='Results/tensorboard/' + self.savetag)
| 2,717 | 36.75 | 111 |
py
|
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/models/discrete_models.py
|
import torch, random
import torch.nn as nn
from torch.distributions import Normal, RelaxedOneHotCategorical, Categorical
import torch.nn.functional as F
class CategoricalPolicy(nn.Module):
"""Critic model
Parameters:
args (object): Parameter class
"""
def __init__(self, state_dim, action_dim, hidden_size):
super(CategoricalPolicy, self).__init__()
self.action_dim = action_dim
        ######################## Policy Trunk ##################
# Construct Hidden Layer 1 with state
self.f1 = nn.Linear(state_dim, hidden_size)
#self.q1ln1 = nn.LayerNorm(l1)
#Hidden Layer 2
self.f2 = nn.Linear(hidden_size, hidden_size)
#self.q1ln2 = nn.LayerNorm(l2)
#Value
self.val = nn.Linear(hidden_size, 1)
#Advantages
self.adv = nn.Linear(hidden_size, action_dim)
def clean_action(self, obs, return_only_action=True):
"""Method to forward propagate through the critic's graph
Parameters:
input (tensor): states
input (tensor): actions
Returns:
Q1 (tensor): Qval 1
Q2 (tensor): Qval 2
V (tensor): Value
"""
###### Feature ####
info = torch.relu(self.f1(obs))
info = torch.relu(self.f2(info))
val = self.val(info)
adv = self.adv(info)
logits = val + adv - adv.mean()
if return_only_action:
return logits.argmax(1)
return None, None, logits
def noisy_action(self, obs, return_only_action=True):
_, _, logits = self.clean_action(obs, return_only_action=False)
dist = Categorical(logits=logits)
action = dist.sample()
if return_only_action:
return action
return action, None, logits
class GumbelPolicy(nn.Module):
"""Critic model
Parameters:
args (object): Parameter class
"""
def __init__(self, state_dim, action_dim, hidden_size, epsilon_start, epsilon_end, epsilon_decay_frames):
super(GumbelPolicy, self).__init__()
self.action_dim = action_dim
        ######################## Policy Trunk ##################
# Construct Hidden Layer 1 with state
self.f1 = nn.Linear(state_dim, hidden_size)
#self.q1ln1 = nn.LayerNorm(l1)
#Hidden Layer 2
self.f2 = nn.Linear(hidden_size, hidden_size)
#self.q1ln2 = nn.LayerNorm(l2)
#Value
self.val = nn.Linear(hidden_size, 1)
#Advantages
self.adv = nn.Linear(hidden_size, action_dim)
#Temperature
self.log_temp = torch.nn.Linear(hidden_size, 1)
self.LOG_TEMP_MAX = 2
self.LOG_TEMP_MIN = -10
def clean_action(self, obs, return_only_action=True):
"""Method to forward propagate through the critic's graph
Parameters:
input (tensor): states
input (tensor): actions
Returns:
Q1 (tensor): Qval 1
Q2 (tensor): Qval 2
V (tensor): Value
"""
###### Feature ####
info = torch.relu(self.f1(obs))
info = torch.relu(self.f2(info))
val = self.val(info)
adv = self.adv(info)
        logits = val + adv - adv.mean(1, keepdim=True)  # dueling combination: subtract per-state mean advantage
if return_only_action:
return logits.argmax(1)
else:
log_temp = self.log_temp(info)
log_temp = torch.clamp(log_temp, min=self.LOG_TEMP_MIN, max=self.LOG_TEMP_MAX)
return logits.argmax(1), log_temp, logits
def noisy_action(self, obs, return_only_action=True):
_, log_temp, logits = self.clean_action(obs, return_only_action=False)
temp = log_temp.exp()
dist = RelaxedOneHotCategorical(temperature=temp, probs=F.softmax(logits, dim=1))
action = dist.rsample()
if return_only_action:
return action.argmax(1)
log_prob = dist.log_prob(action)
log_prob = torch.diagonal(log_prob, offset=0).unsqueeze(1)
return action.argmax(1), log_prob, logits
| 4,229 | 24.178571 | 109 |
py
|
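A minimal usage sketch for the policies above; the dimensions and batch size are made up for illustration, and it assumes the models package is importable:

import torch
from models.discrete_models import CategoricalPolicy

# Hypothetical dimensions, purely for illustration
state_dim, action_dim, hidden_size = 8, 4, 32
policy = CategoricalPolicy(state_dim, action_dim, hidden_size)

obs = torch.randn(5, state_dim)       # batch of 5 states
greedy = policy.clean_action(obs)     # argmax over the dueling logits, shape (5,)
sampled = policy.noisy_action(obs)    # Categorical sample, shape (5,)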
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/models/constructor.py
|
import torch
class ModelConstructor:
def __init__(self, state_dim, action_dim, hidden_size, actor_seed=None, critic_seed=None):
"""
A general Environment Constructor
"""
self.state_dim = state_dim
self.action_dim = action_dim
self.hidden_size = hidden_size
self.actor_seed = actor_seed
self.critic_seed = critic_seed
def make_model(self, type, seed=False):
"""
Generate and return an model object
"""
        if type == 'Gaussian_FF':
            from models.continous_models import Gaussian_FF
            model = Gaussian_FF(self.state_dim, self.action_dim, self.hidden_size)
            if seed:
                model.load_state_dict(torch.load(self.actor_seed))
                print('Actor seeded from', self.actor_seed)
elif type == 'Tri_Head_Q':
from models.continous_models import Tri_Head_Q
model = Tri_Head_Q(self.state_dim, self.action_dim, self.hidden_size)
if seed:
model.load_state_dict(torch.load(self.critic_seed))
print('Critic seeded from', self.critic_seed)
elif type == 'GumbelPolicy':
from models.discrete_models import GumbelPolicy
model = GumbelPolicy(self.state_dim, self.action_dim, self.hidden_size)
elif type == 'CategoricalPolicy':
from models.discrete_models import CategoricalPolicy
model = CategoricalPolicy(self.state_dim, self.action_dim, self.hidden_size)
else:
            raise AssertionError('Unknown model type: ' + str(type))
return model
| 1,629 | 29.754717 | 94 |
py
|
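A usage sketch for ModelConstructor; the dimensions are hypothetical:

from models.constructor import ModelConstructor

# Hypothetical dimensions, purely for illustration
constructor = ModelConstructor(state_dim=8, action_dim=2, hidden_size=32)
actor = constructor.make_model('Gaussian_FF')   # continuous-action policy
critic = constructor.make_model('Tri_Head_Q')   # twin-Q critic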
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/models/continous_models.py
|
import torch
import torch.nn as nn
from torch.distributions import Normal
import torch.nn.functional as F
def weights_init_(m, lin_gain=1.0, bias_gain=0.1):
    # Xavier-uniform init for linear layers; defined locally so it does not
    # clash with the identically named helper in core.utils
    if isinstance(m, nn.Linear):
        torch.nn.init.xavier_uniform_(m.weight, gain=lin_gain)
        torch.nn.init.constant_(m.bias, bias_gain)
class Gaussian_FF(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size):
super(Gaussian_FF, self).__init__()
self.num_actions = num_actions
#Shared FF
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.mean_linear = nn.Linear(hidden_size, num_actions)
self.log_std_linear = nn.Linear(hidden_size, num_actions)
# SAC SPECIFIC
self.LOG_SIG_MAX = 2
self.LOG_SIG_MIN = -20
self.epsilon = 1e-6
def clean_action(self, state, return_only_action=True):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
mean = self.mean_linear(x)
if return_only_action: return torch.tanh(mean)
log_std = self.log_std_linear(x)
log_std = torch.clamp(log_std, min=self.LOG_SIG_MIN, max=self.LOG_SIG_MAX)
return mean, log_std
def noisy_action(self, state,return_only_action=True):
mean, log_std = self.clean_action(state, return_only_action=False)
std = log_std.exp()
normal = Normal(mean, std)
x_t = normal.rsample() # for reparameterization trick (mean + std * N(0,1))
action = torch.tanh(x_t)
if return_only_action:
return action
log_prob = normal.log_prob(x_t)
# Enforcing Action Bound
log_prob -= torch.log(1 - action.pow(2) + self.epsilon)
log_prob = log_prob.sum(1, keepdim=True)
return action, log_prob, None,None,torch.tanh(mean)
def get_norm_stats(self):
minimum = min([torch.min(param).item() for param in self.parameters()])
maximum = max([torch.max(param).item() for param in self.parameters()])
means = [torch.mean(torch.abs(param)).item() for param in self.parameters()]
mean = sum(means)/len(means)
return minimum, maximum, mean
class Tri_Head_Q(nn.Module):
def __init__(self, state_dim, action_dim, hidden_size):
super(Tri_Head_Q, self).__init__()
######################## Q1 Head ##################
# Construct Hidden Layer 1 with state
self.q1f1 = nn.Linear(state_dim + action_dim, hidden_size)
# Hidden Layer 2
self.q1f2 = nn.Linear(hidden_size, hidden_size)
# Out
self.q1out = nn.Linear(hidden_size, 1)
######################## Q2 Head ##################
# Construct Hidden Layer 1 with state
self.q2f1 = nn.Linear(state_dim + action_dim, hidden_size)
# Hidden Layer 2
self.q2f2 = nn.Linear(hidden_size, hidden_size)
# Out
self.q2out = nn.Linear(hidden_size, 1)
def forward(self, obs, action):
#Concatenate observation+action as critic state
state = torch.cat([obs, action], 1)
###### Q1 HEAD ####
q1 = F.relu(self.q1f1(state))
#q1 = self.q1ln1(q1)
q1 = F.relu(self.q1f2(q1))
#q1 = self.q1ln2(q1)
q1 = self.q1out(q1)
###### Q2 HEAD ####
q2 = F.relu(self.q2f1(state))
#q2 = self.q2ln1(q2)
q2 = F.relu(self.q2f2(q2))
#q2 = self.q2ln2(q2)
q2 = self.q2out(q2)
return q1, q2, None
| 3,612 | 26.792308 | 84 |
py
|
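The log_prob correction in Gaussian_FF.noisy_action is the change-of-variables term for the tanh squashing; a self-contained sketch of the same computation (made-up distribution parameters):

import torch
from torch.distributions import Normal

# If x ~ N(mu, sigma) and a = tanh(x), change of variables gives
#   log p(a) = log p(x) - log(1 - tanh(x)^2)
# which is exactly the "Enforcing Action Bound" line above (plus an epsilon).
mu, sigma = torch.zeros(1), torch.ones(1)
normal = Normal(mu, sigma)
x = normal.rsample()
a = torch.tanh(x)
log_prob = normal.log_prob(x) - torch.log(1 - a.pow(2) + 1e-6)
print(a.item(), log_prob.item())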
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/models/__init__.py
| 0 | 0 | 0 |
py
|
|
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/algos/sac.py
|
import os
import torch
import torch.nn.functional as F
from torch.optim import Adam
from core.utils import soft_update, hard_update
class SAC(object):
def __init__(self, args, model_constructor):
self.gamma = args.gamma
self.tau = args.tau
self.alpha = args.alpha
self.writer = args.writer
self.target_update_interval = args.target_update_interval
self.automatic_entropy_tuning = False
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.critic = model_constructor.make_model('Tri_Head_Q').to(device=self.device)
self.critic_optim = Adam(self.critic.parameters(), lr=args.critic_lr)
self.critic_target = model_constructor.make_model('Tri_Head_Q').to(device=self.device)
hard_update(self.critic_target, self.critic)
        if self.automatic_entropy_tuning:
            self.target_entropy = -float(args.action_dim)  # standard heuristic: -|A|
            self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
            self.alpha_optim = Adam([self.log_alpha], lr=args.alpha_lr)
self.actor = model_constructor.make_model('Gaussian_FF').to(device=self.device)
self.actor_optim = Adam(self.actor.parameters(), lr=args.actor_lr)
self.num_updates = 0
def update_parameters(self, state_batch, next_state_batch, action_batch, reward_batch, done_batch):
state_batch = state_batch.to(self.device)
next_state_batch=next_state_batch.to(self.device)
action_batch=action_batch.to(self.device)
reward_batch=reward_batch.to(self.device)
done_batch=done_batch.to(self.device)
with torch.no_grad():
next_state_action, next_state_log_pi,_,_,_= self.actor.noisy_action(next_state_batch, return_only_action=False)
qf1_next_target, qf2_next_target,_ = self.critic_target.forward(next_state_batch, next_state_action)
min_qf_next_target = torch.min(qf1_next_target, qf2_next_target) - self.alpha * next_state_log_pi
next_q_value = reward_batch + self.gamma * (min_qf_next_target) * (1 - done_batch)
self.writer.add_scalar('next_q', next_q_value.mean().item())
qf1, qf2,_ = self.critic.forward(state_batch, action_batch) # Two Q-functions to mitigate positive bias in the policy improvement step
qf1_loss = F.mse_loss(qf1, next_q_value) # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
qf2_loss = F.mse_loss(qf2, next_q_value) # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
self.writer.add_scalar('q_loss', (qf1_loss + qf2_loss).mean().item() / 2.0)
pi, log_pi, _,_,_ = self.actor.noisy_action(state_batch, return_only_action=False)
self.writer.add_scalar('log_pi', log_pi.mean().item())
qf1_pi, qf2_pi, _ = self.critic.forward(state_batch, pi)
min_qf_pi = torch.min(qf1_pi, qf2_pi)
self.writer.add_scalar('policy_q', min_qf_pi.mean().item())
policy_loss = ((self.alpha * log_pi) - min_qf_pi).mean() # Jπ = 𝔼st∼D,εt∼N[α * logπ(f(εt;st)|st) − Q(st,f(εt;st))]
self.writer.add_scalar('policy_loss', policy_loss.mean().item())
        self.critic_optim.zero_grad()
        (qf1_loss + qf2_loss).backward()  # one backward pass over both Q heads
        self.critic_optim.step()
self.actor_optim.zero_grad()
policy_loss.backward()
self.actor_optim.step()
self.num_updates += 1
soft_update(self.critic_target, self.critic, self.tau)
| 3,599 | 43.444444 | 143 |
py
|
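soft_update and hard_update come from core.utils, which is not included in this dump; a conventional implementation consistent with how SAC uses them (an assumption, not necessarily the repo's exact code) is:

def soft_update(target, source, tau):
    # Polyak averaging: target <- tau * source + (1 - tau) * target
    for t_param, s_param in zip(target.parameters(), source.parameters()):
        t_param.data.copy_(tau * s_param.data + (1.0 - tau) * t_param.data)

def hard_update(target, source):
    # Direct parameter copy
    for t_param, s_param in zip(target.parameters(), source.parameters()):
        t_param.data.copy_(s_param.data)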
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/algos/erl_trainer.py
|
import numpy as np, os, time, random, torch, sys
from algos.neuroevolution import SSNE
from core import utils
from core.runner import rollout_worker
from torch.multiprocessing import Process, Pipe, Manager
from core.buffer import Buffer
class ERL_Trainer:
def __init__(self, args, model_constructor, env_constructor):
self.args = args
self.policy_string = 'CategoricalPolicy' if env_constructor.is_discrete else 'Gaussian_FF'
self.manager = Manager()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#Evolution
self.evolver = SSNE(self.args)
#Initialize population
self.population = self.manager.list()
for _ in range(args.pop_size):
self.population.append(model_constructor.make_model(self.policy_string))
#Save best policy
self.best_policy = model_constructor.make_model(self.policy_string)
#PG Learner
if env_constructor.is_discrete:
from algos.ddqn import DDQN
self.learner = DDQN(args, model_constructor)
else:
from algos.sac import SAC
self.learner = SAC(args, model_constructor)
#Replay Buffer
self.replay_buffer = Buffer(args.buffer_size)
#Initialize Rollout Bucket
self.rollout_bucket = self.manager.list()
for _ in range(args.rollout_size):
self.rollout_bucket.append(model_constructor.make_model(self.policy_string))
############## MULTIPROCESSING TOOLS ###################
#Evolutionary population Rollout workers
self.evo_task_pipes = [Pipe() for _ in range(args.pop_size)]
self.evo_result_pipes = [Pipe() for _ in range(args.pop_size)]
self.evo_workers = [Process(target=rollout_worker, args=(id, 'evo', self.evo_task_pipes[id][1], self.evo_result_pipes[id][0], args.rollout_size > 0, self.population, env_constructor)) for id in range(args.pop_size)]
for worker in self.evo_workers: worker.start()
self.evo_flag = [True for _ in range(args.pop_size)]
#Learner rollout workers
self.task_pipes = [Pipe() for _ in range(args.rollout_size)]
self.result_pipes = [Pipe() for _ in range(args.rollout_size)]
self.workers = [Process(target=rollout_worker, args=(id, 'pg', self.task_pipes[id][1], self.result_pipes[id][0], True, self.rollout_bucket, env_constructor)) for id in range(args.rollout_size)]
for worker in self.workers: worker.start()
self.roll_flag = [True for _ in range(args.rollout_size)]
#Test bucket
self.test_bucket = self.manager.list()
self.test_bucket.append(model_constructor.make_model(self.policy_string))
# Test workers
self.test_task_pipes = [Pipe() for _ in range(args.num_test)]
self.test_result_pipes = [Pipe() for _ in range(args.num_test)]
self.test_workers = [Process(target=rollout_worker, args=(id, 'test', self.test_task_pipes[id][1], self.test_result_pipes[id][0], False, self.test_bucket, env_constructor)) for id in range(args.num_test)]
for worker in self.test_workers: worker.start()
self.test_flag = False
#Trackers
self.best_score = -float('inf'); self.gen_frames = 0; self.total_frames = 0; self.test_score = None; self.test_std = None
def forward_generation(self, gen, tracker):
gen_max = -float('inf')
#Start Evolution rollouts
if self.args.pop_size > 1:
for id, actor in enumerate(self.population):
self.evo_task_pipes[id][0].send(id)
#Sync all learners actor to cpu (rollout) actor and start their rollout
self.learner.actor.cpu()
for rollout_id in range(len(self.rollout_bucket)):
utils.hard_update(self.rollout_bucket[rollout_id], self.learner.actor)
self.task_pipes[rollout_id][0].send(0)
self.learner.actor.to(device=self.device)
#Start Test rollouts
if gen % self.args.test_frequency == 0:
self.test_flag = True
for pipe in self.test_task_pipes: pipe[0].send(0)
############# UPDATE PARAMS USING GRADIENT DESCENT ##########
        if len(self.replay_buffer) > self.args.learning_start: ###BURN IN PERIOD
for _ in range(int(self.gen_frames * self.args.gradperstep)):
s, ns, a, r, done = self.replay_buffer.sample(self.args.batch_size)
self.learner.update_parameters(s, ns, a, r, done)
self.gen_frames = 0
########## JOIN ROLLOUTS FOR EVO POPULATION ############
all_fitness = []; all_eplens = []
if self.args.pop_size > 1:
for i in range(self.args.pop_size):
_, fitness, frames, trajectory = self.evo_result_pipes[i][1].recv()
all_fitness.append(fitness); all_eplens.append(frames)
self.gen_frames+= frames; self.total_frames += frames
self.replay_buffer.add(trajectory)
self.best_score = max(self.best_score, fitness)
gen_max = max(gen_max, fitness)
########## JOIN ROLLOUTS FOR LEARNER ROLLOUTS ############
rollout_fitness = []; rollout_eplens = []
if self.args.rollout_size > 0:
for i in range(self.args.rollout_size):
_, fitness, pg_frames, trajectory = self.result_pipes[i][1].recv()
self.replay_buffer.add(trajectory)
self.gen_frames += pg_frames; self.total_frames += pg_frames
self.best_score = max(self.best_score, fitness)
gen_max = max(gen_max, fitness)
rollout_fitness.append(fitness); rollout_eplens.append(pg_frames)
######################### END OF PARALLEL ROLLOUTS ################
############ FIGURE OUT THE CHAMP POLICY AND SYNC IT TO TEST #############
if self.args.pop_size > 1:
champ_index = all_fitness.index(max(all_fitness))
utils.hard_update(self.test_bucket[0], self.population[champ_index])
if max(all_fitness) > self.best_score:
self.best_score = max(all_fitness)
utils.hard_update(self.best_policy, self.population[champ_index])
torch.save(self.population[champ_index].state_dict(), self.args.aux_folder + '_best'+self.args.savetag)
print("Best policy saved with score", '%.2f'%max(all_fitness))
else: #If there is no population, champion is just the actor from policy gradient learner
utils.hard_update(self.test_bucket[0], self.rollout_bucket[0])
###### TEST SCORE ######
if self.test_flag:
self.test_flag = False
test_scores = []
for pipe in self.test_result_pipes: #Collect all results
_, fitness, _, _ = pipe[1].recv()
self.best_score = max(self.best_score, fitness)
gen_max = max(gen_max, fitness)
test_scores.append(fitness)
test_scores = np.array(test_scores)
test_mean = np.mean(test_scores); test_std = (np.std(test_scores))
tracker.update([test_mean], self.total_frames)
else:
test_mean, test_std = None, None
#NeuroEvolution's probabilistic selection and recombination step
if self.args.pop_size > 1:
self.evolver.epoch(gen, self.population, all_fitness, self.rollout_bucket)
#Compute the champion's eplen
champ_len = all_eplens[all_fitness.index(max(all_fitness))] if self.args.pop_size > 1 else rollout_eplens[rollout_fitness.index(max(rollout_fitness))]
return gen_max, champ_len, all_eplens, test_mean, test_std, rollout_fitness, rollout_eplens
def train(self, frame_limit):
# Define Tracker class to track scores
test_tracker = utils.Tracker(self.args.savefolder, ['score_' + self.args.savetag], '.csv') # Tracker class to log progress
time_start = time.time()
for gen in range(1, 1000000000): # Infinite generations
# Train one iteration
max_fitness, champ_len, all_eplens, test_mean, test_std, rollout_fitness, rollout_eplens = self.forward_generation(gen, test_tracker)
            if test_mean is not None: self.args.writer.add_scalar('test_score', test_mean, gen)
print('Gen/Frames:', gen,'/',self.total_frames,
' Gen_max_score:', '%.2f'%max_fitness,
' Champ_len', '%.2f'%champ_len, ' Test_score u/std', utils.pprint(test_mean), utils.pprint(test_std),
' Rollout_u/std:', utils.pprint(np.mean(np.array(rollout_fitness))), utils.pprint(np.std(np.array(rollout_fitness))),
' Rollout_mean_eplen:', utils.pprint(sum(rollout_eplens)/len(rollout_eplens)) if rollout_eplens else None)
if gen % 5 == 0:
                print('Best_score_ever:', '%.2f'%self.best_score, ' FPS:','%.2f'%(self.total_frames/(time.time()-time_start)), 'savetag', self.args.savetag)
print()
if self.total_frames > frame_limit:
break
###Kill all processes
        try:
            for p in self.task_pipes: p[0].send('TERMINATE')
            for p in self.test_task_pipes: p[0].send('TERMINATE')
            for p in self.evo_task_pipes: p[0].send('TERMINATE')
        except Exception:
            pass
| 8,262 | 38.347619 | 217 |
py
|
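rollout_worker (core.runner) is also absent from this dump; from the pipe protocol in ERL_Trainer, a worker must roughly follow the loop below (a sketch under that assumption; the names and details are guesses, not the repo's actual code):

import torch

def rollout_worker(id, role, task_pipe, result_pipe, store_transitions, model_bucket, env_constructor):
    # Hypothetical reconstruction of core/runner.py from the pipe protocol above
    env = env_constructor.make_env()
    while True:
        key = task_pipe.recv()                    # blocks until the trainer sends work
        if key == 'TERMINATE':
            break
        net = model_bucket[key]
        fitness, frames, trajectory = 0.0, 0, []
        state, done = env.reset(), False
        while not done:
            with torch.no_grad():
                if role == 'pg':                  # exploratory rollout for the learner
                    action = net.noisy_action(torch.Tensor(state))
                else:                             # 'evo' and 'test' act greedily
                    action = net.clean_action(torch.Tensor(state))
            next_state, reward, done, _ = env.step(action.numpy())
            if store_transitions:
                trajectory.append((state, next_state, action.numpy(), reward, done))
            fitness += reward; frames += 1; state = next_state
        result_pipe.send((id, fitness, frames, trajectory))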
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/algos/neuroevolution.py
|
import random
import numpy as np
import math
import core.utils as utils
class SSNE:
def __init__(self, args):
self.gen = 0
        self.args = args
self.population_size = self.args.pop_size
self.writer = args.writer
#RL TRACKERS
self.rl_policy = None
self.selection_stats = {'elite': 0, 'selected': 0, 'discarded': 0, 'total': 0}
def selection_tournament(self, index_rank, num_offsprings, tournament_size):
"""Conduct tournament selection
Parameters:
index_rank (list): Ranking encoded as net_indexes
num_offsprings (int): Number of offsprings to generate
tournament_size (int): Size of tournament
Returns:
offsprings (list): List of offsprings returned as a list of net indices
"""
total_choices = len(index_rank)
offsprings = []
        for i in range(num_offsprings):
            winner = np.min(np.random.randint(total_choices, size=tournament_size))
            offsprings.append(index_rank[winner])
        offsprings = list(set(offsprings))  # Find unique offsprings
        if len(offsprings) % 2 != 0:  # Number of offsprings should be even
            offsprings.append(offsprings[random.randint(0, len(offsprings) - 1)])
return offsprings
def list_argsort(self, seq):
"""Sort the list
Parameters:
seq (list): list
Returns:
sorted list
"""
return sorted(range(len(seq)), key=seq.__getitem__)
def regularize_weight(self, weight, mag):
"""Clamps on the weight magnitude (reguralizer)
Parameters:
weight (float): weight
mag (float): max/min value for weight
Returns:
weight (float): clamped weight
"""
if weight > mag: weight = mag
if weight < -mag: weight = -mag
return weight
def crossover_inplace(self, gene1, gene2):
"""Conduct one point crossover in place
Parameters:
gene1 (object): A pytorch model
gene2 (object): A pytorch model
Returns:
None
"""
keys1 = list(gene1.state_dict())
keys2 = list(gene2.state_dict())
for key in keys1:
if key not in keys2: continue
# References to the variable tensors
W1 = gene1.state_dict()[key]
W2 = gene2.state_dict()[key]
if len(W1.shape) == 2: #Weights no bias
num_variables = W1.shape[0]
                # Crossover operation [indexed by row]
                try: num_cross_overs = random.randint(0, int(num_variables * 0.3))  # Number of cross overs
                except ValueError: num_cross_overs = 1
for i in range(num_cross_overs):
receiver_choice = random.random() # Choose which gene to receive the perturbation
if receiver_choice < 0.5:
ind_cr = random.randint(0, W1.shape[0]-1) #
W1[ind_cr, :] = W2[ind_cr, :]
else:
ind_cr = random.randint(0, W1.shape[0]-1) #
W2[ind_cr, :] = W1[ind_cr, :]
elif len(W1.shape) == 1: #Bias or LayerNorm
if random.random() <0.8: continue #Crossover here with low frequency
num_variables = W1.shape[0]
                # Crossover operation [indexed by row]
#num_cross_overs = random.randint(0, int(num_variables * 0.05)) # Crossover number
for i in range(1):
receiver_choice = random.random() # Choose which gene to receive the perturbation
if receiver_choice < 0.5:
ind_cr = random.randint(0, W1.shape[0]-1) #
W1[ind_cr] = W2[ind_cr]
else:
ind_cr = random.randint(0, W1.shape[0]-1) #
W2[ind_cr] = W1[ind_cr]
def mutate_inplace(self, gene):
"""Conduct mutation in place
Parameters:
gene (object): A pytorch model
Returns:
None
"""
mut_strength = 0.1
num_mutation_frac = 0.05
super_mut_strength = 10
super_mut_prob = 0.05
reset_prob = super_mut_prob + 0.02
num_params = len(list(gene.parameters()))
ssne_probabilities = np.random.uniform(0, 1, num_params) * 2
for i, param in enumerate(gene.parameters()): # Mutate each param
# References to the variable keys
W = param.data
if len(W.shape) == 2: # Weights, no bias
num_weights = W.shape[0] * W.shape[1]
ssne_prob = ssne_probabilities[i]
if random.random() < ssne_prob:
num_mutations = random.randint(0,
int(math.ceil(num_mutation_frac * num_weights))) # Number of mutation instances
for _ in range(num_mutations):
ind_dim1 = random.randint(0, W.shape[0]-1)
ind_dim2 = random.randint(0, W.shape[-1]-1)
random_num = random.random()
if random_num < super_mut_prob: # Super Mutation probability
W[ind_dim1, ind_dim2] += random.gauss(0, super_mut_strength * W[ind_dim1, ind_dim2])
elif random_num < reset_prob: # Reset probability
W[ind_dim1, ind_dim2] = random.gauss(0, 0.1)
                        else:  # normal mutation
W[ind_dim1, ind_dim2] += random.gauss(0, mut_strength * W[ind_dim1, ind_dim2])
# Regularization hard limit
W[ind_dim1, ind_dim2] = self.regularize_weight(W[ind_dim1, ind_dim2],
self.args.weight_magnitude_limit)
elif len(W.shape) == 1: # Bias or layernorm
num_weights = W.shape[0]
ssne_prob = ssne_probabilities[i]*0.04 #Low probability of mutation here
if random.random() < ssne_prob:
num_mutations = random.randint(0,
int(math.ceil(num_mutation_frac * num_weights))) # Number of mutation instances
for _ in range(num_mutations):
ind_dim = random.randint(0, W.shape[0]-1)
random_num = random.random()
if random_num < super_mut_prob: # Super Mutation probability
W[ind_dim] += random.gauss(0, super_mut_strength * W[ind_dim])
elif random_num < reset_prob: # Reset probability
W[ind_dim] = random.gauss(0, 1)
                        else:  # normal mutation
W[ind_dim] += random.gauss(0, mut_strength * W[ind_dim])
# Regularization hard limit
W[ind_dim] = self.regularize_weight(W[ind_dim], self.args.weight_magnitude_limit)
    def reset_genome(self, gene):
        """Reset a model's weights in place
            Parameters:
                  gene (object): A pytorch model
            Returns:
                None
        """
        for param in gene.parameters():
            # Note: copying a tensor onto itself is a no-op; a genuine reset
            # would re-sample the weights (e.g., with the model's init scheme)
            param.data.copy_(param.data)
def epoch(self, gen, pop, fitness_evals, migration):
        self.gen += 1; num_elitists = int(self.args.elite_fraction * len(fitness_evals))
if num_elitists < 2: num_elitists = 2
# Entire epoch is handled with indices; Index rank nets by fitness evaluation (0 is the best after reversing)
index_rank = self.list_argsort(fitness_evals); index_rank.reverse()
elitist_index = index_rank[:num_elitists] # Elitist indexes safeguard
# Selection step
offsprings = self.selection_tournament(index_rank, num_offsprings=len(index_rank) - len(elitist_index) - len(migration), tournament_size=3)
#Figure out unselected candidates
unselects = []; new_elitists = []
for net_i in range(len(pop)):
if net_i in offsprings or net_i in elitist_index:
continue
else:
unselects.append(net_i)
random.shuffle(unselects)
#Migration Tracker
        if self.rl_policy is not None: # RL Transfer happened
self.selection_stats['total'] += 1.0
if self.rl_policy in elitist_index:
self.selection_stats['elite'] += 1.0
elif self.rl_policy in offsprings:
self.selection_stats['selected'] += 1.0
elif self.rl_policy in unselects:
self.selection_stats['discarded'] += 1.0
self.rl_policy = None
self.writer.add_scalar('elite_rate', self.selection_stats['elite']/self.selection_stats['total'], gen)
self.writer.add_scalar('selection_rate', (self.selection_stats['elite']+self.selection_stats['selected'])/self.selection_stats['total'], gen)
self.writer.add_scalar('discard_rate', self.selection_stats['discarded']/self.selection_stats['total'], gen)
#Inheritance step (sync learners to population) --> Migration
for policy in migration:
replacee = unselects.pop(0)
utils.hard_update(target=pop[replacee], source=policy)
self.rl_policy = replacee
# Elitism step, assigning elite candidates to some unselects
for i in elitist_index:
            try: replacee = unselects.pop(0)
            except IndexError: replacee = offsprings.pop(0)
new_elitists.append(replacee)
utils.hard_update(target=pop[replacee], source=pop[i])
# Crossover for unselected genes with 100 percent probability
if len(unselects) % 2 != 0: # Number of unselects left should be even
unselects.append(unselects[random.randint(0, len(unselects)-1)])
for i, j in zip(unselects[0::2], unselects[1::2]):
            off_i = random.choice(new_elitists)
            off_j = random.choice(offsprings)
utils.hard_update(target=pop[i], source=pop[off_i])
utils.hard_update(target=pop[j], source=pop[off_j])
self.crossover_inplace(pop[i], pop[j])
# Crossover for selected offsprings
for i, j in zip(offsprings[0::2], offsprings[1::2]):
if random.random() < self.args.crossover_prob:
self.crossover_inplace(pop[i], pop[j])
# Mutate all genes in the population except the new elitists
for net_i in range(len(pop)):
if net_i not in new_elitists: # Spare the new elitists
if random.random() < self.args.mutation_prob:
self.mutate_inplace(pop[net_i])
| 8,888 | 30.299296 | 144 |
py
|
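A toy run of selection_tournament to make the index bookkeeping concrete (the values are made up):

import numpy as np

# index_rank is sorted best-first, e.g. fitness ranks nets 3 > 0 > 2 > 1
index_rank = [3, 0, 2, 1]
tournament_size = 3

# One tournament: draw 3 random positions in the ranking and keep the
# smallest position, i.e. the best-ranked contestant wins
winner = np.min(np.random.randint(len(index_rank), size=tournament_size))
print('offspring net index:', index_rank[winner])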
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/algos/ddqn.py
|
import torch
import torch.nn.functional as F
from torch.optim import Adam
from core.utils import soft_update, hard_update
class DDQN(object):
def __init__(self, args, model_constructor):
self.gamma = args.gamma
self.tau = args.tau
self.alpha = args.alpha
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.actor = model_constructor.make_model('CategoricalPolicy').to(device=self.device)
self.actor_optim = Adam(self.actor.parameters(), lr=args.actor_lr)
self.actor_target = model_constructor.make_model('CategoricalPolicy').to(device=self.device)
hard_update(self.actor_target, self.actor)
self.log_softmax = torch.nn.LogSoftmax(dim=1)
self.softmax = torch.nn.Softmax(dim=1)
self.num_updates = 0
def update_parameters(self, state_batch, next_state_batch, action_batch, reward_batch, done_batch):
state_batch = state_batch.to(self.device)
next_state_batch=next_state_batch.to(self.device)
action_batch=action_batch.to(self.device)
reward_batch=reward_batch.to(self.device)
done_batch=done_batch.to(self.device)
action_batch = action_batch.long().unsqueeze(1)
        with torch.no_grad():
            # Double-DQN: select the next action with the online net...
            na = self.actor.clean_action(next_state_batch, return_only_action=True)
            # ...but evaluate its value with the target net
            _, _, ns_logits = self.actor_target.noisy_action(next_state_batch, return_only_action=False)
            next_entropy = -(F.softmax(ns_logits, dim=1) * F.log_softmax(ns_logits, dim=1)).mean(1).unsqueeze(1)
            ns_logits = ns_logits.gather(1, na.unsqueeze(1))
            # Soft (entropy-regularized) bootstrap target
            next_target = ns_logits + self.alpha * next_entropy
            next_q_value = reward_batch + (1 - done_batch) * self.gamma * next_target
_, _, logits = self.actor.noisy_action(state_batch, return_only_action=False)
entropy = -(F.softmax(logits, dim=1) * F.log_softmax(logits, dim=1)).mean(1).unsqueeze(1)
q_val = logits.gather(1, action_batch)
q_loss = (next_q_value - q_val)**2
q_loss -= self.alpha*entropy
q_loss = q_loss.mean()
self.actor_optim.zero_grad()
q_loss.backward()
self.actor_optim.step()
self.num_updates += 1
soft_update(self.actor_target, self.actor, self.tau)
| 2,338 | 36.725806 | 112 |
py
|
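The target in update_parameters decouples action selection from evaluation, which is the core double-DQN idea; a toy numeric version of the same computation (made-up values):

import torch

gamma, alpha = 0.99, 0.05
online_q = torch.tensor([[1.0, 2.0]])   # online net's Q-logits for the next state
target_q = torch.tensor([[1.5, 1.8]])   # target net's Q-logits for the next state
reward, done = torch.tensor([[0.5]]), torch.tensor([[0.0]])

na = online_q.argmax(1, keepdim=True)   # select with the online net -> action 1
next_q = target_q.gather(1, na)         # evaluate with the target net -> 1.8
next_entropy = -(torch.softmax(target_q, 1) * torch.log_softmax(target_q, 1)).mean(1, keepdim=True)
target = reward + (1 - done) * gamma * (next_q + alpha * next_entropy)
print(target)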
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/algos/__init__.py
| 0 | 0 | 0 |
py
|
|
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/envs_repo/constructor.py
|
from envs_repo.gym_wrapper import GymWrapper
class EnvConstructor:
"""Wrapper around the Environment to expose a cleaner interface for RL
Parameters:
env_name (str): Env name
"""
def __init__(self, env_name, frameskip):
"""
A general Environment Constructor
"""
self.env_name = env_name
self.frameskip = frameskip
#Dummy env to get some macros
dummy_env = self.make_env()
self.is_discrete = dummy_env.is_discrete
self.state_dim = dummy_env.state_dim
self.action_dim = dummy_env.action_dim
def make_env(self, **kwargs):
"""
Generate and return an env object
"""
env = GymWrapper(self.env_name, self.frameskip)
return env
| 786 | 22.147059 | 74 |
py
|
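A usage sketch for EnvConstructor; 'CartPole-v1' is only an example env id:

from envs_repo.constructor import EnvConstructor

env_constructor = EnvConstructor('CartPole-v1', frameskip=1)
print(env_constructor.is_discrete, env_constructor.state_dim, env_constructor.action_dim)
env = env_constructor.make_env()
state = env.reset()   # shape (1, state_dim)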
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/envs_repo/gym_wrapper.py
|
import numpy as np
import gym
class GymWrapper:
"""Wrapper around the Environment to expose a cleaner interface for RL
Parameters:
env_name (str): Env name
"""
    def __init__(self, env_name, frameskip=None):
        """
        A base template for all environment wrappers.
        """
        self.env_name = env_name
        self.env = gym.make(env_name)
        self.frameskip = frameskip if frameskip else 1  # default to one env step per action
        self.is_discrete = self._is_discrete(self.env)
#State and Action Parameters
self.state_dim = self.env.observation_space.shape[0]
if self.is_discrete:
self.action_dim = self.env.action_space.n
else:
self.action_dim = self.env.action_space.shape[0]
self.action_low = float(self.env.action_space.low[0])
self.action_high = float(self.env.action_space.high[0])
self.test_size = 10
def reset(self):
"""Method overloads reset
Parameters:
None
Returns:
next_obs (list): Next state
"""
state = self.env.reset()
return np.expand_dims(state, axis=0)
def step(self, action): #Expects a numpy action
"""Take an action to forward the simulation
Parameters:
action (ndarray): action to take in the env
Returns:
next_obs (list): Next state
reward (float): Reward for this step
done (bool): Simulation done?
                info (dict): Info dict from OpenAI Gym (doesn't carry anything used here)
"""
if self.is_discrete:
action = action[0]
else:
#Assumes action is in [-1, 1] --> Hyperbolic Tangent Activation
action = self.action_low + (action + 1.0) / 2.0 * (self.action_high - self.action_low)
reward = 0
for _ in range(self.frameskip):
next_state, rew, done, info = self.env.step(action)
reward += rew
if done: break
next_state = np.expand_dims(next_state, axis=0)
return next_state, reward, done, info
def render(self):
self.env.render()
    def _is_discrete(self, env):
        # Discrete action spaces expose `n`; continuous (Box) spaces do not
        return hasattr(env.action_space, 'n')
| 2,325 | 27.716049 | 98 |
py
|
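The continuous-action branch of step rescales a tanh-squashed action from [-1, 1] into the env's bounds; the same affine map, checked on its endpoints (the bounds are made up):

def rescale(action, low=-2.0, high=2.0):
    # Affine map from [-1, 1] to [low, high]
    return low + (action + 1.0) / 2.0 * (high - low)

assert rescale(-1.0) == -2.0   # lower bound maps to low
assert rescale(1.0) == 2.0     # upper bound maps to high
assert rescale(0.0) == 0.0     # midpoint maps to the centre of the range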
Evolutionary-Reinforcement-Learning
|
Evolutionary-Reinforcement-Learning-master/envs_repo/__init__.py
| 0 | 0 | 0 |
py
|
|
MAX-Toxic-Comment-Classifier
|
MAX-Toxic-Comment-Classifier-master/app.py
|
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from maxfw.core import MAXApp
from api import ModelMetadataAPI, ModelPredictAPI, ModelLabelsAPI
from config import API_TITLE, API_DESC, API_VERSION
max = MAXApp(API_TITLE, API_DESC, API_VERSION)
max.add_api(ModelMetadataAPI, '/metadata')
max.add_api(ModelPredictAPI, '/predict')
max.add_api(ModelLabelsAPI, '/labels')
max.run()
| 933 | 34.923077 | 74 |
py
|
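Once the app above is running, its three routes can be exercised with HTTP requests; the port, the /model prefix, and the exact request payload are assumptions about the deployment, not confirmed by this dump:

import requests

# Port 5000 and the /model prefix are assumptions about maxfw's routing
base = 'http://localhost:5000/model'
print(requests.get(base + '/metadata').json())
print(requests.get(base + '/labels').json())
print(requests.post(base + '/predict', json={'text': ['this is a test']}).json())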