repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
---|---|---|---|---|---|---|
marmot | marmot-master/marmot/representations/alignment_file_representation_generator.py | from __future__ import print_function
import numpy as np
import sys
from collections import defaultdict
from marmot.representations.representation_generator import RepresentationGenerator
class AlignmentFileRepresentationGenerator(RepresentationGenerator):
'''
Get alignments from file
'''
# parse lex.f2e file
# format of self.lex_prob: dictionary of target words
# every value of the target dictionary is a dictionary of source words
# every value of the source dictionary is a probability p(target|source):
# self.lex_prob['el']['he'] = 0.5
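    # Illustrative (made-up) input: a lex file line of the form
    #     el he 0.5
    # is parsed by get_align_prob() below into lex_dict['el']['he'] = 0.5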
def get_align_prob(self, lex_file):
lex_dict = defaultdict(lambda: defaultdict(float))
for line in open(lex_file):
chunks = line[:-1].decode('utf-8').split()
assert(len(chunks) == 3), "Wrong format of the lex file: \n{}".format(line)
val = float(chunks[2])
lex_dict[chunks[0]][chunks[1]] = val
return lex_dict
def get_alignments(self, align_file, target_lines):
alignments = []
cnt = 0
print(type(target_lines))
print(target_lines[:10])
print(align_file)
for words, line in zip(target_lines, open(align_file)):
cnt += 1
cur_align_dict = defaultdict(list)
for pair in line.strip('\n').split():
pair = pair.split('-')
cur_align_dict[int(pair[1])].append(int(pair[0]))
cur_align = []
for i in range(len(words)):
cur_align.append(cur_align_dict[i])
alignments.append(cur_align)
return alignments
def __init__(self, lex_file):
self.lex_prob = self.get_align_prob(lex_file)
def generate(self, data_obj):
print("generate procedure, start")
print(data_obj["alignments_file"])
all_alignments = self.get_alignments(data_obj['alignments_file'], data_obj['target'])
unique_alignments = []
for seq_idx, al_sequence in enumerate(all_alignments):
seq_alignments = []
for w_idx, al_list in enumerate(al_sequence):
if len(al_list) > 1:
try:
target_word = data_obj['target'][seq_idx][w_idx]
source_words = [data_obj['source'][seq_idx][i] for i in al_list]
except IndexError:
print("TArget:", data_obj['target'][seq_idx])
print("Source:", data_obj['source'][seq_idx])
print("Target: {} seq, needed {}. {} words, needed {}".format(len(data_obj['target']), seq_idx, len(data_obj['target'][seq_idx]), w_idx))
sys.exit()
probs = [self.lex_prob[target_word][s] for s in source_words]
seq_alignments.append(al_list[np.argmax(probs)])
elif len(al_list) == 0:
seq_alignments.append(None)
elif len(al_list) == 1:
seq_alignments.append(al_list[0])
else:
print("Golakteko opasnoste!")
unique_alignments.append(seq_alignments)
data_obj['alignments'] = unique_alignments
# remove alignments file, we don't need it any more
del data_obj['alignments_file']
return data_obj
| 3,351 | 40.9 | 161 | py |
marmot | marmot-master/marmot/representations/word_qe_files_representation_generator.py | import codecs
import os
from marmot.representations.representation_generator import RepresentationGenerator
class WordQEFilesRepresentationGenerator(RepresentationGenerator):
'''
The standard word-level format: 3 files, source, target, tags, one line per file, whitespace tokenized
'''
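    # Illustrative parallel lines across the three files (one sentence per line,
    # whitespace tokenized; the words and tags are made-up examples):
    #   source: he is a pupil .
    #   target: el esta un pupil .
    #   tags:   OK OK OK BAD OK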
def __init__(self, source_file, target_file, tags_file, return_files=True):
self.data = self.parse_files(source_file, target_file, tags_file, return_files=return_files)
@staticmethod
def parse_files(source_file, target_file, tags_file, return_files=True):
with codecs.open(source_file, encoding='utf8') as source:
source_lines = [line.split() for line in source]
with codecs.open(target_file, encoding='utf8') as target:
target_lines = [line.split() for line in target]
with codecs.open(tags_file, encoding='utf8') as tags:
tags_lines = [line.split() for line in tags]
return {'target': target_lines, 'source': source_lines, 'tags': tags_lines, 'target_file': os.path.abspath(target_file), 'source_file': os.path.abspath(source_file)}
def generate(self, data_obj=None):
return self.data
| 1,195 | 37.580645 | 173 | py |
marmot | marmot-master/marmot/representations/syntactic_representation_generator.py | from marmot.util.extract_syntactic_features import call_stanford, call_parzu, parse_xml, parse_conll
from marmot.representations.representation_generator import RepresentationGenerator
class SyntacticRepresentationGenerator(RepresentationGenerator):
def __init__(self, tmp_dir, reverse=False):
self.tmp_dir = tmp_dir
self.reverse = reverse
def generate(self, data_obj):
if self.reverse:
parsed_src = call_parzu(data_obj['source_file'], self.tmp_dir)
parsed_tg = call_stanford(data_obj['target_file'], self.tmp_dir)
sentences_tg = parse_xml(parsed_tg)
sentences_src = parse_conll(parsed_src)
else:
parsed_src = call_stanford(data_obj['source_file'], self.tmp_dir)
parsed_tg = call_parzu(data_obj['target_file'], self.tmp_dir)
sentences_src = parse_xml(parsed_src)
sentences_tg = parse_conll(parsed_tg)
data_obj['target_dependencies'] = [sent['dependencies'] for sent in sentences_tg]
data_obj['source_dependencies'] = [sent['dependencies'] for sent in sentences_src]
data_obj['target_synt_pos'] = [sent['pos'] for sent in sentences_tg]
data_obj['source_synt_pos'] = [sent['pos'] for sent in sentences_src]
# data_obj['target_root'] = [sent['root'] for sent in sentences_tg]
# data_obj['source_root'] = [sent['root'] for sent in sentences_src]
del data_obj['target_file']
del data_obj['source_file']
return data_obj
| 1,524 | 48.193548 | 100 | py |
marmot | marmot-master/marmot/representations/segmentation_simple_representation_generator.py | import codecs
import re
from marmot.representations.representation_generator import RepresentationGenerator
class SegmentationSimpleRepresentationGenerator(RepresentationGenerator):
'''
Source, target, tags, segmentation files, one line per file, whitespace tokenized
Segmentation file -- can be Moses output with phrase segmentation (with '-t' option)
or just have the information on segments.
    Segments have to be in the form |i-j|, where i is the index of the first word in the segment
    and j is the index of the last word in the segment.
Examples of acceptable segmentation formats:
(1) he is |0-1| a good |2-3| ukulele |4-4| player |5-5| . |6-6|
(2) |0-1| |2-3| |4-4| |5-5| |6-6|
    In format (1) the words can be from the source language (or any other); only the numbers matter.
    Format (2) is the same as (1) but without the words.
    Both formats are parsed in the same way: only the substrings '|i-j|' are extracted.
'''
def __init__(self, source_file, target_file, tags_file, segmentation_file, segmentation_numbers='target'):
'''
Parameters:
- <source_file>
- <target_file>
- <tags_file>
- <segmentation_file>. Acceptable formats:
(1) he is |0-1| a good |2-3| ukulele |4-4| player |5-5| . |6-6|
(2) |0-1| |2-3| |4-4| |5-5| |6-6|
- <segmentation_numbers> - 'source' or 'target', default - 'target'
the side whose segment borders are denoted with numbers in <segmentation_file>
If the <segmentation_file> has format (1),
then <segmentation_numbers> has to be 'target'
'''
self.data = self.parse_files(source_file, target_file, tags_file, segmentation_file, segmentation_numbers)
@staticmethod
def parse_files(source_file, target_file, tags_file, segmentation_file, segmentation_numbers):
with codecs.open(source_file, encoding='utf8') as source:
source_lines = [line.split() for line in source]
with codecs.open(target_file, encoding='utf8') as target:
target_lines = [line.split() for line in target]
with codecs.open(tags_file, encoding='utf8') as tags:
tags_lines = [line.split() for line in tags]
        seg_regexp = re.compile(r"\|\d+-\d+\|")
with codecs.open(segmentation_file, encoding='utf-8') as segmentation:
segments, source_segments = [], []
if segmentation_numbers == 'target':
for line in segmentation:
seg_strings = seg_regexp.findall(line)
seg_list = []
for a_seg in seg_strings:
a_pair = a_seg.strip('|').split('-')
seg_list.append((int(a_pair[0]), int(a_pair[1])+1))
# segments need to be sorted, because they could be reordered during decoding:
# he is |3-4| a good |0-1| ukulele player |2-2| . |5-5|
# seg_list == [(3, 5), (0, 2), (2, 3), (5, 6)]
# sorted(seg_list) == [(0, 2), (2, 3), (3, 5), (5, 6)]
seg_list = sorted(seg_list)
new_seg_list = []
prev = 0
for s in seg_list:
                        # the end of the previous segment doesn't match the beginning of the current one,
                        # which means that one or more words weren't included in the segmentation
                        # and have to be added as a separate segment
if s[0] != prev:
new_seg_list.append((prev, s[0]))
new_seg_list.append(s)
prev = s[1]
segments.append(sorted(new_seg_list))
return {'target': target_lines, 'source': source_lines, 'tags': tags_lines, 'segmentation': segments}
elif segmentation_numbers == 'source':
for line in segmentation:
if line == '\n':
segments.append([])
source_segments.append([])
continue
seg_strings = seg_regexp.split(line[:-1])
source_seg_strings = seg_regexp.findall(line)
seg_lengths = [len(a_seg.strip().split()) for a_seg in seg_strings if len(a_seg.split()) > 0]
                    # segmentation doesn't exist for this sentence
if len(seg_lengths) == 0:
segments.append([])
source_segments.append([])
continue
start = 0
seg_list = []
for a_len in seg_lengths:
seg_list.append((start, start + a_len))
start += a_len
source_seg_list = []
for a_seg in source_seg_strings:
a_pair = a_seg.strip('|').split('-')
source_seg_list.append((int(a_pair[0]), int(a_pair[1])+1))
segments.append(seg_list)
# here segments mustn't be sorted, to keep the correspondence between the source and the target
source_segments.append(source_seg_list)
return {'target': target_lines, 'source': source_lines, 'tags': tags_lines, 'segmentation': segments, 'source_segmentation': source_segments}
else:
print("Unknown segmentation numbers value: {}".format(segmentation_numbers))
def generate(self, data_obj=None):
return self.data
| 5,693 | 50.763636 | 157 | py |
marmot | marmot-master/marmot/representations/representation_generator.py | # an abstract class representing a representation generator
# returns the data object
# { representation_name: representation}
# <representation_name> -- string
# <representation> -- list of lists, representation of the whole dataset
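# Illustrative shape of the returned data object (values are made-up examples):
#   {'target': [['el', 'esta', 'un', 'pupil', '.'], ...],
#    'source': [['he', 'is', 'a', 'pupil', '.'], ...],
#    'tags':   [['OK', 'OK', 'OK', 'BAD', 'OK'], ...]}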
from abc import ABCMeta, abstractmethod
class RepresentationGenerator(object):
__metaclass__ = ABCMeta
# subclasses must provide the implementation
# generators may need a "persist = True/False"
@abstractmethod
def generate(self, *args, **kwargs):
pass
| 520 | 26.421053 | 72 | py |
marmot | marmot-master/marmot/representations/__init__.py | 0 | 0 | 0 | py |
|
marmot | marmot-master/marmot/representations/segmentation_double_representation_generator.py | from marmot.representations.representation_generator import RepresentationGenerator
import codecs
class SegmentationDoubleRepresentationGenerator(RepresentationGenerator):
'''
Both source and target are already segmented with '||'
'''
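    # Illustrative input line and the result of get_segments_from_line() below
    # (the sentence is a made-up example):
    #   "he is || a good || ukulele player || ."
    #   -> words    = ['he', 'is', 'a', 'good', 'ukulele', 'player', '.']
    #   -> segments = [(0, 2), (2, 4), (4, 6), (6, 7)]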
def get_segments_from_line(self, line):
seg = line.strip('\n').split(' || ')
cur_words, cur_seg = [], []
cur_pos = 0
for seg in line.strip('\n').split(' || '):
seg_split = seg.split()
cur_words.extend(seg_split)
cur_seg.append((cur_pos, cur_pos + len(seg_split)))
cur_pos += len(seg_split)
return cur_words, cur_seg
def parse_files(self, source_file, target_file, tags_file, word_align_file):
# extract source segments
with codecs.open(source_file, encoding='utf8') as source:
source_words, source_segments = [], []
for line in source:
cur_words, cur_seg = self.get_segments_from_line(line)
source_words.append(cur_words)
source_segments.append(cur_seg)
# extract target segments
with codecs.open(target_file, encoding='utf8') as target:
target_words, target_segments = [], []
for line in target:
cur_words, cur_seg = self.get_segments_from_line(line)
target_words.append(cur_words)
target_segments.append(cur_seg)
with codecs.open(tags_file, encoding='utf8') as tags:
phrase_tags = [line.split() for line in tags]
return {'segmentation': target_segments, 'source_segmentation': source_segments, 'source': source_words, 'target': target_words, 'alignments_file': word_align_file, 'tags': phrase_tags}
def __init__(self, source_file, target_file, tags_file, word_align_file):
self.data = self.parse_files(source_file, target_file, tags_file, word_align_file)
def generate(self, data_obj=None):
return self.data
| 1,990 | 40.479167 | 193 | py |
marmot | marmot-master/marmot/representations/pos_representation_generator.py | from subprocess import Popen
import os
import time
from marmot.representations.representation_generator import RepresentationGenerator
from marmot.experiment.import_utils import mk_tmp_dir
class POSRepresentationGenerator(RepresentationGenerator):
def _get_random_name(self, suffix=''):
return 'tmp_'+suffix+str(time.time())
def _get_pos_tagging(self, src, tagger, par_file, tmp_dir):
        # write one token per line and add a sentence end marker
        # (the input sentences are already tokenized into word lists)
tmp_tokenized_name = os.path.join(tmp_dir, self._get_random_name('tok'))
        tmp_tok = open(tmp_tokenized_name, 'w+')
for words in src:
tmp_tok.write('%s\nSentenceEndMarker\n' % '\n'.join([w.encode('utf-8') for w in words]))
tmp_tok.seek(0)
# pass to tree-tagger
tmp_tagged_name = os.path.join(tmp_dir, self._get_random_name('tag'))
        tmp_tagged = open(tmp_tagged_name, 'w+')
tagger_call = Popen([tagger, '-token', par_file], stdin=tmp_tok, stdout=tmp_tagged)
tagger_call.wait()
tmp_tagged.seek(0)
# remove sentence markers, restore sentence structure
output = []
cur_sentence = []
for line in tmp_tagged:
word_tag = line[:-1].decode('utf-8').strip().split('\t')
# each string has to be <word>\t<tag>
# TODO: if it's not of this format, it could be the end of sequence (empty string) or an error
if len(word_tag) != 2:
continue
if word_tag[0] == 'SentenceEndMarker':
output.append(cur_sentence)
cur_sentence = []
else:
cur_sentence.append(word_tag[1])
tmp_tok.close()
tmp_tagged.close()
# delete all temporary files
os.remove(tmp_tokenized_name)
os.remove(tmp_tagged_name)
return output
# <tagger> -- path to tree-tagger
# <parameters> -- parameters of tree-tagger
# <data_label> -- which data should be tagged ('source' or 'target')
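    # Illustrative usage (the paths are placeholders, not real files):
    #   gen = POSRepresentationGenerator('/path/to/tree-tagger', '/path/to/english.par', 'target')
    #   data_obj = gen.generate(data_obj)   # adds data_obj['target_pos']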
def __init__(self, tagger, parameters, data_label, tmp_dir=None):
self.tmp_dir = mk_tmp_dir(tmp_dir)
self.tagger = tagger
self.parameters = parameters
self.data = data_label
def generate(self, data_obj):
data_obj[self.data+'_pos'] = self._get_pos_tagging(data_obj[self.data], self.tagger, self.parameters, self.tmp_dir)
return data_obj
| 2,447 | 36.090909 | 123 | py |
marmot | marmot-master/marmot/representations/google_translate_representation_generator.py | from __future__ import print_function
from nltk import word_tokenize
from goslate import Goslate
from marmot.representations.representation_generator import RepresentationGenerator
class GoogleTranslateRepresentationGenerator(RepresentationGenerator):
'''
    Generate a pseudo-reference with Google Translate
'''
# <lang> -- target language
def __init__(self, lang='en'):
self.lang = lang
self.gs = Goslate()
def generate(self, data_obj):
if 'source' not in data_obj:
print('No source for pseudo-reference generation')
return data_obj
references = []
try:
for ref in self.gs.translate([' '.join(sentence) for sentence in data_obj['source']], self.lang):
references.append(word_tokenize(ref))
# TODO: might it be some other error?
except:
print('Network error, no pseudo-reference is generated')
return data_obj
data_obj['pseudo-reference'] = references
return data_obj
| 1,043 | 28.828571 | 109 | py |
marmot | marmot-master/marmot/representations/tests/test_segmentation_simple_representation_generator.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from marmot.representations.segmentation_simple_representation_generator import SegmentationSimpleRepresentationGenerator
class WordQERepresentationGeneratorTests(unittest.TestCase):
def test_generate(self):
gen_target = SegmentationSimpleRepresentationGenerator('test_data/tiny.source', 'test_data/tiny.target', 'test_data/tiny.tags', 'test_data/tiny.target_seg', 'target')
gen_src_tg = SegmentationSimpleRepresentationGenerator('test_data/tiny.source', 'test_data/tiny.target', 'test_data/tiny.tags', 'test_data/tiny.source_target_seg', 'source')
tg_data = gen_target.generate()
self.assertListEqual(tg_data['target'], [['el', 'esta', 'un', 'pupil', '.'],
['ella', 'sea', 'mi', 'hermano', '.'],
['mi', 'gato', 'es', 'amarillo', '.']])
self.assertListEqual(tg_data['source'], [['he', 'is', 'a', 'pupil', '.'],
['she', 'is', 'my', 'sister', '.'],
['my', 'cat', 'is', 'smart', '.']])
self.assertListEqual(tg_data['tags'], [['OK', 'OK', 'OK', 'BAD', 'OK'],
['OK', 'BAD', 'OK', 'BAD', 'OK'],
['OK', 'OK', 'BAD', 'BAD', 'OK']])
# print("Segmentation", tg_data['segmentation'])
# print("Expected segmentation: ", '[[(0, 1), (1, 3), (3, 5)], [(0, 1), (1, 2), (2, 5)], [(0, 1), (2, 3), (3, 4), (4, 5)]]')
self.assertListEqual(tg_data['segmentation'], [[(0, 1), (1, 3), (3, 5)],
[(0, 1), (1, 2), (2, 5)],
[(0, 2), (2, 3), (3, 4), (4, 5)]])
self.assertListEqual(tg_data['source_segmentation'], [])
src_data = gen_src_tg.generate()
# print(src_data['segmentation'])
# print('Expected: [[(0, 2), (2, 5)], [], [(0, 1), (1, 3), (3, 4), (4, 5)]]')
# print(src_data['source_segmentation'])
# print('Expected: [[(0, 2), (2, 5)], [], [(2, 3), (0, 2), (3, 4), (4, 5)]]')
self.assertListEqual(src_data['segmentation'], [[(0, 2), (2, 5)], [], [(0, 1), (1, 3), (3, 4), (4, 5)]])
self.assertListEqual(src_data['source_segmentation'], [[(0, 2), (2, 5)], [], [(2, 3), (0, 2), (3, 4), (4, 5)]])
if __name__ == '__main__':
unittest.main()
| 2,517 | 58.952381 | 181 | py |
marmot | marmot-master/marmot/representations/tests/test_wmt_representation_generator.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import yaml
import os
import shutil
import marmot
from marmot.representations.wmt_representation_generator import WMTRepresentationGenerator
from marmot.experiment.import_utils import build_object
def join_with_module_path(loader, node):
""" define custom tag handler to join paths with the path of the marmot module """
module_path = os.path.dirname(marmot.representations.tests.__file__)
resolved = loader.construct_scalar(node)
return os.path.join(module_path, resolved)
## register the tag handler
yaml.add_constructor('!join', join_with_module_path)
class WMTRepresentationGeneratorTests(unittest.TestCase):
def setUp(self):
module_path = os.path.dirname(__file__)
self.module_path = module_path
test_config = os.path.join(module_path, 'test_config.yaml')
with open(test_config, "r") as cfg_file:
self.config = yaml.load(cfg_file.read())
self.wmt_target = os.path.join(module_path, 'test_data/EN_ES.tgt_ann.train')
self.wmt_source = os.path.join(module_path, 'test_data/EN_ES.source.train')
self.tmp_dir = os.path.join(module_path, 'tmp_dir')
def tearDown(self):
if os.path.exists(self.tmp_dir) and os.path.isdir(self.tmp_dir):
shutil.rmtree(self.tmp_dir)
def test_load_from_config(self):
generator = build_object(self.config['representations']['training'][0])
data_obj = generator.generate()
self.assertTrue('target' in data_obj)
self.assertTrue('source' in data_obj)
self.assertTrue('tags' in data_obj)
self.assertTrue(len(data_obj['target']) == len(data_obj['source']))
self.assertTrue(len(data_obj['target']) == len(data_obj['tags']))
def test_no_saved_files(self):
generator = WMTRepresentationGenerator(self.wmt_target, self.wmt_source)
data_obj = generator.generate()
self.assertTrue('target' in data_obj)
self.assertTrue('source' in data_obj)
self.assertTrue('tags' in data_obj)
self.assertTrue(len(data_obj['target']) == len(data_obj['source']))
self.assertTrue(len(data_obj['target']) == len(data_obj['tags']))
def test_save_files(self):
generator = WMTRepresentationGenerator(self.wmt_target, self.wmt_source, tmp_dir=self.tmp_dir, persist=True)
data_obj = generator.generate()
target = os.path.join(self.tmp_dir, 'EN_ES.tgt_ann.train.target')
tags = os.path.join(self.tmp_dir, 'EN_ES.tgt_ann.train.tags')
source = os.path.join(self.tmp_dir, 'EN_ES.source.train.txt')
self.assertTrue(os.path.exists(self.tmp_dir) and os.path.isdir(self.tmp_dir))
self.assertTrue(os.path.exists(target) and os.path.isfile(target))
self.assertTrue(os.path.exists(tags) and os.path.isfile(tags))
self.assertTrue(os.path.exists(source) and os.path.isfile(source))
if __name__ == '__main__':
unittest.main()
| 2,988 | 39.391892 | 116 | py |
marmot | marmot-master/marmot/representations/tests/test_word_qe_representation_generator.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
import yaml
import marmot
from marmot.representations.word_qe_representation_generator import WordQERepresentationGenerator
from marmot.experiment.import_utils import build_object
def join_with_module_path(loader, node):
""" define custom tag handler to join paths with the path of the marmot module """
module_path = os.path.dirname(marmot.representations.tests.__file__)
resolved = loader.construct_scalar(node)
return os.path.join(module_path, resolved)
## register the tag handler
yaml.add_constructor('!join', join_with_module_path)
class WordQERepresentationGeneratorTests(unittest.TestCase):
def setUp(self):
module_path = os.path.dirname(__file__)
self.module_path = module_path
test_config = os.path.join(module_path, 'test_config.yaml')
with open(test_config, "r") as cfg_file:
self.config = yaml.load(cfg_file.read())
self.target_file = os.path.join(module_path, 'test_data/dev.target')
self.source_file = os.path.join(module_path, 'test_data/dev.source')
self.tags_file = os.path.join(module_path, 'test_data/dev.target.tags')
def test_generator(self):
generator = WordQERepresentationGenerator(self.source_file, self.target_file, self.tags_file)
data_obj = generator.generate()
self.assertTrue('target' in data_obj)
self.assertTrue('source' in data_obj)
self.assertTrue('tags' in data_obj)
self.assertTrue(len(data_obj['target']) == len(data_obj['source']) == len(data_obj['tags']))
self.assertTrue(len(data_obj['target']) == len(data_obj['tags']))
def test_load_from_config(self):
generator = build_object(self.config['representations']['training'][1])
data_obj = generator.generate()
self.assertTrue('target' in data_obj)
self.assertTrue('source' in data_obj)
self.assertTrue('tags' in data_obj)
self.assertTrue(len(data_obj['target']) == len(data_obj['source']) == len(data_obj['tags']))
self.assertTrue(len(data_obj['target']) == len(data_obj['tags']))
# TODO: test that tokenization happens like we expect
if __name__ == '__main__':
unittest.main()
| 2,265 | 37.40678 | 101 | py |
marmot | marmot-master/marmot/representations/tests/test_alignment_representation_generator.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from marmot.representations.word_qe_representation_generator import WordQERepresentationGenerator
from marmot.representations.alignment_representation_generator import AlignmentRepresentationGenerator
class WordQERepresentationGeneratorTests(unittest.TestCase):
def test_generate(self):
main_generator = WordQERepresentationGenerator('test_data/tiny.source_align', 'test_data/tiny.target', 'test_data/tiny.tags')
align_generator = AlignmentRepresentationGenerator('/export/data/varvara/europarl-sys/english_spanish/model/lex.1.f2e', align_model='/export/data/varvara/my_marmot/my_marmot/experiment/tiny_test/europarl_align_model')
data = main_generator.generate()
data = align_generator.generate(data)
self.assertListEqual(data['alignments'], [[0, 1, 2, 3, 4], [None, 0, None, 1, 2], [0, 1, 2, 4, 5]])
if __name__ == '__main__':
unittest.main()
| 958 | 42.590909 | 225 | py |
marmot | marmot-master/marmot/representations/tests/__init__.py | 0 | 0 | 0 | py |
|
marmot | marmot-master/marmot/representations/tests/test_word_qe_and_pseudo_ref_representation_generator.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
import yaml
import marmot
from marmot.representations.word_qe_and_pseudo_ref_representation_generator import WordQEAndPseudoRefRepresentationGenerator
from marmot.experiment.import_utils import build_object
def join_with_module_path(loader, node):
""" define custom tag handler to join paths with the path of the marmot module """
module_path = os.path.dirname(marmot.representations.tests.__file__)
resolved = loader.construct_scalar(node)
return os.path.join(module_path, resolved)
## register the tag handler
yaml.add_constructor('!join', join_with_module_path)
class WordQEAndPseudoRefRepresentationGeneratorTests(unittest.TestCase):
def setUp(self):
module_path = os.path.dirname(__file__)
self.module_path = module_path
test_config = os.path.join(module_path, 'test_config.yaml')
with open(test_config, "r") as cfg_file:
self.config = yaml.load(cfg_file.read())
self.target_file = os.path.join(module_path, 'test_data/dev.target')
self.source_file = os.path.join(module_path, 'test_data/dev.source')
self.tags_file = os.path.join(module_path, 'test_data/dev.target.tags')
self.pseudo_ref_file = os.path.join(module_path, 'test_data/dev.pseudo_refs')
def test_generator(self):
generator = WordQEAndPseudoRefRepresentationGenerator(self.source_file, self.target_file,
self.tags_file, self.pseudo_ref_file)
data_obj = generator.generate()
self.assertTrue('target' in data_obj)
self.assertTrue('source' in data_obj)
self.assertTrue('tags' in data_obj)
self.assertTrue('pseudo_ref' in data_obj)
self.assertTrue(len(data_obj['target']) == len(data_obj['source']) == len(data_obj['tags'])
== len(data_obj['pseudo_ref']))
def test_load_from_config(self):
generator = build_object(self.config['representations']['training'][2])
data_obj = generator.generate()
self.assertTrue('target' in data_obj)
self.assertTrue('source' in data_obj)
self.assertTrue('tags' in data_obj)
self.assertTrue('pseudo_ref' in data_obj)
self.assertTrue(len(data_obj['target']) == len(data_obj['source']) == len(data_obj['tags'])
== len(data_obj['pseudo_ref']))
if __name__ == '__main__':
unittest.main()
| 2,488 | 40.483333 | 124 | py |
marmot | marmot-master/examples/word_level_quality_estimation/wmt_word_level_experiment.py | from argparse import ArgumentParser
import yaml
import os, sys
import logging
import numpy as np
import marmot
from marmot.experiment import learning_utils
import marmot.experiment.experiment_utils as experiment_utils
from marmot.evaluation.evaluation_metrics import weighted_fmeasure
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('testlogger')
# define custom tag handler to join paths with the path of the word_level module
def join_with_module_path(loader, node):
module_path = os.path.dirname(marmot.__file__)
resolved = loader.construct_scalar(node)
return os.path.join(module_path, resolved)
## register the tag handler
yaml.add_constructor('!join', join_with_module_path)
def main(config):
# load ContextCreators from config file, run their input functions, and pass the result into the initialization function
# init() all context creators specified by the user with their arguments
# import them according to their fully-specified class names in the config file
# it's up to the user to specify context creators which extract both negative and positive examples (if that's what they want)
# Chris - working - we want to hit every token
interesting_tokens = experiment_utils.import_and_call_function(config['interesting_tokens'])
print "INTERESTING TOKENS: ", interesting_tokens
logger.info('The number of interesting tokens is: ' + str(len(interesting_tokens)))
workers = config['workers']
# Note: context creators currently create their own interesting tokens internally (interesting tokens controls the index of the context creator)
logger.info('building the context creators...')
train_context_creators = experiment_utils.build_objects(config['context_creators'])
# get the contexts for all of our interesting words (may be +,- or, multi-class)
logger.info('mapping the training contexts over the interesting tokens in train...')
train_contexts = experiment_utils.map_contexts(interesting_tokens, train_context_creators, workers=workers)
# load and parse the test data
logger.info('mapping the training contexts over the interesting tokens in test...')
test_context_creator = experiment_utils.build_objects(config['testing'])
test_contexts = experiment_utils.map_contexts(interesting_tokens, [test_context_creator])
min_total = config['filters']['min_total']
# filter token contexts based on the user-specified filter criteria
logger.info('filtering the contexts by the total number of available instances...')
train_contexts = experiment_utils.filter_contexts(train_contexts, min_total=min_total)
test_contexts = experiment_utils.filter_contexts(test_contexts, min_total=min_total)
# make sure the test_context and train_context keys are in sync
experiment_utils.sync_keys(train_contexts, test_contexts)
# test_contexts = filter_contexts(test_contexts, min_total=min_total)
assert set(test_contexts.keys()) == set(train_contexts.keys())
# extract the 'tag' attribute into the y-value for classification
# tags may need to be converted to be consistent with the training data
wmt_binary_classes = {u'BAD': 0, u'OK': 1}
train_context_tags = experiment_utils.tags_from_contexts(train_contexts)
train_context_tags = {k: np.array([wmt_binary_classes[v] for v in val]) for k, val in train_context_tags.items()}
test_contexts = experiment_utils.convert_tagset(wmt_binary_classes, test_contexts)
test_tags_actual = experiment_utils.tags_from_contexts(test_contexts)
# all of the feature extraction should be parallelizable
# note that a feature extractor MUST be able to parse the context exchange format, or it should throw an error:
# { 'token': <token>, index: <idx>, 'source': [<source toks>]', 'target': [<target toks>], 'tag': <tag>}
feature_extractors = experiment_utils.build_feature_extractors(config['feature_extractors'])
logger.info('mapping the feature extractors over the contexts for test...')
test_context_features = experiment_utils.token_contexts_to_features_categorical(test_contexts, feature_extractors, workers=workers)
logger.info('mapping the feature extractors over the contexts for train...')
train_context_features = experiment_utils.token_contexts_to_features_categorical(train_contexts, feature_extractors, workers=workers)
# flatten so that we can properly binarize the features
all_values = experiment_utils.flatten(test_context_features.values())
all_values.extend(experiment_utils.flatten(train_context_features.values()))
binarizers = experiment_utils.fit_binarizers(all_values)
test_context_features = {k: [experiment_utils.binarize(v, binarizers) for v in val] for k, val in test_context_features.items()}
train_context_features = {k: [experiment_utils.binarize(v, binarizers) for v in val] for k, val in train_context_features.items()}
# BEGIN LEARNING
classifier_type = experiment_utils.import_class(config['learning']['classifier']['module'])
# train the classifier for each token
classifier_map = learning_utils.token_classifiers(train_context_features, train_context_tags, classifier_type)
# classify the test instances
# TODO: output a file in WMT format
# WORKING - dump the output in WMT format
logger.info('classifying the test instances')
test_predictions = {}
for key, features in test_context_features.iteritems():
try:
classifier = classifier_map[key]
predictions = classifier.predict(features)
test_predictions[key] = predictions
except KeyError as e:
print(key + " - is NOT in the classifier map")
raise
#### put the rest of the code into a separate 'evaluate' function that reads the WMT files
# create the performance report for each word in the test data that we had a classifier for
# TODO: Working - evaluate based on the format
f1_map = {}
for token, predicted in test_predictions.iteritems():
logger.info("Evaluating results for token = " + token)
actual = test_tags_actual[token]
print 'Actual: ', actual
print 'Predicted: ', predicted
logger.info("\ttotal instances: " + str(len(predicted)))
f1_map[token] = weighted_fmeasure(actual, predicted)
logger.info('Printing the map of f1 scores by token: ')
print(f1_map)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("configuration_file", action="store", help="path to the config file (in YAML format).")
args = parser.parse_args()
config = {}
# Experiment hyperparams
cfg_path = args.configuration_file
# read configuration file
with open(cfg_path, "r") as cfg_file:
config = yaml.load(cfg_file.read())
main(config)
| 6,898 | 48.633094 | 148 | py |
LayerAct | LayerAct-main/ResNet.py | from functools import partial
from typing import Any, Callable, List, Optional, Type, Union
import numpy as np
import random
import os
import torch
import torch.nn as nn
from torch import Tensor
def random_seed_set(rs) :
torch.manual_seed(rs)
torch.cuda.manual_seed(rs)
torch.cuda.manual_seed_all(rs)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = False
np.random.seed(rs)
random.seed(rs)
os.environ["PYTHONHASHSEED"] = str(rs)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
activation,
activation_params,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.act1 = activation(**activation_params)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.act2 = activation(**activation_params)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.act1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.act2(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition" https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
activation,
activation_params,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.act1 = activation(**activation_params)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.act2 = activation(**activation_params)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.act3 = activation(**activation_params)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.act1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.act3(out)
return out
class ResNet(nn.Module):
def __init__(
self,
activation,
activation_params,
rs,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
random_seed_set(rs)
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
f"or a 3-element tuple, got {replace_stride_with_dilation}"
)
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.act1 = activation(**activation_params)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(activation, activation_params, block, 64, layers[0])
self.layer2 = self._make_layer(activation, activation_params, block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(activation, activation_params, block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(activation, activation_params, block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck) and m.bn3.weight is not None:
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock) and m.bn2.weight is not None:
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(
self,
activation,
activation_params,
block: Type[Union[BasicBlock, Bottleneck]],
planes: int,
blocks: int,
stride: int = 1,
dilate: bool = False,
) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
activation, activation_params, self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
activation,
activation_params,
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
)
)
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
activation,
activation_params,
rs,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
    **kwargs: Any,
) -> ResNet:
    model = ResNet(activation, activation_params, rs, block, layers, **kwargs)
return model
def resnet18(activation, activation_params, rs, num_classes) :
return _resnet(activation, activation_params, rs, BasicBlock, [2, 2, 2, 2], num_classes=num_classes)
def resnet32(activation, activation_params, rs, num_classes) :
return _resnet(activation, activation_params, rs, BasicBlock, [3, 4, 6, 3], num_classes=num_classes)
def resnet50(activation, activation_params, rs, num_classes) :
return _resnet(activation, activation_params, rs, Bottleneck, [3, 4, 6, 3], num_classes=num_classes)
def resnet101(activation, activation_params, rs, num_classes) :
return _resnet(activation, activation_params, rs, Bottleneck, [3, 4, 23, 3], num_classes=num_classes)
def resnet_set(name) :
if name == 'resnet18' : return resnet18
elif name == 'resnet32' : return resnet32
elif name == 'resnet50' : return resnet50
elif name == 'resnet101' : return resnet101
def model_loader(model_name, activation, activation_params, rs, out_num) :
return resnet_set(model_name)(activation, activation_params, rs=rs, num_classes=out_num)
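# Minimal usage sketch (illustrative only; assumes a 10-class task and random input):
#   net = model_loader('resnet18', nn.ReLU, {}, rs=11, out_num=10)
#   out = net(torch.randn(2, 3, 32, 32))   # out.shape == torch.Size([2, 10])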
| 11,184 | 33.953125 | 149 | py |
LayerAct | LayerAct-main/test.py | import argparse
import os
import numpy as np
import pandas as pd
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from collections import OrderedDict as OD
from LayerAct import LA_HardSiLU, LA_SiLU
import data_augmentation
from train_validate import validate, validate_10crop
from ResNet import resnet18, resnet50, resnet101
from ResNet_small import resnet20, resnet32, resnet44
def resnet_set(name) :
if name == 'resnet18' : return resnet18
elif name == 'resnet50' : return resnet50
elif name == 'resnet101' : return resnet101
elif name == 'resnet20' : return resnet20
elif name == 'resnet32' : return resnet32
elif name == 'resnet44' : return resnet44
def activation_set(name) :
if name == 'relu' : return nn.ReLU
elif name == 'leakyrelu' : return nn.LeakyReLU
elif name == 'prelu' : return nn.PReLU
elif name == 'mish' : return nn.Mish
elif name == 'silu' : return nn.SiLU
elif name == 'hardsilu' : return nn.Hardswish
elif name == 'la_silu' : return LA_SiLU
elif name == 'la_hardsilu' : return LA_HardSiLU
def model_loader(model_name, activation, activation_params, rs, out_num) :
return resnet_set(model_name)(activation, activation_params, rs=rs, num_classes=out_num)
def folder_check(path, data_name, model_name) :
path_f = path + data_name + '/'
path_m = path_f + model_name + '/'
if data_name not in os.listdir(path) :
os.makedirs(path_f)
if model_name not in os.listdir(path_f) :
os.makedirs(path_m)
return path_m
random_seed = [11*i for i in range(1, 21)]
#######################################################################################################
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description='')
parser.add_argument('--data', '-d', default='CIFAR10')
parser.add_argument('--model', '-m', default='resnet20')
parser.add_argument('--activations', '-a', default='relu,leakyrelu,prelu,mish,silu,hardsilu,la_silu,la_hardsilu')
parser.add_argument('--noise', '-n', default='None')
parser.add_argument('--noise_param1', '-np1', default='')
parser.add_argument('--noise_param2', '-np2', default='')
parser.add_argument('--device', default=0, type=int)
parser.add_argument('--crop', default='center')
parser.add_argument('--start_trial', default=1, type=int)
parser.add_argument('--end_trial', default=5, type=int)
parser.add_argument('--alpha', default=1e-5)
parser.add_argument('--batch_size', '-bs', default=128)
parser.add_argument('--num_workers', '-nw', default=16)
parser.add_argument('--data_path', '-dp', default='')
parser.add_argument('--model_path', default='trained_models/')
parser.add_argument('--save_path', default='result/')
parser.add_argument('--resume', default=True, type=bool)
parser.add_argument('--duplicate', default=True, type=bool)
parser.add_argument('--save', default=True, type=bool)
args = parser.parse_args()
activation_list = [a for a in args.activations.split(',')]
device = torch.device('cuda:{}'.format(args.device))
model_path = folder_check(args.model_path, args.data, args.model)
save_path = folder_check(args.save_path, args.data, args.model)
if args.noise == 'gaussian' :
param1, param2 = float(args.noise_param1), float(args.noise_param2)
elif args.noise == 'blur' :
param1 = (int(args.noise_param1.split(',')[0]), int(args.noise_param1.split(',')[1]))
param2 = (int(args.noise_param2.split(',')[0]), int(args.noise_param2.split(',')[1]))
else :
param1, param2 = 0, 0
for activation_name in activation_list :
activation = activation_set(activation_name)
activation_params = {'alpha' : args.alpha} if 'la_' in activation_name else {} # parameter alpha of LayerAct functions for stable training
for trial in range(args.start_trial, args.end_trial+1) :
rs = random_seed[trial-1]
random.seed(rs)
np.random.seed(rs)
torch.manual_seed(rs)
cudnn.deterministic = True
cudnn.benchmark = False
file_name = '{}_{}'.format(activation_name, trial)
if args.data == 'CIFAR10' :
train_loader, val_loader, test_loader = data_augmentation.load_CIFAR10(
args.data_path, args.noise, param1, param2, args.batch_size, args.num_workers, rs
)
in_channel, H, W, out_num = 3, 32, 32, 10
elif args.data == 'CIFAR100' :
train_loader, val_loader, test_loader = data_augmentation.load_CIFAR100(
args.data_path, args.noise, param1, param2, args.batch_size, args.num_workers, rs
)
in_channel, H, W, out_num = 3, 32, 32, 100
elif args.data == 'ImageNet' :
train_loader, val_loader, test_loader = data_augmentation.load_CIFAR100(
args.data_path, args.noise, param1, param2, args.batch_size, args.num_workers, rs, args.crop
)
in_channel, H, W, out_num = 3, 224, 224, 1000
else :
raise Exception('Dataset should be "CIFAR10", "CIFAR100", and "ImageNet"')
model = model_loader(args.model, activation, activation_params, rs, out_num)
model.to(device)
criterion = nn.CrossEntropyLoss().to(device)
trained = torch.load(model_path + file_name + '.pth.tar', map_location=device)
try :
model.load_state_dict(trained)
except :
trained_ = OD([(k.split('module.')[-1], trained[k]) for k in trained.keys()])
model.load_state_dict(trained_)
if args.crop == '10crop' :
test_loss, test_acc1, test_acc5 = validate_10crop(test_loader, model, criterion, device)
else :
test_loss, test_acc1, test_acc5 = validate(test_loader, model, criterion, device)
print("{} | {} | {} | Test | acc1 {} | acc5 {}".format(args.model, trial, activation_name, test_acc1, test_acc5), end = '\n') | 6,329 | 42.356164 | 149 | py |
LayerAct | LayerAct-main/train_validate.py | import time
from enum import Enum
import torch
import torch.nn.parallel
import torch.optim
import torch.utils.data
import shutil
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class Summary(Enum):
NONE = 0
AVERAGE = 1
SUM = 2
COUNT = 3
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE):
self.name = name
self.fmt = fmt
self.summary_type = summary_type
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def summary(self):
fmtstr = ''
if self.summary_type is Summary.NONE:
fmtstr = ''
elif self.summary_type is Summary.AVERAGE:
fmtstr = '{name} {avg:.3f}'
elif self.summary_type is Summary.SUM:
fmtstr = '{name} {sum:.3f}'
elif self.summary_type is Summary.COUNT:
fmtstr = '{name} {count:.3f}'
else:
raise ValueError('invalid summary type %r' % self.summary_type)
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def display_summary(self):
entries = [" *"]
entries += [meter.summary() for meter in self.meters]
print(' '.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def train(train_loader, model, criterion, optimizer, lr_scheduler, device, iter, output_device=None):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
if output_device is None :
output_device = device
else :
output_device = torch.device('cuda:{}'.format(output_device))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
data_time.update(time.time() - end)
images = images.to(device, non_blocking=True)
target = target.to(output_device, non_blocking=True)
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_scheduler.step()
batch_time.update(time.time() - end)
end = time.time()
iter += 1
return iter, lr_scheduler
def validate(val_loader, model, criterion, device, output_device=None):
if output_device is None :
output_device = device
else :
if type(output_device) == int :
output_device = torch.device('cuda:{}'.format(output_device))
else :
output_device = output_device
def run_validate(loader, base_progress=0, topk=(1,5)):
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(loader):
i = base_progress + i
images = images.to(device, non_blocking=True)
target = target.to(output_device, non_blocking=True)
output = model(images)
try :
loss = criterion(output, target)
except :
print('i : ', i, ' | output : ', output.device, ' | target : ', target.device)
acc1, acc5 = accuracy(output, target, topk=topk)
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
batch_time = AverageMeter('Time', ':6.3f', Summary.NONE)
losses = AverageMeter('Loss', ':.4e', Summary.NONE)
top1 = AverageMeter('Acc@1', ':6.2f', Summary.AVERAGE)
top5 = AverageMeter('Acc@5', ':6.2f', Summary.AVERAGE)
# switch to evaluate mode
model.eval()
run_validate(val_loader)
return losses.avg, top1.avg, top5.avg
def validate_10crop(val_loader, model, criterion, device):
def run_validate(loader, base_progress=0, topk=(1,5)):
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(loader):
i = base_progress + i
if device is not None and torch.cuda.is_available():
images = images.cuda(device, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(device, non_blocking=True)
bs, ncrops, c, h, w = images.size()
images = images.view(-1, c, h, w)
# compute output
output = model(images)
output = output.view(bs, ncrops, -1).mean(1)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=topk)
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
batch_time = AverageMeter('Time', ':6.3f', Summary.NONE)
losses = AverageMeter('Loss', ':.4e', Summary.NONE)
top1 = AverageMeter('Acc@1', ':6.2f', Summary.AVERAGE)
top5 = AverageMeter('Acc@5', ':6.2f', Summary.AVERAGE)
# switch to evaluate mode
model.eval()
run_validate(val_loader)
return losses.avg, top1.avg, top5.avg | 7,326 | 32.153846 | 101 | py |
LayerAct | LayerAct-main/train_parallel.py | import argparse
import time
import os
import sys
import numpy as np
import random
import shutil
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from LayerAct import LA_HardSiLU, LA_SiLU
import data_augmentation
from train_validate import train, validate
from ResNet import resnet18, resnet50, resnet101
from ResNet_small import resnet20, resnet32, resnet44
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def resnet_set(name) :
if name == 'resnet18' : return resnet18
elif name == 'resnet50' : return resnet50
elif name == 'resnet101' : return resnet101
elif name == 'resnet20' : return resnet20
elif name == 'resnet32' : return resnet32
elif name == 'resnet44' : return resnet44
def activation_set(name) :
if name == 'relu' : return nn.ReLU
elif name == 'leakyrelu' : return nn.LeakyReLU
elif name == 'prelu' : return nn.PReLU
elif name == 'mish' : return nn.Mish
elif name == 'silu' : return nn.SiLU
elif name == 'hardsilu' : return nn.Hardswish
elif name == 'la_silu' : return LA_SiLU
elif name == 'la_hardsilu' : return LA_HardSiLU
def model_loader(model_name, activation, activation_params, rs, out_num) :
return resnet_set(model_name)(activation, activation_params, rs=rs, num_classes=out_num)
def folder_check(path, data_name, model_name) :
path_f = path + data_name + '/'
path_m = path_f + model_name + '/'
if data_name not in os.listdir(path) :
os.makedirs(path_f)
if model_name not in os.listdir(path_f) :
os.makedirs(path_m)
return path_m
random_seed = [11*i for i in range(1, 21)]
#######################################################################################################
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description='')
parser.add_argument('--data', '-d', default='CIFAR10')
parser.add_argument('--model', '-m', default='resnet20')
parser.add_argument('--activation', '-a', default='relu')
parser.add_argument('--device_ids', default='0')
parser.add_argument('--output_device', default=0, type=int)
parser.add_argument('--crop', default='center')
parser.add_argument('--start_trial', default=1, type=int)
parser.add_argument('--end_trial', default=5, type=int)
parser.add_argument('--alpha', default=1e-1)
parser.add_argument('--batch_size', '-bs', default=256)
parser.add_argument('--num_workers', '-nw', default=16)
parser.add_argument('--learning_rate', '-lr', default=0.1)
parser.add_argument('--momentum', default=0.9)
parser.add_argument('--weight_decay', '-wd', default=0.0001)
parser.add_argument('--max_iter', default=600000)
parser.add_argument('--milestones', default='180000,360000,540000')
parser.add_argument('--data_path', '-dp', default='')
parser.add_argument('--save_path', default='trained_models/')
parser.add_argument('--resume', default="True", type=str)
parser.add_argument('--duplicate', default="False", type=str)
parser.add_argument('--save', default="True", type=str)
args = parser.parse_args()
activation = activation_set(args.activation)
activation_params = {'alpha' : args.alpha} if 'la_' in args.activation else {} # parameter alpha of LayerAct functions for stable training
milestones = [int(m) for m in args.milestones.split(',')]
device_ids = [int(d) for d in args.device_ids.split(',')]
output_device = torch.device('cuda:{}'.format(args.output_device))
save_path = folder_check(args.save_path, args.data, args.model)
resume = True if args.resume == 'True' else False
duplicate = True if args.duplicate == 'True' else False
save = True if args.save == 'True' else False
for trial in range(args.start_trial, args.end_trial+1) :
rs = random_seed[trial-1]
random.seed(rs)
np.random.seed(rs)
torch.manual_seed(rs)
cudnn.deterministic = True
cudnn.benchmark = False
file_name = '{}_{}'.format(args.activation, trial)
if not duplicate and '{}.pth.tar'.format(file_name) in os.listdir(save_path) :
sys.exit('Model ({} | {} | {}) exists'.format(args.data, args.model, args.activation))
if args.data == 'CIFAR10' :
train_loader, val_loader, test_loader = data_augmentation.load_CIFAR10(args.data_path, 'None', '', '', args.batch_size, args.num_workers, rs)
in_channel, H, W, out_num = 3, 32, 32, 10
elif args.data == 'CIFAR100' :
train_loader, val_loader, test_loader = data_augmentation.load_CIFAR100(args.data_path, 'None', '', '', args.batch_size, args.num_workers, rs)
in_channel, H, W, out_num = 3, 32, 32, 100
elif args.data == 'ImageNet' :
            train_loader, val_loader, test_loader = data_augmentation.load_ImageNet(args.data_path, 'None', '', '', args.batch_size, args.num_workers, rs, args.crop)
in_channel, H, W, out_num = 3, 224, 224, 1000
else :
            raise Exception('Dataset must be one of "CIFAR10", "CIFAR100", or "ImageNet"')
model = model_loader(args.model, activation, activation_params, rs, out_num)
model.to(torch.device('cuda'))
model = nn.DataParallel(model, device_ids=device_ids, output_device=output_device)
criterion = nn.CrossEntropyLoss().to(torch.device('cuda'))
optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, last_epoch=-1)
print('model make', end='\n')
best_model = None
best_acc1 = 0
start_time = time.time()
start_iter = 0
if resume and os.path.isfile(save_path + file_name + '_checkpoint.pth.tar') :
print('Resume', end='\r')
checkpoint = torch.load(save_path + file_name + '_checkpoint.pth.tar', map_location=torch.device('cuda'))
start_iter = checkpoint['iter']
best_acc1 = checkpoint['best_acc1']
best_model = checkpoint['best_model']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['scheduler'])
iter = start_iter
while iter < args.max_iter :
iter, lr_scheduler = train(train_loader, model, criterion, optimizer, lr_scheduler, torch.device('cuda'), iter, output_device=output_device)
val_loss, val_acc1, val_acc5 = validate(val_loader, model, criterion, torch.device('cuda'), output_device=output_device)
train_loss, train_acc1, train_acc5 = validate(train_loader, model, criterion, torch.device('cuda'), output_device=output_device)
t = time.time()
is_best = val_acc1 > best_acc1
best_acc1 = max(val_acc1, best_acc1)
if is_best :
best_model = model.state_dict()
best_iter = iter
print(
'Updated | Iter {}/{} | {}% | {} min | {} min left | Train loss {} | top1 {} | top5 {} | val loss {} | top1 {} | top5 {}'.format(
iter, args.max_iter, round(100*(iter+1)/args.max_iter), round((t-start_time)/60), round((t-start_time)/60*((args.max_iter-iter-1)/(iter+1))),
round(train_loss, 3), round(train_acc1.item(), 3), round(train_acc5.item(), 3),
round(val_loss, 3), round(val_acc1.item(), 3), round(val_acc5.item(), 3)
) + ' '*10, end='\r'
)
save_checkpoint(
{
'iter' : iter + 1,
'time' : t,
'state_dict' : model.state_dict(),
'best_model' : best_model,
'best_acc1' : best_acc1,
'optimizer' : optimizer.state_dict(),
'scheduler' : lr_scheduler.state_dict(),
}, is_best, save_path + file_name + '_checkpoint.pth.tar'
)
if iter > args.max_iter :
break
if save :
torch.save(best_model, '{}.pth.tar'.format(save_path + file_name))
model.load_state_dict(best_model)
test_loss, test_acc1, test_acc5 = validate(test_loader, model, criterion, torch.device('cuda'), output_device=output_device)
print("{} | {} | {} | Test | acc1 {} | acc5 {}".format(args.model, trial, args.activation, test_acc1, test_acc5), end = '\n')
| 8,871 | 42.920792 | 166 | py |
LayerAct | LayerAct-main/ResNet_small.py | import torch.nn as nn
import torch.nn.functional as F
class ResNet(nn.Module):
def __init__(self, activation, activation_params, rs, layers, num_classes):
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
self.norm1 = nn.BatchNorm2d(16)
self.act1 = activation(**activation_params)
self.layers1 = self._make_layer(activation, activation_params, layers[0], 16, 16, 1)
self.layers2 = self._make_layer(activation, activation_params, layers[1], 32, 16, 2)
self.layers3 = self._make_layer(activation, activation_params, layers[2], 64, 32, 2)
self.avgpool = nn.AvgPool2d(8)
self.linear = nn.Linear(64, num_classes)
def _make_layer(self, activation, activation_params, layer_count, channels, channels_in, stride):
return nn.Sequential(
ResBlock(activation, activation_params, channels, channels_in, stride),
*[ResBlock(activation, activation_params, channels) for _ in range(layer_count-1)]
)
def forward(self, x):
out = self.conv1(x)
out = self.norm1(out)
out = self.act1(out)
out = self.layers1(out)
out = self.layers2(out)
out = self.layers3(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class ResBlock(nn.Module):
def __init__(self, activation, activation_params, num_filters, channels_in=None, stride=1):
super(ResBlock, self).__init__()
        # option-A shortcut: when the channel count changes, downsample and zero-pad instead of a projection convolution
if not channels_in or channels_in == num_filters:
channels_in = num_filters
self.projection = None
else :
self.projection = IdentityPadding(num_filters, channels_in, stride)
self.conv1 = nn.Conv2d(channels_in, num_filters, kernel_size=3, stride=stride, padding=1)
self.bn1 = nn.BatchNorm2d(num_filters)
self.act1 = activation(**activation_params)
self.conv2 = nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1)
self.bn2 = nn.BatchNorm2d(num_filters)
self.act2 = activation(**activation_params)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.act1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.projection:
residual = self.projection(x)
out += residual
out = self.act2(out)
return out
# various projection options to change number of filters in residual connection
# option A from paper
class IdentityPadding(nn.Module):
def __init__(self, num_filters, channels_in, stride):
super(IdentityPadding, self).__init__()
# with kernel_size=1, max pooling is equivalent to identity mapping with stride
self.identity = nn.MaxPool2d(1, stride=stride)
self.num_zeros = num_filters - channels_in
def forward(self, x):
out = F.pad(x, (0, 0, 0, 0, 0, self.num_zeros))
out = self.identity(out)
return out
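# Illustrative shape check (added for clarity; the helper name is ours, not the original
# authors'): an option-A shortcut for a stage transition, e.g. 16 -> 32 channels with
# stride 2, zero-pads the new channels and halves the spatial size.
def _identity_padding_shape_demo():
    import torch
    proj = IdentityPadding(num_filters=32, channels_in=16, stride=2)
    x = torch.randn(2, 16, 32, 32)
    out = proj(x)
    assert out.shape == (2, 32, 16, 16)
    return out.shape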
def resnet20(activation, activation_params, rs, num_classes) :
return ResNet(activation, activation_params, rs, [3, 3, 3], num_classes=num_classes)
def resnet32(activation, activation_params, rs, num_classes) :
return ResNet(activation, activation_params, rs, [5, 5, 5], num_classes=num_classes)
def resnet44(activation, activation_params, rs, num_classes) :
return ResNet(activation, activation_params, rs, [7, 7, 7], num_classes=num_classes)
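# Depths follow the 6n+2 rule of the original CIFAR ResNets: n = 3, 5, 7 residual
# blocks per stage give ResNet-20, ResNet-32 and ResNet-44 respectively.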
| 3,640 | 36.536082 | 101 | py |
LayerAct | LayerAct-main/LayerAct.py | # importing
import torch
import torch.nn as nn
import warnings
warnings.filterwarnings('ignore')
# function to calculate the layer-direction mean and variance.
def calculate_mean_std_for_forward(inputs, std = True) :
if len(inputs.shape) < 4 :
cal_dim = [1]
else :
cal_dim = [1, 2, 3]
mean = inputs.mean(dim=cal_dim, keepdim=True)
if std :
var = inputs.var(dim=cal_dim, keepdim=True)
return mean, var, cal_dim
else :
return mean, cal_dim
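# Illustrative sketch (helper name is ours, not the original authors'): for a conv
# feature map of shape (N, C, H, W) the statistics above are computed per sample over
# all C*H*W activations, so mean/var broadcast back with shape (N, 1, 1, 1).
def _layer_stats_shape_demo():
    x = torch.randn(8, 16, 32, 32)
    mean, var, cal_dim = calculate_mean_std_for_forward(x)
    assert mean.shape == (8, 1, 1, 1) and var.shape == (8, 1, 1, 1) and cal_dim == [1, 2, 3]
    return mean, var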
#############################################################
class LA_SiLU(nn.Module) :
"""
# alpha
- float
- the parameter for stability of activation
# save_less
- bool
- if true, do not save mean, variance, standard deviation, and normalized input for "backward" by ctx.save_for_backward()
- if false, save mean, variance, standard deviation, and normalized input for "backward" by ctx.save_for_backward()
"""
def __init__(self, alpha=1e-5, save_less=False) :
super(LA_SiLU, self).__init__()
self.alpha = alpha
self.save_less = save_less
    def forward(self, inputs) :
        # training and inference share the same autograd path; the mode flag only controls what is cached
        return la_silu.apply(inputs, self.alpha, self.save_less, self.training)
class la_silu(torch.autograd.Function) :
@staticmethod
def forward(ctx, inputs, alpha, save_less, training=True) :
mean, var, cal_dim = calculate_mean_std_for_forward(inputs)
if save_less or not training :
z = torch.mul(torch.sigmoid(torch.div(torch.sub(inputs, mean), torch.sqrt(var+alpha))), inputs)
else :
var_ = var+alpha
std = torch.sqrt(var_)
n = torch.div(torch.sub(inputs, mean), std)
s = torch.sigmoid(n)
z = torch.mul(s, inputs)
if training :
ctx.save_less = save_less
ctx.alpha = alpha
if save_less :
ctx.save_for_backward(inputs)
else :
ctx.save_for_backward(inputs, mean, var, std, n, s)
ctx.cal_dim = cal_dim
return z
@staticmethod
def backward(ctx, output_grad):
alpha = ctx.alpha
if ctx.save_less :
inputs, = ctx.saved_tensors
mean, var, cal_dim = calculate_mean_std_for_forward(inputs)
std = torch.sqrt(var+alpha)
n = torch.div(torch.sub(inputs, mean), std)
s = torch.sigmoid(n)
else :
inputs, mean, var, std, n, s = ctx.saved_tensors
cal_dim = ctx.cal_dim
inputs_grad = torch.mul(output_grad.clone(), s)
dn = torch.div(
torch.mul(
torch.mul(output_grad.clone(), inputs.clone()),
torch.mul(s, 1-s)
),
std
)
dn = torch.sub(
dn,
torch.add(
torch.mean(dn, dim=cal_dim, keepdim=True),
torch.mul(torch.mean(torch.mul(dn, n), dim=cal_dim, keepdim=True), n)
)
)
inputs_grad = torch.add(inputs_grad, dn)
return inputs_grad, None, None, None
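# Minimal usage sketch (added for illustration; not part of the original module):
# LA_SiLU is a drop-in nn.Module activation, so it can replace nn.SiLU in a network,
# and gradients flow through the custom autograd function above.
def _la_silu_usage_example():
    act = LA_SiLU(alpha=1e-5, save_less=False)
    x = torch.randn(4, 8, 16, 16, requires_grad=True)
    y = act(x)           # same shape as x
    y.mean().backward()  # exercises la_silu.backward
    return y.shape, x.grad.shape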
#############################################################
class LA_HardSiLU(nn.Module) :
def __init__(self, alpha=1e-5, save_less=False) :
super(LA_HardSiLU, self).__init__()
self.alpha = alpha
self.save_less = save_less
def forward(self, inputs) :
return la_hardsilu.apply(inputs, self.alpha, self.save_less, self.training)
class la_hardsilu(torch.autograd.Function):
@staticmethod
def forward(ctx, inputs, alpha, save_less, training=True):
shape = inputs.shape
device = inputs.device
ones = torch.ones(shape, device=device)
zeros = torch.zeros(shape, device=device)
mean, var, cal_dim = calculate_mean_std_for_forward(inputs)
if save_less or not training :
n = torch.div(torch.sub(inputs, mean), torch.sqrt(var+alpha))
z = torch.mul(inputs, torch.where(n<=3, torch.where(n<=-3, zeros.clone(), n/6+0.5), ones.clone()))
else :
var_ = var+alpha
std = torch.sqrt(var_)
n = torch.div(torch.sub(inputs, mean), std)
s = torch.where(n<=-3, zeros.clone(), n/6+0.5)
s = torch.where(n<=3, s, ones.clone())
z = torch.mul(inputs, s)
if training :
ctx.save_less = save_less
if save_less :
ctx.save_for_backward(inputs)
ctx.alpha = alpha
else :
ctx.save_for_backward(inputs, mean, std, n, s)
ctx.cal_dim = cal_dim
return z
@staticmethod
def backward(ctx, output_grad):
if ctx.save_less :
inputs, = ctx.saved_tensors
shape = inputs.shape
device = inputs.device
ones = torch.ones(shape, device=device)
zeros = torch.zeros(shape, device=device)
alpha = ctx.alpha
mean, var, cal_dim = calculate_mean_std_for_forward(inputs)
std = torch.sqrt(var+alpha)
n = torch.div(torch.sub(inputs, mean), std)
s = torch.where(
n<=3,
torch.where(n<=-3, zeros.clone(), n/6+0.5),
ones.clone()
)
else :
cal_dim = ctx.cal_dim
inputs, mean, std, n, s = ctx.saved_tensors
shape = inputs.shape
device = inputs.device
ones = torch.ones(shape, device=device)
zeros = torch.zeros(shape, device=device)
inputs_grad = torch.mul(output_grad.clone(), s)
ds = torch.where(
n<=3,
torch.where(n<=-3, zeros.clone(), ones.clone()/6),
zeros.clone()
)
da = torch.mul(output_grad.clone(), inputs.clone())
dn = torch.div(torch.mul(da, ds), std)
dn = torch.sub(
dn,
torch.add(
torch.mean(dn, dim=cal_dim, keepdim=True),
torch.mul(torch.mean(torch.mul(dn, n), dim=cal_dim, keepdim=True), n)
)
)
inputs_grad = torch.add(inputs_grad, dn)
return inputs_grad, None, None, None
#############################################################
| 6,523 | 32.803109 | 125 | py |
LayerAct | LayerAct-main/data_augmentation.py | import os
import numpy as np
import torch.utils.data
import torchvision
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.model_selection import StratifiedShuffleSplit
import random
from ResNet import resnet18, resnet50, resnet101
from ResNet_small import resnet20, resnet32, resnet44
class AddGaussianNoise(object):
def __init__(self, mean=0, std=1, random_seed=0):
self.std = std
self.mean = mean
self.random_seed = random_seed
def __call__(self, tensor):
random.seed(self.random_seed)
np.random.seed(self.random_seed)
torch.manual_seed(self.random_seed)
torch.cuda.manual_seed(self.random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
return tensor + torch.randn(tensor.size()) * self.std + self.mean
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
class AddPoissonNoise(object):
def __init__(self, random_seed=0):
self.random_seed=random_seed
def __call__(self, tensor):
random.seed(self.random_seed)
np.random.seed(self.random_seed)
torch.manual_seed(self.random_seed)
torch.cuda.manual_seed(self.random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
vals = len(torch.unique(tensor))
vals = 2**np.ceil(np.log2(vals))
return tensor + torch.poisson(tensor*vals)/float(vals)
def __repr__(self):
return self.__class__.__name__
def CIFAR_transforms(noise, normalize, test, param1, param2, random_seed) :
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
compose_list = [transforms.ToTensor()]
if noise == 'blur' :
compose_list.append(transforms.GaussianBlur(param1, param2))
elif noise == 'gaussian' :
compose_list.append(AddGaussianNoise(param1, param2, random_seed))
elif noise == 'poisson' :
compose_list.append(AddPoissonNoise(random_seed))
if not test :
compose_list += [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, 4)]
compose_list.append(normalize)
return transforms.Compose(compose_list)
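# Usage sketch (added for illustration; the parameter values are arbitrary): a corrupted
# *test* transform for the robustness experiments, e.g. Gaussian blur with kernel
# size 3 and sigma 1.0. Training transforms pass noise='None' and test=False instead.
def _example_blur_test_transform(normalize, random_seed=0):
    return CIFAR_transforms('blur', normalize, True, 3, 1.0, random_seed)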
def load_CIFAR10(data_path, test_noise, param1, param2, batch_size, num_workers, random_seed) :
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
train_dataset = torchvision.datasets.CIFAR10(root = data_path, train=True, transform=transforms.ToTensor(), download=False)
test_dataset = torchvision.datasets.CIFAR10(root = data_path, train=False, transform=transforms.ToTensor(), download=False)
imgs = torch.stack([d[0] for d in train_dataset], dim=0).numpy()
mean = [imgs[:, 0, :, :].mean(), imgs[:, 1, :, :].mean(), imgs[:, 2, :, :].mean()]
std = [imgs[:, 0, :, :].std(), imgs[:, 1, :, :].std(), imgs[:, 2, :, :].std()]
normalize = transforms.Normalize(mean=mean, std=std)
train_transforms = CIFAR_transforms('None', normalize, False, param1, param2, random_seed)
test_transforms = CIFAR_transforms(test_noise, normalize, True, param1, param2, random_seed)
train_dataset = torchvision.datasets.CIFAR10(root = data_path, train=True, transform=train_transforms, download=False)
test_dataset = torchvision.datasets.CIFAR10(root = data_path, train=False, transform=test_transforms, download=False)
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=random_seed)
indices = list(range(len(train_dataset)))
train_list = [t for _, t in train_dataset]
for train_index, val_index in sss.split(indices, train_list):
train_index = train_index
val_index = val_index
train_sampler = SubsetRandomSampler(train_index)
val_sampler = SubsetRandomSampler(val_index)
pin_memory = True
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler,
num_workers=num_workers, pin_memory = pin_memory
)
val_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=val_sampler,
num_workers=num_workers, pin_memory = pin_memory
)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=batch_size,
num_workers=num_workers, pin_memory = pin_memory
)
return train_loader, val_loader, test_loader
def load_CIFAR100(data_path, test_noise, param1, param2, batch_size, num_workers, random_seed) :
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
train_dataset = torchvision.datasets.CIFAR100(root = data_path, train=True, transform=transforms.ToTensor(), download=False)
test_dataset = torchvision.datasets.CIFAR100(root = data_path, train=False, transform=transforms.ToTensor(), download=False)
imgs = torch.stack([d[0] for d in train_dataset], dim=0).numpy()
mean = [imgs[:, 0, :, :].mean(), imgs[:, 1, :, :].mean(), imgs[:, 2, :, :].mean()]
std = [imgs[:, 0, :, :].std(), imgs[:, 1, :, :].std(), imgs[:, 2, :, :].std()]
normalize = transforms.Normalize(mean=mean, std=std)
train_transforms = CIFAR_transforms('None', normalize, False, param1, param2, random_seed)
test_transforms = CIFAR_transforms(test_noise, normalize, True, param1, param2, random_seed)
train_dataset = torchvision.datasets.CIFAR100(root = data_path, train=True, transform=train_transforms, download=False)
test_dataset = torchvision.datasets.CIFAR100(root = data_path, train=False, transform=test_transforms, download=False)
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=random_seed)
indices = list(range(len(train_dataset)))
train_list = [t for _, t in train_dataset]
for train_index, val_index in sss.split(indices, train_list):
train_index = train_index
val_index = val_index
train_sampler = SubsetRandomSampler(train_index)
val_sampler = SubsetRandomSampler(val_index)
pin_memory = True
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler,
num_workers=num_workers, pin_memory = pin_memory
)
val_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=val_sampler,
num_workers=num_workers, pin_memory = pin_memory
)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=batch_size,
num_workers=num_workers, pin_memory = pin_memory
)
return train_loader, val_loader, test_loader
def imagenet_transforms(noise, normalize, test, param1, param2, crop='center', random_seed=0) :
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
compose_list = [transforms.ToTensor()]
if noise == 'blur' :
compose_list.append(transforms.GaussianBlur(param1, param2))
elif noise == 'gaussian' :
compose_list.append(AddGaussianNoise(param1, param2))
elif noise == 'poisson' :
compose_list.append(AddPoissonNoise())
if not test :
compose_list += [transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), normalize]
else :
if crop == 'random' :
compose_list += [transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), normalize]
elif crop == 'center' :
compose_list += [transforms.Resize(256), transforms.CenterCrop(224), normalize]
elif crop == '10crop' :
compose_list = [
transforms.Resize(256), transforms.TenCrop(224),
transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops])),
]
return transforms.Compose(compose_list)
def load_ImageNet(data_path, test_noise, param1, param2, batch_size, num_workers, random_seed, crop) :
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(random_seed)
random.seed(random_seed)
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
train_transforms = imagenet_transforms('None', normalize, False, param1, param2, random_seed=random_seed)
test_transforms = imagenet_transforms(test_noise, normalize, True, param1, param2, crop, random_seed=random_seed)
if crop == '10crop' :
batch_size = 32
else :
batch_size = 256
pin_memory = True
train_dataset = torchvision.datasets.ImageFolder(root = data_path + 'train', transform=train_transforms)
val_dataset = torchvision.datasets.ImageFolder(root = data_path + 'val', transform=test_transforms)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True,
num_workers=num_workers, pin_memory = pin_memory
)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory
)
return train_loader, val_loader, val_loader
| 9,943 | 39.422764 | 128 | py |
LayerAct | LayerAct-main/train.py | import argparse
import time
import os
import sys
import numpy as np
import random
import shutil
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from LayerAct import LA_HardSiLU, LA_SiLU
import data_augmentation
from train_validate import train, validate
from ResNet import resnet18, resnet50, resnet101
from ResNet_small import resnet20, resnet32, resnet44
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def resnet_set(name) :
if name == 'resnet18' : return resnet18
elif name == 'resnet50' : return resnet50
elif name == 'resnet101' : return resnet101
elif name == 'resnet20' : return resnet20
elif name == 'resnet32' : return resnet32
elif name == 'resnet44' : return resnet44
def activation_set(name) :
if name == 'relu' : return nn.ReLU
elif name == 'leakyrelu' : return nn.LeakyReLU
elif name == 'prelu' : return nn.PReLU
elif name == 'mish' : return nn.Mish
elif name == 'silu' : return nn.SiLU
elif name == 'hardsilu' : return nn.Hardswish
elif name == 'la_silu' : return LA_SiLU
elif name == 'la_hardsilu' : return LA_HardSiLU
def model_loader(model_name, activation, activation_params, rs, out_num) :
return resnet_set(model_name)(activation, activation_params, rs=rs, num_classes=out_num)
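# Example (illustrative, not from the original script): a CIFAR-10 ResNet-20 with the
# LayerAct activation is built as
#   model_loader('resnet20', LA_SiLU, {'alpha': 1e-5}, rs=11, out_num=10)
# while a plain ReLU baseline passes an empty parameter dict:
#   model_loader('resnet20', nn.ReLU, {}, rs=11, out_num=10)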
def folder_check(path, data_name, model_name) :
path_f = path + data_name + '/'
path_m = path_f + model_name + '/'
if data_name not in os.listdir(path) :
os.makedirs(path_f)
if model_name not in os.listdir(path_f) :
os.makedirs(path_m)
return path_m
random_seed = [11*i for i in range(1, 21)]
#######################################################################################################
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description='')
parser.add_argument('--data', '-d', default='CIFAR10', type=str)
parser.add_argument('--model', '-m', default='resnet20', type=str)
parser.add_argument('--activation', '-a', default='relu', type=str)
parser.add_argument('--device', default=0, type=int)
parser.add_argument('--crop', default='center', type=str)
parser.add_argument('--start_trial', default=1, type=int)
parser.add_argument('--end_trial', default=5, type=int)
parser.add_argument('--alpha', default=1e-5, type=float)
parser.add_argument('--batch_size', '-bs', default=128, type=int)
parser.add_argument('--num_workers', '-nw', default=16, type=int)
parser.add_argument('--learning_rate', '-lr', default=0.1, type=float)
parser.add_argument('--momentum', default=0.9, type=float)
parser.add_argument('--weight_decay', '-wd', default=0.0001, type=float)
parser.add_argument('--max_iter', default=64000, type=int)
parser.add_argument('--milestones', default='32000,48000', type=str)
parser.add_argument('--data_path', '-dp', default='', type=str)
parser.add_argument('--save_path', default='trained_models/', type=str)
parser.add_argument('--resume', default="True", type=str)
parser.add_argument('--duplicate', default="False", type=str)
parser.add_argument('--save', default="True", type=str)
args = parser.parse_args()
activation = activation_set(args.activation)
activation_params = {'alpha' : args.alpha} if 'la_' in args.activation else {} # parameter alpha of LayerAct functions for stable training
milestones = [int(m) for m in args.milestones.split(',')]
device = torch.device('cuda:{}'.format(args.device))
save_path = folder_check(args.save_path, args.data, args.model)
resume = True if args.resume == 'True' else False
duplicate = True if args.duplicate == 'True' else False
save = True if args.save == 'True' else False
for trial in range(args.start_trial, args.end_trial+1) :
rs = random_seed[trial-1]
random.seed(rs)
np.random.seed(rs)
torch.manual_seed(rs)
cudnn.deterministic = True
cudnn.benchmark = False
file_name = '{}_{}'.format(args.activation, trial)
if not duplicate and '{}.pth.tar'.format(file_name) in os.listdir(save_path) :
sys.exit('Model ({} | {} | {}) exists'.format(args.data, args.model, args.activation))
if args.data == 'CIFAR10' :
train_loader, val_loader, test_loader = data_augmentation.load_CIFAR10(args.data_path, 'None', '', '', args.batch_size, args.num_workers, rs)
in_channel, H, W, out_num = 3, 32, 32, 10
elif args.data == 'CIFAR100' :
train_loader, val_loader, test_loader = data_augmentation.load_CIFAR100(args.data_path, 'None', '', '', args.batch_size, args.num_workers, rs)
in_channel, H, W, out_num = 3, 32, 32, 100
elif args.data == 'ImageNet' :
            train_loader, val_loader, test_loader = data_augmentation.load_ImageNet(args.data_path, 'None', '', '', args.batch_size, args.num_workers, rs, args.crop)
in_channel, H, W, out_num = 3, 224, 224, 1000
else :
            raise Exception('Dataset must be one of "CIFAR10", "CIFAR100", or "ImageNet"')
model = model_loader(args.model, activation, activation_params, rs, out_num)
model.to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, last_epoch=-1)
print('model make', end='\n')
best_model = None
best_acc1 = 0
start_time = time.time()
start_iter = 0
if resume and os.path.isfile(save_path + file_name + '_checkpoint.pth.tar') :
print('model resume', end='\n')
checkpoint = torch.load(save_path + file_name + '_checkpoint.pth.tar', map_location=device)
start_iter = checkpoint['iter']
best_acc1 = checkpoint['best_acc1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['scheduler'])
iter = start_iter
while iter < args.max_iter :
iter, lr_scheduler = train(train_loader, model, criterion, optimizer, lr_scheduler, device, iter)
val_loss, val_acc1, val_acc5 = validate(val_loader, model, criterion, device)
train_loss, train_acc1, train_acc5 = validate(train_loader, model, criterion, device)
t = time.time()
is_best = val_acc1 > best_acc1
best_acc1 = max(val_acc1, best_acc1)
if is_best :
best_model = model.state_dict()
best_iter = iter
print(
'Updated | Iter {}/{} | {}% | {} min | {} min left | Train loss {} | top1 {} | top5 {} | val loss {} | top1 {} | top5 {}'.format(
iter, args.max_iter, round(100*(iter+1)/(args.max_iter)),
round((t-start_time)/60), round((t-start_time)/60*((args.max_iter-iter-1)/(iter+1))),
round(train_loss, 3), round(train_acc1.item(), 3), round(train_acc5.item(), 3),
round(val_loss, 3), round(val_acc1.item(), 3), round(val_acc5.item(), 3)
) + ' '*10, end='\r'
)
save_checkpoint(
{
'iter' : iter + 1,
'time' : t,
'state_dict' : model.state_dict(),
'best_model' : best_model,
'best_acc1' : best_acc1,
'optimizer' : optimizer.state_dict(),
'scheduler' : lr_scheduler.state_dict(),
}, is_best, save_path + file_name + '_checkpoint.pth.tar'
)
if iter > args.max_iter :
break
        if save :
            torch.save(best_model, '{}.pth.tar'.format(save_path + file_name))
model.load_state_dict(best_model)
test_loss, test_acc1, test_acc5 = validate(test_loader, model, criterion, device)
print("{} | {} | {} | Test | acc1 {} | acc5 {} ".format(args.model, trial, args.activation, test_acc1, test_acc5) + ' '*20, end = '\n')
| 8,519 | 42.469388 | 165 | py |
EQL | EQL-master/EQL-DIV-ICML-Python3/createjobs-f1.py | #!/usr/bin/python
# sample Python script to create SGE jobs (sun grid engine)
# for scanning a parameter space
import os
jobname = "F1_" # should be short
name = "" + jobname # name of shell scripts
res = "result_f1-EQLDIV"
submitfile = "submit_" + name + ".sh"
SUBMIT = open(submitfile,'w')
SUBMIT.write("#/bin/bash\n")
pwd=os.getcwd()
#number of epochs
e=10000
regstart = e//4
regend = e - e//20
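# L1 regularization is only active in the middle phase of training: it switches on
# after the first quarter of the epochs and off again for the final 5%.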
i = 0
for l1 in [10**(-l1exp/10.0) for l1exp in range(35,60)]:
for l_n in [2,3]:
for normal in ([True] if i > 0 else [False, True]):
epochs = e if normal else 1
result = res + "/" if normal else res + "test/"
base_cmd = ["python src/mlfg_final.py -i ", str(i),
" -f ", result,
" -d data/f1-n-10k-1.dat.gz",
" --extrapol=data/f1-n-5k-1-test.dat.gz",
" --extrapol=data/f1-n-5k-1-2-test.dat.gz",
" --epochs=", str(epochs),
" --l1=", str(l1),
" --layers=", str(l_n),
" --iterNum=1",
" --reg_start=", str(regstart),
" --reg_end=", str(regend),
" -o",
]
cmd= "".join(base_cmd)
script_fname = ((name + str(i)) if normal else name + "000_test") + ".sh"
if normal:
SUBMIT.write("./" + script_fname + "\n")
with open(script_fname, 'w') as FILE:
FILE.write("""\
#!/bin/bash
# %(jobname)s%(i)d
cd %(pwd)s
export OMP_NUM_THREADS=1
export PATH=${HOME}/bin:/usr/bin:${PATH}
if %(cmd)s; then
rm -f %(script_fname)s
else
touch %(script_fname)s.dead
fi
""" % locals())
            os.chmod(script_fname, 0o755)
i += 1
SUBMIT.close()
os.chmod(submitfile, 0o755)
print("Jobs:" , i)
with open("finished_" + name + ".sh",'w') as FINISHED:
FINISHED.write("#!/bin/bash\nset -e\n" +
'grep "#" $(ls ' + res + '/*.res -1 | head -n 1) >' + res + '/all.dat\n' +
"cat " + res + '/*.res | grep -v "#" >>' + res + '/all.dat\n' +
"cp " + __file__ + ' ' + res + '/\n' +
"rm finished_" + name+ '.sh ' + submitfile + '\n')
os.chmod("finished_" + name + ".sh",0755)
| 2,187 | 28.173333 | 92 | py |
EQL | EQL-master/EQL-DIV-ICML-Python3/__init__.py | 0 | 0 | 0 | py |
|
EQL | EQL-master/EQL-DIV-ICML-Python3/src/graph_separate.py | from graphviz import Digraph
import numpy as np
def getEdges(matrix,inputnames,outputnames,thresh=0.1):
edges=[]
it = np.nditer(matrix, flags=['multi_index'])
while not it.finished:
if np.abs(it[0])>thresh:
edges.append((inputnames[it.multi_index[0]],outputnames[it.multi_index[1]],np.round(it[0].item(),2)))
it.iternext()
return edges
def functionGraph1H(classifier,thresh=0.1):
    return functionGraph(classifier, thresh)
def functionGraph(classifier,thresh=0.1):
n_in,n_out = classifier.n_in, classifier.n_out
try:
shortcuts = classifier.with_shortcuts
except AttributeError:
shortcuts=False
names_in = [ 'x' + str(s) for s in range(1,n_in+1)]
names_out= [ 'y' + str(s) for s in range(1,n_out+1)]
alledges = []
allbiases = []
for l in range(len(classifier.hidden_layers)+1):
if l==0:
inp=names_in
else:
inp = classifier.hidden_layers[l-1].getNodeFunctions()
if l==len(classifier.hidden_layers):
if shortcuts:
inp = np.concatenate([ l.getNodeFunctions() for l in classifier.hidden_layers ])
out = names_out
ps = classifier.output_layer.get_params()
W = ps[0]
b = ps[1]
else:
out = classifier.hidden_layers[l].getWeightCorrespondence()
ps = classifier.hidden_layers[l].get_params()
W = ps[0]
b = ps[1]
alledges.extend(getEdges(W, inp, out ,thresh))
allbiases.extend(list(zip(out,b)))
nodes=list(set([e[0] for e in alledges])) + list(set([e[1] for e in alledges]))
def isArgument(name):
return ':' in name
def arity2node(name,b1, b2):
return '''<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
<TR>
<TD PORT="1">''' + b1 + '''</TD>
<TD PORT="2">''' + b2 + '''</TD>
</TR>
<TR><TD COLSPAN="2">''' + name + '''</TD></TR>
</TABLE>>'''
arity2set = set([n.split(':')[0] for n in nodes if isArgument(n)])
arity2 = list(arity2set)
arity1 = list(set([n for n in nodes if not isArgument(n)]) - arity2set)
bias_dict = dict(allbiases)
dot = Digraph(comment='Function Graph')
for n in arity1:
if n in bias_dict:
dot.node(n,str(np.round(bias_dict[n],2)) + '\n' + n.split('-')[0])
else:
dot.node(n,n.split('-')[0])
for n in arity2:
dot.node(n,arity2node(n.split('-')[0],
str(np.round(bias_dict.get(n+ ':1',0),2)),
str(np.round(bias_dict.get(n+ ':2',0),2)) ),shape='plaintext')
for e in alledges:
dot.edge(e[0], e[1], label=str(e[2]))
return dot
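# Usage sketch (illustrative): after training, the sparse network can be rendered as
# an expression graph, e.g.
#   dot = functionGraph(classifier, thresh=0.01)
#   dot.render('function_graph', format='pdf')  # writes function_graph.pdf via graphviz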
| 2,731 | 32.317073 | 113 | py |
EQL | EQL-master/EQL-DIV-ICML-Python3/src/mlp.py | """
Multilayer function graph for system identification
This will simply use regression with squared error and an
L1 norm on the weights to get a sparse representation.
It follows the multilayer perceptron style, but has more complicated
nodes.
.. math:: Each layer is
y(x) = {f^{(1)}(W^{(1)} x), f^{(2)}(W^{(2)} x), .., f^{(k)}(W^{(k)} x), g^{(1)}(W^{(k+1)}x, W^{(k+2)}x) }
We group the weight matrices W1-Wk etc.
"""
import time
import os
import sys
import timeit
import pickle
import getopt
import csv
import numpy as np
import utils
import theano
import theano.tensor as T
#import lasagne.updates as Lupdates
theano.config.floatX = 'float64'
__docformat__ = 'restructedtext en'
def logistic(x):
return 1 / (1 + T.exp(-x))
class LinearRegression(object):
"""Regression layer (linear regression)
"""
def __init__(self, rng, inp, n_in, n_out):
""" Initialize the parameters of the linear regression
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type inp: theano.tensor.TensorType
:param inp: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels/outputs lie
"""
# initialize with random weights W as a matrix of shape (n_in, n_out)
W_values = np.asarray(
rng.uniform(
low=-np.sqrt(1.0 / (n_in + n_out)),
high=np.sqrt(1.0 / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
self.W = theano.shared(value=W_values, name='W', borrow=True)
# initialize the biases b as a vector of n_out 0s
self.b = theano.shared(
value=np.zeros(
(n_out,),
dtype=theano.config.floatX
),
name='b',
borrow=True
)
self.output = T.dot(inp, self.W) + self.b
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = inp
self.L1 = abs(self.W).sum()
self.L2_sqr = T.sum(self.W ** 2)
def get_params(self):
paramfun = theano.function(inputs=[], outputs=self.params)
return paramfun()
def set_params(self, newParams):
newb = T.dvector('newb')
newW = T.dmatrix('newW')
param_fun = theano.function(inputs=[newW, newb], outputs=None, updates=[(self.W, newW), (self.b, newb)])
return param_fun(newParams[0], newParams[1])
def get_weights(self):
return self.W.get_value()
def loss(self, y):
"""Return the mean square error of the prediction.
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct value
"""
return T.mean(T.sqr(self.output - y))
class HiddenLayer(object):
def __init__(self, rng, inp, n_in, n_units, layer_idx, W=None, b=None):
"""
Hidden layer of Multi layer network
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type inp: theano.tensor.dmatrix
:param inp: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_units: int
:param n_units: number of hidden nodes
"""
self.layer_idx = layer_idx
self.input = inp
n_out = (n_units)
self.n_out = n_out
# `W` is initialized with `W_values` which is uniformely sampled
# from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
# May need other values here.
if W is None:
W_values = np.asarray(
rng.uniform(
low=-np.sqrt(6. / (n_in + n_out)),
high=np.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = np.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
node_inputs = T.dot(inp, self.W) + self.b
self.output = T.tanh(node_inputs)
self.params = [self.W, self.b]
self.L1 = abs(self.W).sum()
self.L2_sqr = T.sum(self.W ** 2)
def get_params(self):
paramfun = theano.function(inputs=[], outputs=self.params)
return paramfun()
def set_params(self, newParams):
newb = T.dvector('newb')
newW = T.dmatrix('newW')
param_fun = theano.function(inputs=[newW, newb], outputs=None, updates=[(self.W, newW), (self.b, newb)])
return param_fun(newParams[0], newParams[1])
def get_weights(self):
return self.W.get_value()
class MLP(object):
"""Multi-Layer Function Graph
A multilayer function graph, like a artificial neural network model
that has one layer or more of hidden units and various activations.
"""
def __init__(self, rng, n_in, n_units, n_out, n_layer=1, gradient=None):
"""Initialize the parameters for the multilayer function graph
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_layer: int
:param n_layer: number of hidden layers
:type n_units: int
:param n_units: number of nodes per hidden layer
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
self.input = T.matrix('input') # the data is presented as vector input
        self.labels = T.matrix('labels')  # the labels are presented as a vector of continuous values
self.n_layers = n_layer
self.hidden_layers = []
self.params = []
self.n_in = n_in
self.n_out = n_out
for l in range(n_layer):
if l == 0:
layer_input = self.input
n_input = n_in
else:
layer_input = self.hidden_layers[l - 1].output
n_input = self.hidden_layers[l - 1].n_out
hiddenLayer = HiddenLayer(
rng=rng,
inp=layer_input,
n_in=n_input,
n_units=n_units,
layer_idx=l,
)
self.hidden_layers.append(hiddenLayer)
self.params.extend(hiddenLayer.params)
# The linear output layer gets as input the hidden units
# of the hidden layer
self.output_layer = LinearRegression(
rng=rng,
inp=self.hidden_layers[-1].output,
n_in=self.hidden_layers[-1].n_out,
n_out=n_out
)
self.params.extend(self.output_layer.params)
self.evalfun = theano.function(inputs=[self.input], outputs=self.output_layer.output)
L1_reg = T.dscalar('L1_reg')
L2_reg = T.dscalar('L2_reg')
self.L1 = self.output_layer.L1 + sum([l.L1 for l in self.hidden_layers])
self.L2_sqr = self.output_layer.L2_sqr + sum([l.L2_sqr for l in self.hidden_layers])
self.loss = self.output_layer.loss
self.errors = self.loss
self.cost = (self.loss(self.labels) + L1_reg * self.L1 + L2_reg * self.L2_sqr)
learning_rate = T.dscalar('learning_rate')
updates = []
if gradient is None:
gradient = "sgd"
print("Gradient:", gradient)
if gradient == 'sgd':
gparams = [T.grad(self.cost, param) for param in self.params]
updates = [
(param, param - learning_rate * gparam)
for param, gparam in zip(self.params, gparams)
]
# elif gradient == 'adam':
# updates = Lupdates.adam(self.cost, self.params, learning_rate)
else:
assert ("unknown gradient " + gradient == False)
self.train_model = theano.function(
inputs=[self.input, self.labels, L1_reg, L2_reg, learning_rate],
outputs=self.cost,
updates=updates,
)
self.test_model = theano.function(
inputs=[self.input, self.labels],
outputs=self.loss(self.labels),
)
self.validate_model = theano.function(
inputs=[self.input, self.labels],
outputs=self.errors(self.labels),
)
def get_params(self):
paramfun = theano.function(inputs=[], outputs=self.params)
return paramfun()
def get_state(self):
return [l.get_params() for l in self.hidden_layers] + [self.output_layer.get_params()]
def set_state(self, newState):
for (s, l) in zip(newState, self.hidden_layers + [self.output_layer]):
l.set_params(s)
def get_active_units(self, thresh=0.1):
# count units with nonzero input * output weights
# in principle one could make a backward scan and identify units without path to the output
total = 0
for layer_idx in range(0, self.n_layers):
layer = self.hidden_layers[layer_idx]
in_weights = layer.get_weights()
out_weights = self.hidden_layers[layer_idx + 1].get_weights() if layer_idx + 1 < self.n_layers \
else self.output_layer.get_weights()
# noinspection PyTypeChecker
in_weight_norm = np.linalg.norm(in_weights, axis=0, ord=1)
# noinspection PyTypeChecker
out_weight_norm = np.linalg.norm(out_weights, axis=1, ord=1)
total += sum(
(out_weight_norm * in_weight_norm) > thresh * thresh)
return total
def get_active_units_old(self, thresh=0.05):
# quick hack: count units with nonzero output weights not counting the inputs
total = 0
for layer_idx in range(1, self.n_layers + 1):
layer = self.hidden_layers[layer_idx] if layer_idx < self.n_layers else self.output_layer
# noinspection PyTypeChecker
out_weight_norm = np.linalg.norm(layer.get_weights(), axis=1, ord=1)
total += sum(out_weight_norm > thresh)
return total
def evaluate(self, input):
return self.evalfun(input)
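# Minimal construction sketch (added for illustration; the sizes are arbitrary): a tanh
# network with 2 inputs, 1 output and 3 hidden layers of 10 units, evaluated on random data.
def _mlp_usage_example():
    rng = np.random.RandomState(0)
    net = MLP(rng=rng, n_in=2, n_units=10, n_out=1, n_layer=3, gradient='sgd')
    x = np.random.rand(5, 2)
    return net.evaluate(x)  # array of shape (5, 1)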
def test_mlp(datasets, learning_rate=0.01, L1_reg=0.001, L2_reg=0.00, n_epochs=200,
batch_size=20, n_layer=1, n_units=30, classifier=None, init_state=None,
gradient=None, verbose=True, param_store=None, id=None,
validate_every=50, reg_start=0, reg_end=None
):
"""
:type datasets: ((matrix,matrix),(matrix,matrix),(matrix,matrix))
:param datasets: ((train-x,train-y),(valid-x,valid-y),(test-x,test-y))
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient
:type L1_reg: float
:param L1_reg: L1-norm's weight when added to the cost (see
regularization)
:type L2_reg: float
:param L2_reg: L2-norm's weight when added to the cost (see
regularization)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
"""
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
if len(datasets) > 2 and len(datasets[2]) == 2:
test_set_x, test_set_y = datasets[2]
n_test_batches = test_set_x.shape[0] // batch_size
else:
test_set_x = test_set_y = None
n_test_batches = 0
n_train_batches = train_set_x.shape[0] // batch_size
n_valid_batches = valid_set_x.shape[0] // batch_size
inputdim = len(datasets[0][0][0])
outputdim = len(datasets[0][1][0])
if verbose: print("Input/output dim:", (inputdim, outputdim))
if verbose: print("Training set, test set:", (train_set_x.shape[0], test_set_x.shape[0]))
######################
# BUILD ACTUAL MODEL #
######################
print('... building the model')
rng = np.random.RandomState(int(time.time()) if id is None else id)
if classifier is None:
classifier = MLP(
rng=rng,
n_in=inputdim,
n_units=n_units,
n_out=outputdim,
n_layer=n_layer,
gradient=gradient,
)
if init_state:
classifier.set_state(init_state)
###############
# TRAIN MODEL #
###############
print('... training')
sys.stdout.flush()
# early-stopping parameters
improvement_threshold = 0.99 # a relative improvement of this much is considered significant
best_validation_error = np.inf
this_validation_error = np.inf
best_epoch = 0
test_score = 0.
best_state = classifier.get_state()
start_time = timeit.default_timer()
epoch = 0
done_looping = False
train_errors = []
validation_errors = []
test_errors = []
if param_store is not None:
param_store.append(classifier.get_params())
while (epoch < n_epochs) and (not done_looping):
epoch += 1
reg_factor = 0.0
        if reg_end is not None and reg_start < epoch <= reg_end:
reg_factor = 1.0
for minibatch_index in range(n_train_batches):
index = minibatch_index
minibatch_avg_cost = classifier.train_model(
input=train_set_x[index * batch_size: (index + 1) * batch_size],
labels=train_set_y[index * batch_size: (index + 1) * batch_size],
L1_reg=L1_reg * reg_factor,
L2_reg=L2_reg * reg_factor,
learning_rate=learning_rate,
)
# if verbose:
# print('epoch %i, minibatch %i cost: %f' %(epoch, minibatch_index, minibatch_avg_cost))
# if(minibatch_avg_cost>2):
# np.set_printoptions(precision=4,suppress=True)
# print(classifier.get_params())
train_errors.append([epoch, minibatch_avg_cost])
if param_store is not None:
param_store.append(classifier.get_params())
# perform validation
if epoch == 1 or epoch % validate_every == 0 or epoch == n_epochs:
this_validation_errors = [classifier.validate_model(
input=valid_set_x[index * batch_size:(index + 1) * batch_size],
labels=valid_set_y[index * batch_size:(index + 1) * batch_size])
for index in range(n_valid_batches)]
this_validation_error = np.asscalar(np.mean(this_validation_errors))
validation_errors.append([epoch, this_validation_error])
if verbose:
print((
'epoch %i, minibatch %i/%i, minibatch_avg_cost %f validation error %f' %
(
epoch,
minibatch_index + 1,
n_train_batches,
minibatch_avg_cost,
this_validation_error
)
))
# test it on the test set
if test_set_x is not None:
test_losses = [classifier.test_model(
input=test_set_x[index * batch_size:(index + 1) * batch_size],
labels=test_set_y[index * batch_size:(index + 1) * batch_size])
for index in range(n_test_batches)]
this_test_score = np.asscalar(np.mean(test_losses))
test_errors.append([epoch, this_test_score])
else:
this_test_score = np.inf
# if we got the best validation score until now
if this_validation_error < best_validation_error:
if this_validation_error < best_validation_error * improvement_threshold:
best_state = classifier.get_state()
best_validation_error = this_validation_error
best_epoch = epoch
test_score = this_test_score
if verbose:
print(((' epoch %i, minibatch %i/%i, test error of '
'best model %f') %
(epoch, minibatch_index + 1, n_train_batches,
test_score)))
if epoch % 1000 == 0:
print("Epoch: ", epoch, " Best val error: ", best_validation_error)
sys.stdout.flush()
end_time = timeit.default_timer()
time_required = (end_time - start_time) / 60.
print((('Optimization complete. Best validation score of %f '
'obtained at epoch %i, with test performance %f ') %
(best_validation_error, best_epoch + 1, test_score)))
print(('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % time_required), file=sys.stderr)
if verbose:
np.set_printoptions(precision=4, suppress=True)
print((classifier.get_params()))
return {'train_losses': np.asarray(train_errors),
'val_errors': np.asarray(validation_errors),
'test_errors': np.asarray(test_errors),
'classifier': classifier,
'test_score': test_score,
'val_score': this_validation_error,
'best_val_score': best_validation_error,
'best_epoch': best_epoch,
'best_state': best_state,
'num_active': classifier.get_active_units(),
'runtime': time_required
}
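# Illustrative call (not part of the original script): `datasets` follows the
# ((train_x, train_y), (valid_x, valid_y), (test_x, test_y)) convention produced by
# utils.load_data, e.g.
#   result = test_mlp(datasets=utils.load_data('data/f1-n-10k-1.dat.gz'),
#                     n_epochs=100, n_layer=2, n_units=10, L1_reg=1e-4)
#   print(result['best_val_score'], result['num_active'])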
def usage():
print((sys.argv[0] + "[-i id -d dataset -p extrapolationdataset -l layers -e epochs -n nodes" +
" -r learningrate --l1=l1reg --l2=l2reg --shortcut --resfolder" +
" --gradient=sgd|adam --initfile=statefile -v -o]"))
if __name__ == "__main__":
dataset_file = None
extra_pol_test_sets = []
extra_pols = []
n_epochs = 1200
n_layers = 3
n_nodes = 10
batch_size = 20
init_file = None
init_state = None
gradient = "sgd"
L1_reg = 0.00001
L2_reg = 0.00001
learning_rate = 0.01
reg_start = 0
reg_end = None
output = False
verbose = 0
id = np.random.randint(0, 1000000)
result_folder = "./"
try:
opts, args = getopt.getopt(sys.argv[1:], "hv:i:d:p:l:e:n:f:co",
["help", "verbose=", "id=", "dataset=", "extrapol=", "layers=", "epochs=",
"nodes=", "l1=", "l2=", "lr=", "resfolder=",
"batchsize=", "initfile=", "gradient=",
"reg_start=", "reg_end=", "output"
])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-v", "--verbose"):
verbose = int(arg)
elif opt in ("-i", "--id"):
id = int(arg)
elif opt in ("-d", "--dataset"):
dataset_file = arg
elif opt in ("-p", "--extrapol"):
extra_pol_test_sets.append(arg)
elif opt in ("-l", "--layers"):
n_layers = int(arg)
elif opt in ("-e", "--epochs"):
n_epochs = int(arg)
elif opt in ("--batchsize"):
batch_size = int(arg)
elif opt in ("--l1"):
L1_reg = float(arg)
elif opt in ("--l2"):
L2_reg = float(arg)
elif opt in ("--lr"):
learning_rate = float(arg)
elif opt in ("-n", "--nodes"):
n_nodes = int(arg)
elif opt in ("-c", "--shortcut"):
with_shortcuts = True
elif opt in ("--initfile"):
init_file = arg
elif opt in ("--gradient"):
gradient = arg
elif opt in ("--reg_start"):
reg_start = int(arg)
elif opt in ("--reg_end"):
reg_end = int(arg)
elif opt in ("-o", "--output"):
output = True
elif opt in ("-f", "--resfolder"):
result_folder = arg
# load dataset
if not dataset_file:
print("provide datasetfile!")
usage()
exit(1)
dataset = utils.load_data(dataset_file)
# load extrapolation test
if len(extra_pol_test_sets) > 0:
if verbose > 0:
print("do also extrapolation test(s)!")
extra_pols = [utils.load_data(test_set) for test_set in extra_pol_test_sets]
if init_file:
with open(init_file, 'rb') as f:
init_state = pickle.load(f)
print("load initial state from file " + init_file)
if not os.path.exists(result_folder):
os.makedirs(result_folder)
name = result_folder + str(id)
result = test_mlp(datasets=dataset, n_epochs=n_epochs, verbose=verbose > 0, learning_rate=learning_rate,
L1_reg=L1_reg, L2_reg=L2_reg, n_layer=n_layers, n_units=n_nodes, id=id,
gradient=gradient, batch_size=batch_size, init_state=init_state,
reg_start=reg_start, reg_end=reg_end
)
classifier = result['classifier']
with open(name + '.best_state', 'wb') as f:
pickle.dump(result['best_state'], f, protocol=pickle.HIGHEST_PROTOCOL)
with open(name + '.last_state', 'wb') as f:
pickle.dump(classifier.get_state(), f, protocol=pickle.HIGHEST_PROTOCOL)
extra_scores = []
extra_scores_best = []
for extra in extra_pols:
extra_set_x, extra_set_y = extra[0]
extra_scores.append(classifier.test_model(input=extra_set_x, labels=extra_set_y))
# also for best_state
classifier.set_state(result['best_state'])
for extra in extra_pols:
extra_set_x, extra_set_y = extra[0]
extra_scores_best.append(classifier.test_model(input=extra_set_x, labels=extra_set_y))
result_line = ""
with open(name + '.res', 'w') as f:
if (id == 0):
f.write('#C layers epochs nodes lr L1 L2 batchsize regstart regend' +
' id dataset gradient numactive bestepoch runtime' +
"".join([' extrapol' + str(i) for i in range(1, len(extra_scores) + 1)]) +
"".join([' extrapolbest' + str(i) for i in range(1, len(extra_scores_best) + 1)]) +
' valerror valerrorbest testerror\n')
f.write('# extra datasets: ' + " ".join(extra_pol_test_sets) + '\n')
result_line = [str(n_layers), str(n_epochs), str(n_nodes), str(learning_rate), str(L1_reg), str(L2_reg),
str(batch_size), str(reg_start), str(reg_end),
str(id), dataset_file, gradient,
str(result['num_active']), str(result['best_epoch']),
str(result['runtime'])] + \
[str(e) for e in extra_scores] + \
[str(e) for e in extra_scores_best] + \
[str(result['val_score']), str(result['best_val_score']), str(result['test_score'])]
f.write(str.join('\t', result_line) + '\n')
    with open(name + '.validerror', 'w', newline='') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "val_error"]])
a.writerows([["# "] + result_line])
a.writerows(result['val_errors'])
if output:
        with open(name + '.trainloss', 'w', newline='') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "train_loss"]])
a.writerows([["# "] + result_line])
a.writerows(result['train_losses'])
if len(result['test_errors']) > 0:
            with open(name + '.testerrors', 'w', newline='') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "test_error"]])
a.writerows([["# "] + result_line])
a.writerows(result['test_errors'])
| 22,132 | 31.500734 | 110 | py |
EQL | EQL-master/EQL-DIV-ICML-Python3/src/utils.py | """
Utility functions
"""
import csv
import numpy as np
import theano
from itertools import chain
import os
import gzip
import pickle
#import dill
__docformat__ = 'restructedtext en'
def softmax(x):
e_x = np.exp(x - np.max(x))
out = e_x / e_x.sum()
return out
def relative_prob(x):
e_x = (x - np.min(x))
out = e_x / e_x.sum()
return out
def sample_from_dist(pdf,rnd=None):
return samples_from_dist(pdf, 1, rnd)[0]
def samples_from_dist(pdf,n=1,rnd=None):
if rnd is None:
return np.random.choice(len(pdf),n,p=pdf)
else:
return rnd.choice(len(pdf),n,p=pdf)
def samples_distinct_from_dist(pdf,n=1,rnd=None):
samples=list(set(samples_from_dist(pdf,3*n,rnd)))
if len(samples)<n:
samples=list(set(samples_from_dist(pdf,50*n,rnd)))
if len(samples)<n:
return samples
else:
return samples[:n]
def is_sequence(obj):
return hasattr(obj, '__len__') and hasattr(obj, '__getitem__')
def flatten(l):
return list(chain.from_iterable(l))
def normalize(vec):
n = np.linalg.norm(vec)
if n > 0:
return vec / n
else:
return vec
def add_diagonal_limit(mat, val, max_size):
di = np.diag_indices(min(min(mat.shape), max_size), mat.ndim)
mat[di] += val
def wrap_mat_to_vec_func_3(func, *args):
return lambda p1, p2, p3: func(np.expand_dims(p1, axis=0),
np.expand_dims(p2, axis=0),
np.expand_dims(p3, axis=0), *args)
def wrap_mat_to_vec_func_3_0(func, *args):
return lambda p1, p2, p3: func(np.expand_dims(p1, axis=0),
np.expand_dims(p2, axis=0),
np.expand_dims(p3, axis=0), *args)[0]
def wrap_mat_to_vec_func_2(func, *args):
return lambda p1, p2: func(np.expand_dims(p1, axis=0),
np.expand_dims(p2, axis=0),
*args)
def wrap_mat_to_vec_func_2_0(func, *args):
return lambda p1, p2: func(np.expand_dims(p1, axis=0),
np.expand_dims(p2, axis=0),
*args)[0]
def cast_dataset_to_floatX(data_xy):
data_x, data_y = data_xy
return np.asarray(data_x,dtype=theano.config.floatX),np.asarray(data_y,dtype=theano.config.floatX)
def cast_to_floatX(array):
return np.asarray(array, dtype=theano.config.floatX)
def load_from_hold(name):
assert (not name is None)
if os.path.exists(name):
with open(name, 'rb') as f:
data = pickle.load(f, encoding='latin1')
print("loaded data from the file " + name)
else:
print("Initialising with an empty list")
data = []
return data
def dump_for_hold(data, name):
print("Dumping:", name)
assert (not name is None) and (not data is None)
with open(name , 'wb') as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
def load_data(dataset):
''' Loads the dataset
:type dataset: string
:param dataset: the path to the dataset
'''
#############
# LOAD DATA #
#############
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_path = os.path.join(
os.path.split(__file__)[0],
"..",
"data",
dataset
)
if os.path.isfile(new_path):
dataset = new_path
if not os.path.isfile(dataset):
print('cannot find dataset', dataset)
return
print('... loading data ' + dataset)
# Load the dataset
f = gzip.open(dataset, 'rb')
# try:
datasets = pickle.load(f, encoding='latin1')
# except ImportError:
# f.close()
# f = gzip.open(dataset, 'rb')
# datasets = dill.load(f)
f.close()
return datasets
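# The three split helpers below partition samples into train/validation(/test) sets:
# splitDataSet keeps the given order (80/10/10), splitDataSetShuffle permutes first,
# and splitDataSetNoTest returns only a 90/10 train/validation split.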
def splitDataSet(inputs, outputs):
assert len(inputs) == len(outputs)
size = len(inputs)
    ts = size * 80 // 100
    vs = size * 10 // 100
train_set = (inputs[:ts], outputs[:ts])
valid_set = (inputs[ts:ts + vs], outputs[ts:ts + vs])
test_set = (inputs[ts + vs:], outputs[ts + vs:])
return train_set, valid_set, test_set
def splitDataSetShuffle(inputs, outputs,percent_val_test=10):
assert len(inputs) == len(outputs)
size = len(inputs)
shuffle = np.random.permutation(size)
inps = np.asarray(inputs)[shuffle]
outs = np.asarray(outputs)[shuffle]
    ts = size * (100-2*percent_val_test) // 100
    vs = size * percent_val_test // 100
train_set = (inps[:ts], outs[:ts])
valid_set = (inps[ts:ts + vs], outs[ts:ts + vs])
test_set = (inps[ts + vs:], outs[ts + vs:])
return train_set, valid_set, test_set
def splitDataSetNoTest(inputs,outputs):
assert len(inputs) == len(outputs)
size=len(inputs)
    ts=size*90//100
train_set=(inputs[:ts],outputs[:ts])
valid_set=(inputs[ts:],outputs[ts:])
return train_set, valid_set
def addTestSet(train_val,test_set):
return train_val[0], train_val[1], test_set
# cuts dataset into those where the input vectors that have a maxnorm smaller or equal to cut and the rest
def cutDataSet(inputs,outputs, cut):
sel = np.linalg.norm(inputs,ord=np.inf,axis=1) <= cut
# sel = np.array([not(all(x<=cut) and all(x>=-cut)) for x in inputs])
return (inputs[sel], outputs[sel]),(inputs[np.logical_not(sel)], outputs[np.logical_not(sel)])
def splitTrainValSets(inputs,outputs,cut):
data_full=splitDataSetNoTest(inputs,outputs)
(train_all,val_all) = data_full
dat_sel = cutDataSet(train_all[0],train_all[1], cut)
return data_full, dat_sel
def addNoise(data, size):
noise = np.random.normal(0,size,data.shape)
return data + noise
def loadState(filename):
with open(filename, "rb") as f:
return pickle.load(f, encoding='latin1')
def readCSVTable(filename, dtype='|S40'):
data = []
comments = []
    with open(filename, 'r', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter='\t')
for row in reader:
if row[0][0] != '#':
data.append(row)
else:
comments.append(row)
return (np.asarray(data, dtype=dtype), comments)
def getIdx(header, colname): return np.asscalar(np.where(np.asarray(header) == colname)[0])
| 6,415 | 27.264317 | 106 | py |
EQL | EQL-master/EQL-DIV-ICML-Python3/src/graph.py | from graphviz import Digraph
import numpy as np
def getEdges(matrix,inputnames,outputnames,thresh=0.1):
edges=[]
it = np.nditer(matrix, flags=['multi_index'])
while not it.finished:
if np.abs(it[0])>thresh:
edges.append((inputnames[it.multi_index[0]],outputnames[it.multi_index[1]],np.round(it[0].item(),2)))
it.iternext()
return edges
def functionGraph1H(classifier,thresh=0.1):
    return functionGraph(classifier, thresh)
def functionGraph(classifier,thresh=0.1):
n_in,n_out = classifier.n_in, classifier.n_out
try:
shortcuts = classifier.with_shortcuts
except AttributeError:
shortcuts=False
names_in = [ 'x' + str(s) for s in range(1,n_in+1)]
names_out= [ 'y' + str(s) for s in range(1,n_out+1)]
alledges = []
allbiases = []
for l in range(len(classifier.hidden_layers)+1):
if l==0:
inp=names_in
else:
inp = classifier.hidden_layers[l-1].getNodeFunctions()
if l==len(classifier.hidden_layers):
if shortcuts:
inp = np.concatenate([ l.getNodeFunctions() for l in classifier.hidden_layers ])
out = names_out
ps = classifier.output_layer.get_params()
W = ps[0]
b = ps[1]
else:
out = classifier.hidden_layers[l].getWeightCorrespondence()
ps = classifier.hidden_layers[l].get_params()
W = ps[0]
b = ps[1]
alledges.extend(getEdges(W, inp, out ,thresh))
allbiases.extend(list(zip(out,b)))
nodes=list(set([e[0] for e in alledges])) + list(set([e[1] for e in alledges]))
def isArgument(name):
return ':' in name
def arity2node(name,b1, b2):
return '''<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
<TR>
<TD PORT="1">''' + b1 + '''</TD>
<TD PORT="2">''' + b2 + '''</TD>
</TR>
<TR><TD COLSPAN="2">''' + name + '''</TD></TR>
</TABLE>>'''
arity2set = set([n.split(':')[0] for n in nodes if isArgument(n)])
arity2 = list(arity2set)
arity1 = list(set([n for n in nodes if not isArgument(n)]) - arity2set)
bias_dict = dict(allbiases)
dot = Digraph(comment='Function Graph')
for n in arity1:
if n in bias_dict:
dot.node(n,str(np.round(bias_dict[n],2)) + '\n' + n.split('-')[0])
else:
dot.node(n,n.split('-')[0])
for n in arity2:
dot.node(n,arity2node(n.split('-')[0],
str(np.round(bias_dict.get(n+ ':1',0),2)),
str(np.round(bias_dict.get(n+ ':2',0),2)) ),shape='plaintext')
for e in alledges:
dot.edge(e[0], e[1], label=str(e[2]))
return dot
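# Usage sketch (assuming a trained classifier, e.g. an MLFG instance from
# mlfg_final.py):
#   dot = functionGraph(classifier, thresh=0.1)
#   dot.render('function_graph.gv', view=False)   # writes the .gv source plus a rendered file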
| 2,731 | 32.317073 | 113 | py |
EQL | EQL-master/EQL-DIV-ICML-Python3/src/mlfg_final.py | """
Multilayer function graph for system identification.
This is able to learn typical algebraic expressions with
maximal multiplicative/application term length given by the number of layers.
We use regression with squared error and an
L1 norm on the weights to obtain sparse representations.
It follows the multilayer perceptron style, but has more complicated
nodes.
Each layer is
.. math:: y(x) = \{f^{(1)}(W^{(1)} x), f^{(2)}(W^{(2)} x), \dots, f^{(k)}(W^{(k)} x), g^{(1)}(W^{(k+1)}x, W^{(k+2)}x)\}
The linear output layer may receive all outputs from previous layers (with shortcuts)
We group the weight matrices W1-Wk etc.
"""
import time
import sys
import timeit
import getopt
import random
import numpy
import pickle
import theano.tensor as T
from theano import In
from theano.ifelse import ifelse
import lasagne.updates as Lupdates
# if problems with importing
# http://stackoverflow.com/questions/36088609/python-lasagne-importerror-cannot-import-batchnormlayer
from collections import OrderedDict
from utils import *
#from theano import config #remove
#config.floatX = 'float64' #remove
__docformat__ = 'restructedtext en'
class LinearRegression(object):
"""Regression layer (linear regression) #Add division over here
"""
def __init__(self, rng, inp, n_in, n_out, div_thresh, W=None, b=None):
""" Initialize the parameters of the linear regression
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type inp: theano.tensor.TensorType
:param inp: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels/outputs lie
"""
# attention: Formula : x*W + b where x is a row vector
if W is None:
# initialize with random weights W as a matrix of shape (n_in, n_out)
W_values = numpy.asarray(
# rng.uniform(low=-numpy.sqrt(1.0 / (n_in + n_out)),high=numpy.sqrt(1.0 / (n_in + n_out)),
# size=(n_in, n_out)),
rng.normal(loc=0, scale=numpy.sqrt(1.0 / (n_in + 2*n_out)), size=(n_in, 2*n_out)),
dtype=theano.config.floatX
)
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
# initialize the biases b as a vector of 2 times n_out 1s (we use one to implement a more linear activation at init time)
b = theano.shared(value=numpy.ones((2*n_out,), dtype=theano.config.floatX), name='b', borrow=True)
self.W = W
self.b = b
node_inputs = T.dot(inp, self.W) + self.b
# node_inputs is composed of input 1 and input 2 after another
# input1 = node_inputs[0:n_out]; input2 = node_inputs[n_out:2*n_out]
numerator = node_inputs[:, 0:n_out]
denominator = node_inputs[:, n_out:2*n_out]
self.output = self.activation(denominator, div_thresh) * numerator
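        # This implements the regularized division used by EQL-div:
        #   output = numerator * theta(denominator), theta(d) = 1/d if d >= div_thresh else 0,
        # i.e. the quotient numerator/denominator, forced to 0 close to a pole.
        # E.g. with div_thresh = 0.1: denominator 0.5 -> factor 2.0, denominator 0.05 -> factor 0.0.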
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = inp
self.L1 = abs(self.W).sum() + 0.01*abs(self.b).sum()
self.L2_sqr = T.sum(self.W ** 2) + 0.01*T.sum(self.b**2)
self.penalty = T.sum((div_thresh - denominator)*(denominator < div_thresh))
self.extrapol_loss = T.sum((abs(self.output)-100)*(abs(self.output)>100) + (div_thresh - denominator)*(denominator < div_thresh))
def activation(self,x,thresh):
return T.switch(x < thresh, 0.0, 1.0/x )
def get_params(self):
param_fun = theano.function(inputs=[], outputs=self.params)
return [np.asarray(p) for p in param_fun()]
def set_params(self, newParams):
newb = T.vector('newb')
newW = T.matrix('newW')
param_fun = theano.function(inputs=[newW, newb], outputs=None, updates=[(self.W, newW), (self.b, newb)])
return param_fun(newParams[0], newParams[1])
def get_state(self):
return self.get_params()
def set_state(self, newState):
self.set_params(newState)
def get_weights(self):
w_fun = theano.function(inputs=[], outputs=self.W)
return w_fun()
def set_out_weights(self, row, vec): # (row)
r = T.iscalar('row')
new = T.vector('new')
up_fun = theano.function(inputs=[r, new], outputs=self.W, updates=[(self.W, T.set_subtensor(self.W[r, :], new))])
up_fun(row, vec)
def loss(self, y):
"""Return the mean square error of the prediction.
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct value
"""
return T.mean(T.sqr(self.output - y))
class FGLayer(object):
def __init__(self, rng, inp, n_in, n_per_base, layer_idx,
basefuncs1=None, basefuncs2=None, W=None, b=None):
"""
Hidden layer of Multi layer function graph: units are fully-connected and have
the functions given by basefunc1 (arity 1) and basefunc2 (arity 2).
Weight matrix W is of shape (n_in+1,#f1*n_per_base+2*#f2*n_per_base),
where #f1=size(basefunc1), #f2=size(basefunc2)
output is computed as: basefunc1[i](dot(input,W))
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type inp: theano.tensor.matrix
:param inp: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_per_base: int
:param n_per_base: number of nodes per basis function
:type basefuncs1: [int]
        :param basefuncs1: indices of base functions of arity 1 to use (may contain duplicates)
         (list: [id sin cos])
        :type basefuncs2: [int]
        :param basefuncs2: indices of base functions of arity 2 to use (may contain duplicates)
         (list: [mult])
"""
#TODO: get rid of rectlin and div2
if basefuncs1 is None:
basefuncs1 = [0, 1, 2]
if basefuncs2 is None:
basefuncs2 = [0]
self.basefuncs1 = basefuncs1
self.basefuncs2 = basefuncs2
self.basefuncs1_uniq = list(set(basefuncs1))
self.n_basefuncs1_uniq = len(self.basefuncs1_uniq)
self.n_per_base = n_per_base
self.funcs1 = ['id', 'sin', 'cos']
self.funcs2 = ['mult']
self.layer_idx = layer_idx
self.input = inp
self.n_base1 = len(basefuncs1)
self.n_base2 = len(basefuncs2)
n_out = (self.n_base1 + self.n_base2) * n_per_base
n_w_out = (self.n_base1 + 2 * self.n_base2) * n_per_base
self.n_out = n_out
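        # Shape example (e.g. n_per_base=5 with the default basefuncs1=[0,1,2]
        # -> id/sin/cos and basefuncs2=[0] -> mult): n_base1=3, n_base2=1, so the
        # layer emits n_out = (3+1)*5 = 20 values while W has n_w_out = (3+2)*5 = 25
        # columns, because every binary node consumes two linear projections.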
# attention: Formula : g(x*W + b) where x is a row vector
# `W` is initialized with `W_values` which is uniformely sampled
# from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
# May need other values here.
if W is None:
W_values = numpy.asarray(
#rng.uniform(low=-numpy.sqrt(1. / (n_in + n_w_out)), high=numpy.sqrt(1. / (n_in + n_w_out)),
# size=(n_in, n_w_out)),
rng.normal(loc=0, scale=numpy.sqrt(1.0 / (n_in + n_w_out)), size=(n_in, n_w_out)),
dtype=theano.config.floatX
)
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_w_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
node_inputs = T.dot(inp, self.W) + self.b
# node_inputs.reshape((notes.shape[0],n_base1+2*n_base2,n_per_base))
z = node_inputs[:, :n_per_base * self.n_base1]
z1 = node_inputs[:, n_per_base * self.n_base1:n_per_base * (self.n_base1 + self.n_base2)]
z2 = node_inputs[:, n_per_base * (self.n_base1 + self.n_base2):]
node_type1_values = numpy.asarray(numpy.repeat(basefuncs1, n_per_base), dtype=np.int32)
self.nodes_type1 = theano.shared(value=node_type1_values,name='node_type1', borrow=False)
node_type2_values = numpy.asarray(numpy.repeat(basefuncs2, n_per_base), dtype=np.int32)
self.nodes_type2 = theano.shared(value=node_type2_values,name='node_type2', borrow=False)
fun1 = T.switch(T.eq(self.nodes_type1, 0), z, # identity
T.switch(T.eq(self.nodes_type1, 1), T.sin(z), # sine
T.cos(z))) # cosine
# further functions could be maxout, sqrt, exp?
fun2 = T.switch(T.eq(self.nodes_type2, 0), z1 * z2, # multiplication
# T.switch(T.eq(self.note_type2,1), z2 / (1 + T.exp(-z1)), # condition (does not work)
z1)
# StepOp(0.1)(z1) * z2, # if z1<0 then z2 else 0
self.output = T.concatenate([fun1, fun2], axis=1)
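        # The layer output stacks all unary node outputs first and the binary
        # node outputs (here: products z1*z2) after them; this ordering matches
        # getNodeFunctions()/getWeightCorrespondence() below.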
# parameters of the model
self.params = [self.W, self.b]
self.L1 = abs(self.W).sum() + 0.01*abs(self.b).sum()
self.L2_sqr = T.sum(self.W ** 2) + 0.01*T.sum(self.b**2)
def get_params(self):
fun = theano.function(inputs=[], outputs=self.params)
return [np.asarray(p) for p in fun()]
def set_params(self, newParams):
self.W.set_value(newParams[0])
self.b.set_value(newParams[1])
def get_state(self):
# fun=theano.function(inputs=[],outputs=[self.nodes_type1,self.nodes_type2])
return self.get_params() + [self.nodes_type1.get_value(), self.nodes_type2.get_value()]
def set_state(self, newState):
self.set_params(newState)
if len(newState) > 2:
self.nodes_type1.set_value(newState[2])
self.nodes_type2.set_value(newState[3])
else:
print("Not full reload: missing node-types")
def get_n_type1(self):
return self.n_base1 * self.n_per_base
def get_n_type2(self):
return self.n_base2 * self.n_per_base
def get_weights(self):
# w_fun=theano.function(inputs=[],outputs=self.W)
return self.W.get_value()
def get_in_weights(self, idx): # (column)
node_idx = T.iscalar('node-idx')
w_fun = theano.function(inputs=[node_idx], outputs=self.W[:, node_idx])
return w_fun(idx)
def set_out_weights(self, row, vec): # (row)
r = T.iscalar('row')
new = T.vector('new')
up_fun = theano.function(inputs=[r, new], outputs=self.W, updates=[(self.W, T.set_subtensor(self.W[r, :], new))])
up_fun(row, vec)
def set_in_weights(self, col, vec): # (col)
c = T.iscalar('col')
new = T.vector('new')
up_fun = theano.function(inputs=[c, new], outputs=self.W, updates=[(self.W, T.set_subtensor(self.W[:, c], new))])
up_fun(col, vec)
def get_bias(self, idx):
node_idx = T.iscalar('node-idx')
w_fun = theano.function(inputs=[node_idx], outputs=self.b[node_idx])
return w_fun(idx)
def set_bias(self, idx, value):
node_idx = T.iscalar('node-idx')
new = T.scalar('new')
up_fun = theano.function(inputs=[node_idx, new], outputs=None,
updates=[(self.b, T.set_subtensor(self.b[node_idx], new))])
up_fun(idx, value)
def get_nodes_type1(self):
# n_fun=theano.function(inputs=[], outputs=self.nodes_type1)
return self.nodes_type1.get_value()
def get_nodes_type2(self):
# n_fun=theano.function(inputs=[], outputs=self.nodes_type2)
return self.nodes_type2.get_value()
def get_node_type1(self, idx):
n_fun = theano.function(inputs=[], outputs=self.nodes_type1[idx])
return n_fun()
def set_node_type1(self, idx, typ):
node_idx = T.iscalar('node-idx')
new = T.iscalar('new')
up_fun = theano.function(inputs=[node_idx, new], outputs=None,
updates=[(self.nodes_type1, T.set_subtensor(self.nodes_type1[node_idx], new))])
up_fun(idx, typ)
def getNodeFunctions(self, withnumbers=True):
def name(func, idx):
if withnumbers:
return func + '-' + str(self.layer_idx) + '-' + str(idx)
else:
return func
return [name(self.funcs1[bf], i) for (i, bf) in
zip(list(range(1, len(self.get_nodes_type1()) + 1)), self.get_nodes_type1())] + \
[name(self.funcs2[bf], i) for (i, bf) in
zip(list(range(1, len(self.get_nodes_type2()) + 1)), self.get_nodes_type2())]
def getWeightCorrespondence(self):#here
def name(func, idx):
return func + '-' + str(self.layer_idx) + '-' + str(idx)
return [name(self.funcs1[bf], i) for (i, bf) in
zip(list(range(1, len(self.get_nodes_type1()) + 1)), self.get_nodes_type1())] + \
[name(self.funcs2[bf], i) + ':' + '1' for (i, bf) in
zip(list(range(1, len(self.get_nodes_type2()) + 1)), self.get_nodes_type2())] + \
[name(self.funcs2[bf], i) + ':' + '2' for (i, bf) in
zip(list(range(1, len(self.get_nodes_type2()) + 1)), self.get_nodes_type2())]
class MLFG(object):
"""Multi-Layer Function Graph
    A multilayer function graph is similar to an artificial neural network model
    with one or more layers of hidden units and various activation functions.
"""
def __init__(self, rng, n_in, n_per_base, n_out, n_layer=1,
basefuncs1=None, basefuncs2=None, gradient=None, with_shortcuts=False):
"""Initialize the parameters for the multilayer function graph
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_layer: int
:param n_layer: number of hidden layers
:type n_per_base: int
:param n_per_base: number of nodes per basis function see FGLayer
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
:type basefuncs1: [int]
:param basefuncs1: see FGLayer
:type basefuncs2: [int]
:param basefuncs2: see FGLayer
:type gradient: string
        :param gradient: type of gradient descent algorithm (None or "sgd"/"sgd+", "adam", "adadelta", "rmsprop", "nag")
:type with_shortcuts: bool
:param with_shortcuts: whether to use shortcut connections (output is connected to all units)
"""
self.input = T.matrix('input') # the data is presented as vector input
self.labels = T.matrix('labels') # the labels are presented as vector of continous values
self.rng = rng
self.n_layers = n_layer
self.hidden_layers = []
self.params = []
self.n_in = n_in
self.n_out = n_out
self.with_shortcuts = with_shortcuts
self.fixL0=False
for l in range(n_layer):
if l == 0:
layer_input = self.input
n_input = n_in
else:
layer_input = self.hidden_layers[l - 1].output
n_input = self.hidden_layers[l - 1].n_out
hiddenLayer = FGLayer(
rng=rng,
inp=layer_input,
n_in=n_input,
n_per_base=n_per_base,
basefuncs1=basefuncs1,
basefuncs2=basefuncs2,
layer_idx=l,
)
self.hidden_layers.append(hiddenLayer)
self.params.extend(hiddenLayer.params)
div_thresh = T.scalar("div_thresh")
# The linear output layer, either it gets as input the output of ALL previous layers
if self.with_shortcuts:
output_layer_inp = T.concatenate([l.output for l in reversed(self.hidden_layers)], axis=1)
output_layer_n_in = sum([l.n_out for l in self.hidden_layers])
else: # or just of the last hidden layer
output_layer_inp = self.hidden_layers[-1].output
output_layer_n_in = self.hidden_layers[-1].n_out
self.output_layer = LinearRegression(
rng=rng,
inp=output_layer_inp,
n_in=output_layer_n_in,
n_out=n_out,
div_thresh=div_thresh
)
self.params.extend(self.output_layer.params)
self.evalfun = theano.function(inputs=[self.input, In(div_thresh, value=0.0001)], outputs=self.output_layer.output)
L1_reg = T.scalar('L1_reg')
L2_reg = T.scalar('L2_reg')
fixL0 = T.bscalar('fixL0')
self.L1 = self.output_layer.L1 + sum([l.L1 for l in self.hidden_layers])
self.L2_sqr = self.output_layer.L2_sqr + sum([l.L2_sqr for l in self.hidden_layers])
self.penalty = self.output_layer.penalty
self.loss = self.output_layer.loss
self.errors = self.loss
self.cost = (self.loss(self.labels) + L1_reg * self.L1 + L2_reg * self.L2_sqr + self.penalty)
#Extrapol penalty
self.extrapol_cost = self.output_layer.extrapol_loss
learning_rate = T.scalar('learning_rate')
def process_updates(par, newp):
# print par.name
if par.name == "W":
# if fixL0 is True, then keep small weights at 0
'''
            TODO: In the L1 phase some very small weights effectively die; in the L0 phase
            they may want to become active again and we should allow this.
            Maybe we could apply the step below only once in a while during the L0 phase?
'''
return par, ifelse(fixL0, T.switch(T.abs_(par) < 0.001, par*0, newp), newp)
return par, newp
print("Gradient:", gradient)
update = None
if gradient=='sgd+' or gradient=='sgd' or gradient==None:
gparams = [T.grad(self.cost, param) for param in self.params]
update = OrderedDict([(param, param - (learning_rate * gparam).clip(-1.0, 1.0))
for param, gparam in zip(self.params, gparams)])
elif gradient=='adam':
update = Lupdates.adam(self.cost, self.params, learning_rate, epsilon=1e-04)
elif gradient == 'adadelta':
update = Lupdates.adadelta(self.cost, self.params,learning_rate)
elif gradient == 'rmsprop':
update = Lupdates.rmsprop(self.cost, self.params,learning_rate)
elif gradient == 'nag':
update = Lupdates.nesterov_momentum(self.cost,self.params,learning_rate)
else:
assert("unknown gradient " + gradient)
#Extrapol gradient computation:
extrapol_updates = Lupdates.adam(self.extrapol_cost, self.params, learning_rate, epsilon=1e-04)
updates = [process_updates(*up) for up in list(update.items())]
self.train_model = theano.function(
inputs=[self.input, self.labels, L1_reg, L2_reg, fixL0, learning_rate, div_thresh],
outputs=self.cost,
updates=updates,
)
self.remove_extrapol_error = theano.function(
inputs=[self.input, learning_rate, div_thresh],
outputs=self.extrapol_cost,
updates=extrapol_updates,
)
self.test_model = theano.function(
inputs=[self.input, self.labels, In(div_thresh, value=0.0001)],
outputs=self.errors(self.labels),
)
self.validate_model = theano.function(
inputs=[self.input, self.labels, In(div_thresh, value=0.0001)],
outputs=self.errors(self.labels),
)
self.L1_loss = theano.function(
inputs=[],
outputs=self.L1,
)
self.MSE = theano.function(
inputs=[self.input, self.labels, In(div_thresh, value=0.0001)],
outputs=self.errors(self.labels),
)
target_input = T.matrix('target_input')
l1_input_reg = T.vector('l1_input_reg')
#TODO: This can go away
############ MINIMAL INTERVENTION ###########
@staticmethod
def vec_norm(vec):
return T.sqrt(T.sum(T.sqr(vec)))
@staticmethod
def vec_normalize(vec):
norm = MLFG.vec_norm(vec)
return vec / (norm + 1e-10)
def get_params(self):
paramfun = theano.function(inputs=[], outputs=self.params)
return paramfun()
def get_state(self):
return [l.get_state() for l in self.hidden_layers] + [self.output_layer.get_state()]
def set_state(self, newState):
for (s, l) in zip(newState, self.hidden_layers + [self.output_layer]):
l.set_state(s)
def evaluate(self, input):
return self.evalfun(cast_to_floatX(input))
def get_n_units_type1(self):
return sum([l.get_n_type1() for l in self.hidden_layers])
def get_n_units_type2(self):
return sum([l.get_n_type2() for l in self.hidden_layers])
# sparsity
'''
total += sum(numpy.fabs(out_weight_norm * in_weight_norm[: layer.get_n_type1() + layer.get_n_type2()]) > thresh * thresh)
'''
def get_num_active_units(self, thresh=0.1):
# count units with nonzero input * output weights
# in principle one could make a backward scan and identify units without path to the output
total = 0
for layer_idx in range(0, self.n_layers):
layer = self.hidden_layers[layer_idx]
in_weights = layer.get_weights()
#bias = layer.get_biasForNumActive()
out_weights = self.hidden_layers[layer_idx + 1].get_weights() if layer_idx + 1 < self.n_layers \
else self.output_layer.get_weights()
# noinspection PyTypeChecker
in_weight_norm = np.linalg.norm(in_weights, axis=0, ord=1)
out_weight_norm = np.linalg.norm(out_weights, axis=1, ord=1)
for i in range(layer.get_n_type2()):
if (in_weight_norm[layer.get_n_type1() + i] > thresh and \
in_weight_norm[layer.get_n_type1() + layer.get_n_type2() + i] > thresh):
in_weight_norm[layer.get_n_type1() + i] += in_weight_norm[layer.get_n_type1() + layer.get_n_type2() + i]
else:
in_weight_norm[layer.get_n_type1() + i] = 0
# noinspection PyTypeChecker
for i in range(layer.get_n_type1()):
if (out_weight_norm[i]*in_weight_norm[i] > thresh*thresh and layer.get_nodes_type1()[i] != 0): #nodes_type1 matrix of 00...011...1x`
total += 1
#print layer_idx, layer.get_nodes_type1()[i], out_weight_norm[i], in_weight_norm[i]
for i in range(layer.get_n_type1(), layer.get_n_type1() + layer.get_n_type2()):
if (out_weight_norm[i]*in_weight_norm[i] > thresh*thresh):
total += 1
#print layer_idx, "mult", out_weight_norm[i], in_weight_norm[i]
return total
def get_active_units_old(self, thresh=0.05):
# quick hack: count units with nonzero output weights not counting the inputs
total = 0
for layer_idx in range(1, self.n_layers + 1):
layer = self.hidden_layers[layer_idx] if layer_idx < self.n_layers else self.output_layer
# noinspection PyTypeChecker
out_weight_norm = np.linalg.norm(layer.get_weights(), axis=1, ord=1)
total += sum(out_weight_norm > thresh)
return total
def test_mlfg(datasets, learning_rate=0.01, L1_reg=0.001, L2_reg=0.00, n_epochs=200,
batch_size=20, n_layer=1, n_per_base=5, basefuncs1=None, basefuncs2=None,
with_shortcuts=False, id=None,
classifier=None,
gradient=None,
init_state=None,
verbose=True, param_store=None,
reg_start=0, reg_end=None,
validate_every=10,
k=100
):
"""
:type datasets: ((matrix,matrix),(matrix,matrix),(matrix,matrix))
:param datasets: ((train-x,train-y),(valid-x,valid-y),(test-x,test-y))
:type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
    gradient update)
:type L1_reg: float
:param L1_reg: L1-norm's weight when added to the cost (see
regularization)
:type L2_reg: float
:param L2_reg: L2-norm's weight when added to the cost (see
regularization)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type param_store: []
:param param_store: if not None then the weights of each episode are stored here
:type id: int
    :param id: id of run (also used as random seed; if None then the time is used as seed)
:param init_state: initial state for classifier to use
"""
train_set_x, train_set_y = cast_dataset_to_floatX(datasets[0])
valid_set_x, valid_set_y = cast_dataset_to_floatX(datasets[1])
MAX_INPUT_VAL = np.max(abs(train_set_x))
print("Max input value is: ", MAX_INPUT_VAL)
#extra_set_x, extra_set_y = cast_dataset_to_floatX(extrapol_dataset[0]) #0 has to be used and extrapol_dataset[1] has null entry
#extra_set_x has dimensions 5000x4 for cp_new dataset ... verified by the following print statement
#print "extrapol dim: ", len(extra_set_x), len(extra_set_x[0]), len(extra_set_y), len(extra_set_y[0])
if len(datasets) > 2 and len(datasets[2]) == 2:
test_set_x, test_set_y = cast_dataset_to_floatX(datasets[2])
n_test_batches = test_set_x.shape[0] // batch_size
else:
test_set_x = test_set_y = None
n_test_batches = 0
n_train_batches = train_set_x.shape[0] // batch_size
n_valid_batches = valid_set_x.shape[0] // batch_size
inputdim = len(datasets[0][0][0])
outputdim = len(datasets[0][1][0])
if verbose: print("Input/output dim:", (inputdim, outputdim))
if verbose: print("Training set, test set:", (train_set_x.shape[0], test_set_x.shape[0]))
######################
# BUILD ACTUAL MODEL #
######################
print('... building the model')
rng = numpy.random.RandomState(int(time.time()) if id is None else id)
if classifier is None:
classifier = MLFG(
rng=rng,
n_in=inputdim,
n_per_base=n_per_base,
n_out=outputdim,
n_layer=n_layer,
gradient=gradient,
basefuncs1=basefuncs1,
basefuncs2=basefuncs2,
with_shortcuts=with_shortcuts,
)
if init_state:
classifier.set_state(init_state)
###############
# TRAIN MODEL #
###############
print('... training')
sys.stdout.flush()
# early-stopping parameters
improvement_threshold = 0.99 # a relative improvement of this much is considered significant
best_validation_error = numpy.inf
this_validation_error = numpy.inf
best_epoch = 0
test_score = 0.
best_state = classifier.get_state()
start_time = timeit.default_timer()
epoch = 0
done_looping = False
train_errors = []
extrapol_train_errors = []
validation_errors = []
test_errors = []
MSE = []
L1 = []
if param_store is not None:
param_store.append(classifier.get_params())
while (epoch < n_epochs) and (not done_looping):
#print epoch #remove
special_penalty = 0
epoch = epoch + 1
reg_factor = 0.0
if reg_start < epoch <= reg_end:
reg_factor = 1.0
L1.append([epoch, np.asscalar(classifier.L1_loss())])
if (epoch - reg_start)%k == 0 and epoch < reg_end:
special_penalty = 1
temp = list(zip(list(train_set_x),list(train_set_y)))
random.shuffle(temp)
train_set_x, train_set_y = list(zip(*temp))
train_set_x = numpy.asarray(train_set_x)
train_set_y = numpy.asarray(train_set_y)
del temp[:]
minibatch_avg_cost = 0.0
for minibatch_index in range(n_train_batches):
index = minibatch_index
minibatch_avg_cost += classifier.train_model(
input=train_set_x[index * batch_size: (index + 1) * batch_size],
labels=train_set_y[index * batch_size: (index + 1) * batch_size],
L1_reg=L1_reg * reg_factor,
L2_reg=L2_reg * reg_factor,
fixL0 = epoch > reg_end,
div_thresh = 1.0/np.sqrt(epoch + 1),
learning_rate=learning_rate,
)
if special_penalty == 1:
#max input val would ensure we don't have poles anywhere in twice the interpolation region
n_num, n_in = train_set_x.shape
extra_set_x = (2*np.random.rand(n_num, n_in)-1.0)*MAX_INPUT_VAL
assert extra_set_x.shape == train_set_x.shape
for x in range(n_num):
for y in range(n_in):
if (extra_set_x[x][y] >=0.0):
extra_set_x[x][y] += MAX_INPUT_VAL
else:
extra_set_x[x][y] -= MAX_INPUT_VAL
extrapol_error_training = 0.0
for minibatch_index in range(n_train_batches):
index = minibatch_index
extrapol_error_training += classifier.remove_extrapol_error(
input=extra_set_x[index * batch_size: (index + 1) * batch_size],
div_thresh = 1.0/np.sqrt(epoch + 1),
learning_rate=learning_rate,
)
extrapol_train_errors.append([epoch, extrapol_error_training/n_train_batches])
train_errors.append([epoch, minibatch_avg_cost/n_train_batches])
if param_store is not None:
param_store.append(classifier.get_params())
if epoch == 1 or epoch % validate_every == 0 or epoch == n_epochs:
this_validation_errors = [classifier.validate_model(
input=valid_set_x[index * batch_size:(index + 1) * batch_size],
labels=valid_set_y[index * batch_size:(index + 1) * batch_size])
for index in range(n_valid_batches)]
this_validation_error = np.asscalar(numpy.mean(this_validation_errors))
validation_errors.append([epoch, this_validation_error])
this_MSE = [classifier.MSE(input=train_set_x[index*batch_size:(index + 1)*batch_size],
labels=train_set_y[index*batch_size: (index + 1)*batch_size]) for index in range(n_train_batches)]
MSE.append([epoch, np.asscalar(np.mean(this_MSE))])
if verbose:
print((
'epoch %i, minibatch %i/%i, minibatch_avg_cost %f validation error %f' %
(
epoch,
minibatch_index + 1,
n_train_batches,
minibatch_avg_cost,
this_validation_error
)
))
# test it on the test set
if test_set_x is not None:
test_losses = [classifier.test_model(
input=test_set_x[index * batch_size:(index + 1) * batch_size],
labels=test_set_y[index * batch_size:(index + 1) * batch_size])
for index in range(n_test_batches)]
this_test_score = np.asscalar(numpy.mean(test_losses))
test_errors.append([epoch, this_test_score])
else:
this_test_score = np.inf
# if we got the best validation score until now
if this_validation_error < best_validation_error:
if this_validation_error < best_validation_error * improvement_threshold:
best_state = classifier.get_state()
best_validation_error = this_validation_error
best_epoch = epoch
test_score = this_test_score
if verbose:
print((('epoch %i, minibatch %i/%i, test error of '
'best model %f') %
(epoch, minibatch_index + 1, n_train_batches,
test_score)))
if epoch % 10 == 0:
print("Epoch: ", epoch, "\tBest val error: ", best_validation_error, "\tcurrent val error: ", this_validation_error)
sys.stdout.flush()
end_time = timeit.default_timer()
time_required = (end_time - start_time) / 60.
print((('Optimization complete. Best validation score of %f '
'obtained at epoch %i, with test performance %f ') %
(best_validation_error, best_epoch + 1, test_score)))
print(('The code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % time_required), file=sys.stderr)
if verbose:
numpy.set_printoptions(precision=4, suppress=True)
print((classifier.get_params()))
return {'train_losses': numpy.asarray(train_errors),
'extrapol_train_losses':numpy.asarray(extrapol_train_errors),
'MSE':numpy.asarray(MSE),
'L1':numpy.asarray(L1),
'val_errors': numpy.asarray(validation_errors),
'test_errors': numpy.asarray(test_errors),
'classifier': classifier,
'test_score': test_score,
'val_score': this_validation_error,
'best_val_score': best_validation_error,
'best_epoch': best_epoch,
'best_state': best_state,
'num_active': classifier.get_num_active_units(),
'runtime': time_required
}
def usage():
print((sys.argv[0] + "[-i id -d dataset -p extrapolationdataset -l layers -e epochs -n nodes -r learningrate --initfile=file --batchsize=k --l1=l1reg --l2=l2reg --shortcut --reg_start=start --reg_end=end --resfolder -v]"))
if __name__ == "__main__":
dataset_file = None
extra_pol_test_sets = []
extra_pols = []
n_epochs = 1200
n_layers = 3
n_nodes = 5
batch_size = 20
init_file = None
init_state = None
gradient = "sgd"
L1_reg = 0.001
L2_reg = 0.001
learning_rate = 0.01
with_shortcuts = False
reg_start = 0
reg_end = None
output = False
verbose = 0
k=99999999999
id = np.random.randint(0, 1000000)
result_folder = "./"
basefuncs1 = [0, 1, 2]
iterNum=0
theano.gof.compilelock.set_lock_status(False)
try:
opts, args = getopt.getopt(sys.argv[1:], "hv:i:d:p:l:e:n:f:co",
["help", "verbose=", "id=", "dataset=", "extrapol=", "layers=", "epochs=",
"nodes=", "l1=", "l2=", "lr=", "resfolder=",
"batchsize=", "initfile=", "gradient=",
"reg_start=", "reg_end=", "shortcut", "output","k_update=","iterNum="
])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-v", "--verbose"):
verbose = int(arg)
elif opt in ("-i", "--id"):
id = int(arg)
elif opt in ("-d", "--dataset"):
dataset_file = arg
elif opt in ("-p", "--extrapol"):
extra_pol_test_sets.append(arg)
elif opt in ("-l", "--layers"):
n_layers = int(arg)
elif opt in ("-e", "--epochs"):
n_epochs = int(arg)
elif opt in ("--batchsize"):
batch_size = int(arg)
elif opt in ("--l1"):
L1_reg = float(arg)
elif opt in ("--l2"):
L2_reg = float(arg)
elif opt in ("--lr"):
learning_rate = float(arg)
elif opt in ("-n", "--nodes"):
n_nodes = int(arg)
elif opt in ("-c", "--shortcut"):
with_shortcuts = True
elif opt in ("--initfile"):
init_file = arg
elif opt in ("--gradient"):
gradient= arg
elif opt in ("--reg_start"):
reg_start = int(arg)
elif opt in ("--reg_end"):
reg_end = int(arg)
elif opt in ("-o", "--output"):
output = True
elif opt in ("-f", "--resfolder"):
result_folder = arg
elif opt in ("--iterNum"):
iterNum = int(arg)
elif opt in ("--k_update"):
k = int(arg)
# load dataset
if not dataset_file:
print("provide datasetfile!")
usage()
exit(1)
dataset = load_data(dataset_file)
# load extrapolation test
if len(extra_pol_test_sets) > 0:
if verbose > 0:
print("do also extrapolation test(s)!")
extra_pols = [load_data(test_set) for test_set in extra_pol_test_sets]
if init_file:
with open(init_file, 'rb') as f:
            init_state = pickle.load(f, encoding='latin1')
print("load initial state from file " + init_file)
if not os.path.exists(result_folder):
os.makedirs(result_folder)
name = result_folder + "/" + str(id)
print(("Results go into " + result_folder))
result = test_mlfg(datasets=dataset, k=k, n_epochs=n_epochs, verbose=verbose > 0, learning_rate=learning_rate,
L1_reg=L1_reg, L2_reg=L2_reg, basefuncs2=[0], basefuncs1=basefuncs1, n_layer=n_layers,
n_per_base=n_nodes, id=id, gradient=gradient,
batch_size=batch_size, init_state=init_state,
reg_start=reg_start, reg_end=reg_end, with_shortcuts=with_shortcuts,
)
classifier = result['classifier']
with open(name + '.best_state', 'wb') as f:
        pickle.dump(result['best_state'], f, protocol=pickle.HIGHEST_PROTOCOL)
    with open(name + '.last_state', 'wb') as f:
        pickle.dump(classifier.get_state(), f, protocol=pickle.HIGHEST_PROTOCOL)
extra_scores = []
extra_scores_best = []
for extra in extra_pols:
extra_set_x, extra_set_y = cast_dataset_to_floatX(extra[0])
extra_scores.append(classifier.test_model(input=extra_set_x, labels=extra_set_y))
# also for best_state
classifier.set_state(result['best_state'])
for extra in extra_pols:
extra_set_x, extra_set_y = cast_dataset_to_floatX(extra[0])
extra_scores_best.append(classifier.test_model(input=extra_set_x, labels=extra_set_y))
result_line = ""
with open(name + '.res', 'w') as f:
if (id <= 0):
f.write('#C k iter layers epochs nodes lr L1 L2 shortcut batchsize regstart regend' +
' id dataset gradient numactive bestnumactive bestepoch runtime' +
"".join([' extrapol' + str(i) for i in range(1, len(extra_scores) + 1)]) +
"".join([' extrapolbest' + str(i) for i in range(1, len(extra_scores_best) + 1)]) +
' valerror valerrorbest testerror\n')
f.write('# extra datasets: ' + " ".join(extra_pol_test_sets) + '\n')
result_line = [str(k), str(iterNum), str(n_layers), str(n_epochs), str(n_nodes), str(learning_rate), str(L1_reg), str(L2_reg),
str(with_shortcuts), str(batch_size), str(reg_start), str(reg_end),
str(id), dataset_file, gradient,
str(result['num_active']), str(classifier.get_num_active_units()),
str(result['best_epoch']), str(result['runtime'])] + \
[str(e) for e in extra_scores] + \
[str(e) for e in extra_scores_best] + \
[str(result['val_score']), str(result['best_val_score']), str(result['test_score'])]
f.write(str.join('\t', result_line) + '\n')
    with open(name + '.validerror', 'w') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "val_error"]])
a.writerows([["# "] + result_line])
a.writerows(result['val_errors'])
    with open(name + '.MSE', 'w') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "MSE"]])
a.writerows([["# "] + result_line])
a.writerows(result['MSE'])
    with open(name + '.L1', 'w') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "L1"]])
a.writerows([["# "] + result_line])
a.writerows(result['L1'])
output=1
if output:
        with open(name + '.trainloss', 'w') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "train_loss"]])
a.writerows([["# "] + result_line])
a.writerows(result['train_losses'])
if len(result['test_errors']) > 0:
            with open(name + '.testerrors', 'w') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "test_error"]])
a.writerows([["# "] + result_line])
a.writerows(result['test_errors'])
        with open(name + '.extrapoltrainloss', 'w') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "extrapol_train_loss"]])
a.writerows([["# "] + result_line])
a.writerows(result['extrapol_train_losses'])
| 35,927 | 33.446788 | 223 | py |
EQL | EQL-master/EQL-DIV-ICML-Python3/src/noise.py | # Copyright (c) 2011 Leif Johnson <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''Code for generating white and pink noise.'''
import numpy
import numpy.random as rng
import operator
from functools import reduce
def iterwhite():
'''Generate a sequence of samples of white noise.
Generates a never-ending sequence of floating-point values.
'''
while True:
for n in rng.randn(100):
yield n
def iterpink(depth=20):
'''Generate a sequence of samples of pink noise.
Based on the Voss-McCartney algorithm, discussion and code examples at
http://www.firstpr.com.au/dsp/pink-noise/
depth: Use this many samples of white noise to calculate the output. A
higher number is slower to run, but renders low frequencies with more
correct power spectra.
Generates a never-ending sequence of floating-point values. Any continuous
set of these samples will tend to have a 1/f power spectrum.
'''
values = rng.randn(depth)
smooth = rng.randn(depth)
source = rng.randn(depth)
sum = values.sum()
i = 0
while True:
yield sum + smooth[i]
# advance the index by 1. if the index wraps, generate noise to use in
# the calculations, but do not update any of the pink noise values.
i += 1
if i == depth:
i = 0
smooth = rng.randn(depth)
source = rng.randn(depth)
continue
# count trailing zeros in i
c = 0
while not (i >> c) & 1:
c += 1
# replace value c with a new source element
sum += source[i] - values[c]
values[c] = source[i]
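# Usage sketch: iterpink() is a never-ending generator, so draw a finite number
# of samples explicitly, e.g.
#   gen = iterpink(depth=20)
#   samples = [next(gen) for _ in range(1024)]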
def _asarray(source, shape):
noise = source()
if shape is None:
return next(noise)
count = reduce(operator.mul, shape)
return numpy.asarray([next(noise) for _ in range(count)]).reshape(shape)
def white(shape=None):
'''Generate white noise.
shape: If given, returns a numpy array of white noise with this shape. If
not given, return just one sample of white noise.
'''
return _asarray(iterwhite, shape)
def pink(shape=None, depth=20):
'''Generate an array of pink noise.
shape: If given, returns a numpy array of noise with this shape. If not
given, return just one sample of noise.
depth: Use this many samples of white noise to calculate pink noise. A
higher number is slower to run, but renders low frequencies with more
correct power spectra.
'''
return _asarray(lambda: iterpink(depth), shape)
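# e.g. pink((1000,)) returns a 1-D numpy array of 1000 pink-noise samples,
# while pink() without a shape returns a single float sample.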
# added by Georg
def pink_zero_mean_std(shape=None, depth=20):
'''Generate an array of pink noise.
shape: If given, returns a numpy array of noise with this shape. If not
given, return just one sample of noise.
depth: Use this many samples of white noise to calculate pink noise. A
higher number is slower to run, but renders low frequencies with more
correct power spectra.
'''
dat = _asarray(lambda: iterpink(depth), shape)
return (dat-numpy.mean(dat))/numpy.sqrt(numpy.log2(depth))
if __name__ == '__main__':
from matplotlib import pylab
    k = numpy.ones(100) / 10.
def spectrum(s):
a = abs(numpy.fft.rfft(list(s))) ** 2
return numpy.convolve(a, k, 'valid')
ax = pylab.gca()
w = iterwhite()
ax.loglog(spectrum(next(w) for _ in range(10000)), 'k')
for p, a in enumerate(numpy.logspace(-0.5, 0, 7)):
print(2 ** (p + 1))
p = iterpink(2 ** (p + 1))
ax.loglog(spectrum(next(p) for _ in range(10000)), 'r', alpha=a)
ax.grid(linestyle=':')
ax.set_xlim(10., None)
ax.set_ylim(None, 1e8)
pylab.show() | 4,719 | 31.777778 | 80 | py |
EQL | EQL-master/EQL-DIV-ICML-Python3/src/model_selection_val_sparsity.py | import os, sys
import stat
import numpy as np
from operator import itemgetter
'''
expects a file with one row per network and columns reporting the parameters and sparsity and performance
First line should be the column names, #C col1 col2 col3...
then one additional comments line: # extrapolation datasets etc
A sample file is in example_parameter_scan_result.txt
These are the typical columns in the file.
['k', 'iter', 'layers', 'epochs', 'nodes', 'lr', 'L1', 'L2', 'shortcut', 'batchsize', 'regstart', 'regend',
'id','dataset', 'gradient', 'numactive', 'bestnumactive', 'bestepoch','dups', 'inserts', 'runtime', 'extrapol1', 'extrapol2', 'extrapol3',
'extrapolbest1', 'extrapolbest2', 'extrapolbest3', 'valerror', 'valerrorbest', 'testerror']
'''
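# Usage sketch (the path is only an example; any result table in the format
# described above works, e.g. the all.dat written by the finished_* scripts):
#   best = select_instance('result_f1-EQLDIV/all.dat')
#   print(best['id'], best['valerror'], best['num_active'])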
def select_instance(file):
value_dict = {}
    with open(file, 'r') as f:
        k = 0
        lines = f.readlines()
keys = lines[0].split()[1:]
extrapolL = [x for x in keys if ("extrapol" in x and not "best" in x)]
for key in keys:
nums = []
for l in lines[2:]: # to remove #the line containing "#extra datasets"
nums.append(l.split()[k])
k += 1
value_dict[key] = nums
#print key , value_dict[key]
lines = 0
e = []
e_mean = []
e_var = []
for i in range(len(value_dict["id"])):
value_dict["id"][int(i)] = int(value_dict["id"][int(i)])
value_dict["nodes"][int(i)] = int(value_dict["nodes"][int(i)])
value_dict["numactive"][int(i)] = float(value_dict["numactive"][int(i)])
value_dict["iter"][int(i)] = int(value_dict["iter"][int(i)])
value_dict["valerror"][int(i)] = float(value_dict["valerror"][int(i)])
# value_dict["valextrapol"][int(i)] = float(value_dict["valextrapol"][int(i)])
for k in extrapolL:
value_dict[k][int(i)] = float(value_dict[k][int(i)])
lines += 1
print("lines: ", lines)
active_ = []
validation_ = []
id_ = []
extrapol_ = []
#extrapol_val = []
for i in range(lines):
validation_.append(value_dict["valerror"][i])
active_.append(value_dict["numactive"][i])
if "extrapol2" in value_dict:
extrapol_.append(value_dict["extrapol2"][i])
id_.append(value_dict["id"][i])
#extrapol_val.append(value_dict["valextrapol"][i])
active = np.asarray(active_)
active = (active-np.min(active))/(np.max(active)-np.min(active)) # normalize
validation = np.asarray(validation_)
validation = (validation-np.min(validation))/(np.max(validation)-np.min(validation)) # normalize
norm_score = np.sqrt(active**2 + validation**2)
# only for information
if len(extrapol_) > 0:
best_extrapol = sorted(zip(id_, extrapol_), key=itemgetter(1))[0]
print((" best extrapolating model: (only for information):", best_extrapol))
score = list(zip(list(norm_score), id_, active_, validation_, extrapol_))
score.sort(key = itemgetter(0))
best_instance = score[0]
print(("selected instance model: score: {} id: {} #active: {}\t val-error: {}\t extra-pol2-error: {}".format(*best_instance)))
# (best_instance[3], score)
return dict(list(zip(['score','id', 'num_active', 'valerror', 'extrapol2'], best_instance)))
| 3,446 | 41.555556 | 138 | py |
EQL | EQL-master/EQL-DIV-ICML-Python3/src/graph_div.py | from graphviz import Digraph
import numpy as np
def getEdges(matrix,inputnames,outputnames,thresh=0.1):
edges=[]
it = np.nditer(matrix, flags=['multi_index'])
while not it.finished:
if np.abs(it[0])>thresh:
edges.append((inputnames[it.multi_index[0]],outputnames[it.multi_index[1]],np.round(it[0].item(),2)))
it.iternext()
return edges
def functionGraph1H(classifier,thresh=0.1):
    return functionGraph(classifier, thresh)
def functionGraph(classifier,thresh=0.1):
n_in,n_out = classifier.n_in, classifier.n_out
try:
shortcuts = classifier.with_shortcuts
except AttributeError:
shortcuts=False
names_in = [ 'x' + str(s) for s in range(1,n_in+1)]
names_out= [ 'div/y' + str(s) for s in range(1,n_out+1)]
alledges = []
allbiases = []
for l in range(len(classifier.hidden_layers)+1):
if l==0:
inp=names_in
else:
inp = classifier.hidden_layers[l-1].getNodeFunctions()
if l==len(classifier.hidden_layers): # last layer
if shortcuts:
inp = np.concatenate([ l.getNodeFunctions() for l in classifier.hidden_layers ])
out = [ n + ":1" for n in names_out] + [ n + ":2" for n in names_out]
ps = classifier.output_layer.get_params()
W = ps[0]
b = ps[1]
else:
out = classifier.hidden_layers[l].getWeightCorrespondence()
ps = classifier.hidden_layers[l].get_params()
W = ps[0]
b = ps[1]
alledges.extend(getEdges(W, inp, out ,thresh))
allbiases.extend(list(zip(out,b)))
nodes=list(set([e[0] for e in alledges])) + list(set([e[1] for e in alledges]))
def isArgument(name):
return ':' in name
def arity2node(name,b1, b2):
op=None
if len(name.split('/')) > 1:
op, name = name.split('/')
operatorname = ('<TR><TD COLSPAN="2">' + op + '</TD></TR>' if op is not None else "")
return '''<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
<TR>
<TD PORT="1">''' + b1 + '''</TD>
<TD PORT="2">''' + b2 + '''</TD>
</TR>''' + operatorname + '<TR><TD COLSPAN="2">' + name + '</TD></TR></TABLE>>'
arity2set = set([n.split(':')[0] for n in nodes if isArgument(n)])
arity2 = list(arity2set)
arity1 = list(set([n for n in nodes if not isArgument(n)]) - arity2set)
bias_dict = dict(allbiases)
dot = Digraph(comment='Function Graph')
for n in arity1:
if n in bias_dict:
dot.node(n,str(np.round(bias_dict[n],2)) + '\n' + n.split('-')[0])
else:
dot.node(n,n.split('-')[0])
for n in arity2:
dot.node(n,arity2node(n.split('-')[0],
str(np.round(bias_dict.get(n+ ':1',0),2)),
str(np.round(bias_dict.get(n+ ':2',0),2)) ),shape='plaintext')
for e in alledges:
dot.edge(e[0], e[1], label=str(e[2]))
return dot
| 2,998 | 34.702381 | 113 | py |
EQL | EQL-master/EQL-DIV-ICML-Python3/src/__init__.py | 0 | 0 | 0 | py |
|
EQL | EQL-master/EQL-DIV-ICML-Python3/src/svr.py | """
SVR from sklearn
"""
import time
import sys
import timeit
import getopt
import numpy
import pickle
from sklearn.svm import SVR
from utils import *  # absolute import so the script can also be run directly
__docformat__ = 'restructedtext en'
def evaluate_svr(x,model):
predictions = []
for (d, svr) in model:
predictions.append(svr.predict(x))
return np.transpose(np.asarray(predictions))
def test_svr(x,y, model):
errors = []
for (d, svr) in model:
pred_y = svr.predict(x)
errors.append(np.mean(np.square(y[:, d] - pred_y)))
return np.mean(errors)
def train_test_svr(datasets, C=1.0, epsilon=0.001, gamma=0.1,
model=None,
id=None,
init_file=None):
"""
:type datasets: ((matrix,matrix),(matrix,matrix),(matrix,matrix))
:param datasets: ((train-x,train-y),(valid-x,valid-y),(test-x,test-y))
"""
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
if len(datasets) > 2:
test_set_x, test_set_y = datasets[2]
else:
test_set_x = test_set_y = None
inputdim = len(datasets[0][0][0])
outputdim = len(datasets[0][1][0])
print("Input/output dim:", (inputdim, outputdim))
print("Training set, val set:", (train_set_x.shape[0], valid_set_x.shape[0]))
start_time = timeit.default_timer()
# need one SVR for each output dimension
rng = numpy.random.RandomState(int(time.time()) if id is None else id)
if model is None:
model = [(d, SVR(kernel='rbf', C=C, epsilon=epsilon, gamma=gamma)) for d in range(outputdim)]
if init_file:
        with open(init_file, 'rb') as f:  # load a previously pickled model from the given file
            model = pickle.load(f)
###############
# TRAIN MODEL #
###############
print('... training')
sys.stdout.flush()
for (d, svr) in model:
svr.fit(train_set_x, train_set_y[:,d])
validation_error = test_svr(valid_set_x, valid_set_y, model)
if test_set_x is not None:
test_error = test_svr(test_set_x, test_set_y, model)
else:
test_error = np.inf
end_time = timeit.default_timer()
time_required = (end_time - start_time) / 60.
print((('Optimization complete. Best validation score of %f and test performance %f ') %
(validation_error, test_error)))
return {'classifier': model,
'test_score': test_error,
'val_score': validation_error,
'runtime': time_required
}
def usage():
print((sys.argv[0] + "[-i id -d dataset -p extrapolationdataset -C costfactor -e epsilon -g gamma" +
" --resfolder -v"))
if __name__ == "__main__":
dataset_file = None
extra_pol_test_sets = []
extra_pols = []
init_file = None
epsilon = 0.1
Ccost=1.0
gamma = 1.0
verbose = 0
id = np.random.randint(0, 1000000)
result_folder = "./"
num_points=None
try:
opts, args = getopt.getopt(sys.argv[1:], "hv:i:d:p:e:g:f:C:o",
["help", "verbose=", "id=", "dataset=", "extrapol=",
"cost=", "epsilon=",
"gamma=", "resfolder=",
"initfile=", "num_points="
])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-v", "--verbose"):
verbose = int(arg)
elif opt in ("-i", "--id"):
id = int(arg)
elif opt in ("-d", "--dataset"):
dataset_file = arg
elif opt in ("-p", "--extrapol"):
extra_pol_test_sets.append(arg)
elif opt in ("-C", "--cost"):
Ccost = float(arg)
elif opt in ("-e","--epsilon"):
epsilon = float(arg)
elif opt in ("-g","--gamma"):
gamma = float(arg)
elif opt in ("--initfile"):
init_file = arg
elif opt in ("-o", "--output"):
output = True
elif opt in ("-f", "--resfolder"):
result_folder = arg
elif opt in ("--num_points"):
num_points = int(arg)
# load dataset
if not dataset_file:
print("provide datasetfile!")
usage()
exit(1)
dataset = load_data(dataset_file)
# restrict
num_pts = len(dataset[0][0])
if num_points is None:
num_points = num_pts
if num_points > num_pts:
num_points = num_pts
if num_points != num_pts:
print("retrict dataset to use: " + str(num_points))
datasetnew = ((dataset[0][0][:num_points, :], dataset[0][1][:num_points, :]), dataset[1])
if len(dataset) > 2:
dataset = datasetnew + (dataset[2],)
else:
dataset = datasetnew
# load extrapolation test
if len(extra_pol_test_sets) > 0:
if verbose > 0:
print("do also extrapolation test(s)!")
extra_pols = [load_data(test_set) for test_set in extra_pol_test_sets]
if not os.path.exists(result_folder):
try:
os.makedirs(result_folder)
except OSError:
pass
name = result_folder + str(id)
result = train_test_svr(datasets=dataset, C=Ccost, epsilon=epsilon, gamma=gamma, id=id, init_file=init_file)
classifier = result['classifier']
with open(name + '.last_state', 'wb') as f:
pickle.dump(classifier, f)
extra_scores = []
for extra in extra_pols:
extra_set_x, extra_set_y = cast_dataset_to_floatX(extra[0])
extra_scores.append(test_svr(extra_set_x, extra_set_y, classifier))
result_line = ""
with open(name + '.res', 'w') as f:
if (id == 0):
f.write('#C Ccost epsilon gamma' +
' id dataset' +
' runtime' +
"".join([' extrapol' + str(i) for i in range(1, len(extra_scores) + 1)]) +
' valerror testerror\n')
f.write('# extra datasets: ' + " ".join(extra_pol_test_sets) + '\n')
result_line = [str(Ccost), str(epsilon), str(gamma),
str(id), dataset_file, str(result['runtime'])] + \
[str(e) for e in extra_scores] + \
[str(result['val_score']), str(result['test_score'])]
f.write(str.join('\t', result_line) + '\n')
| 5,887 | 27.307692 | 111 | py |
EQL | EQL-master/EQL-DIV-ICML/createjobs.py | #!/usr/bin/python
# sample perl script to create SGE jobs (sun grid engine)
# for scanning a parameter space
import os
jobname = "F0_" # should be short
name = "" + jobname # name of shell scripts
res = "result_f0-EQLDIV"
submitfile = "submit_" + name + ".sh"
SUBMIT = open(submitfile,'w')
SUBMIT.write("#/bin/bash\n")
pwd=os.getcwd()
#number of epochs!
e=4000
regstart = e/4
regend = e-e/20
i = 0
for l1 in [10**(-l1exp/10.0) for l1exp in range(35,60)]:
for l_n in [3]:
for normal in ([True] if i > 0 else [False, True]):
epochs = e if normal else 1
result = res + "/" if normal else res + "test/"
base_cmd = ["python src/mlfg_final.py -i ", str(i),
" -f ", result,
" -d data/f0-n-10k-1.dat.gz",
" --extrapol=data/f0-n-5k-1-test.dat.gz",
" --extrapol=data/f0-n-5k-1-2-test.dat.gz",
" --epochs=", str(epochs),
" --l1=", str(l1),
" --layers=", str(l_n),
" --iterNum=1",
" --reg_start=", str(regstart),
" --reg_end=", str(regend),
" -o",
]
cmd= "".join(base_cmd)
script_fname = ((name + str(i)) if normal else name + "000_test") + ".sh"
if normal:
SUBMIT.write("./" + script_fname + "\n")
with open(script_fname, 'w') as FILE:
FILE.write("""\
#!/bin/bash
# %(jobname)s%(i)d
cd %(pwd)s
export OMP_NUM_THREADS=1
export PATH=${HOME}/bin:/usr/bin:${PATH}
if %(cmd)s; then
rm -f %(script_fname)s
else
touch %(script_fname)s.dead
fi
""" % locals())
os.chmod(script_fname,0755)
i += 1
SUBMIT.close()
os.chmod(submitfile,0755)
print "Jobs:" , i
with open("finished_" + name + ".sh",'w') as FINISHED:
FINISHED.write("#!/bin/bash\nset -e\n" +
'grep "#" $(ls ' + res + '/*.res -1 | head -n 1) >' + res + '/all.dat\n' +
"cat " + res + '/*.res | grep -v "#" >>' + res + '/all.dat\n' +
"cp " + __file__ + ' ' + res + '/\n' +
"rm finished_" + name+ '.sh ' + submitfile + '\n')
os.chmod("finished_" + name + ".sh",0755)
| 2,184 | 28.133333 | 92 | py |
EQL | EQL-master/EQL-DIV-ICML/createjobs-f1.py | #!/usr/bin/python
# sample perl script to create SGE jobs (sun grid engine)
# for scanning a parameter space
import os
jobname = "F1_" # should be short
name = "" + jobname # name of shell scripts
res = "result_f1-EQLDIV"
submitfile = "submit_" + name + ".sh"
SUBMIT = open(submitfile,'w')
SUBMIT.write("#/bin/bash\n")
pwd=os.getcwd()
#number of epochs
e=10000
regstart = e/4
regend = e-e/20
i = 0
for l1 in [10**(-l1exp/10.0) for l1exp in range(35,60)]:
for l_n in [2,3]:
for normal in ([True] if i > 0 else [False, True]):
epochs = e if normal else 1
result = res + "/" if normal else res + "test/"
base_cmd = ["python src/mlfg_final.py -i ", str(i),
" -f ", result,
" -d data/f1-n-10k-1.dat.gz",
" --extrapol=data/f1-n-5k-1-test.dat.gz",
" --extrapol=data/f1-n-5k-1-2-test.dat.gz",
" --epochs=", str(epochs),
" --l1=", str(l1),
" --layers=", str(l_n),
" --iterNum=1",
" --reg_start=", str(regstart),
" --reg_end=", str(regend),
" -o",
]
cmd= "".join(base_cmd)
script_fname = ((name + str(i)) if normal else name + "000_test") + ".sh"
if normal:
SUBMIT.write("./" + script_fname + "\n")
with open(script_fname, 'w') as FILE:
FILE.write("""\
#!/bin/bash
# %(jobname)s%(i)d
cd %(pwd)s
export OMP_NUM_THREADS=1
export PATH=${HOME}/bin:/usr/bin:${PATH}
if %(cmd)s; then
rm -f %(script_fname)s
else
touch %(script_fname)s.dead
fi
""" % locals())
os.chmod(script_fname,0755)
i += 1
SUBMIT.close()
os.chmod(submitfile,0755)
print "Jobs:" , i
with open("finished_" + name + ".sh",'w') as FINISHED:
FINISHED.write("#!/bin/bash\nset -e\n" +
'grep "#" $(ls ' + res + '/*.res -1 | head -n 1) >' + res + '/all.dat\n' +
"cat " + res + '/*.res | grep -v "#" >>' + res + '/all.dat\n' +
"cp " + __file__ + ' ' + res + '/\n' +
"rm finished_" + name+ '.sh ' + submitfile + '\n')
os.chmod("finished_" + name + ".sh",0755)
| 2,186 | 28.16 | 92 | py |
EQL | EQL-master/EQL-DIV-ICML/__init__.py | 0 | 0 | 0 | py |
|
EQL | EQL-master/EQL-DIV-ICML/result_f0-EQLDIV/createtasksIS-base.py | #!/usr/bin/python
# sample perl script to create SGE jobs (sun grid engine)
# for scanning a parameter space
import os
jobname = "FG1_" # should be short
name = "" + jobname # name of shell scripts
res = "result_fg1a-fg"
mem = "2000"
#maxtime = "4:00:00"
submitfile = "submit_" + name + ".sh"
SUBMIT = open(submitfile,'w')
SUBMIT.write("#/bin/bash\n")
pwd=os.getcwd()
e=4000
i = 0
for iter in (range(0,1)):
for lr in [0.01]:
for l_n in [3]:
for n_n in [10]:
for l1 in [10**(-l1exp/10.0) for l1exp in range(35,60)]:
for l2 in [0]:
for c in [""]:
for regstart in [500]:
regend = 3500
for batch in [20]:
for normal in ([True] if i > 0 else [False, True]):
epochs = e if normal else 1
result = res + "/" if normal else res + "test/"
base_cmd = ["python src/mlfg_final.py -i ", str(i),
" -f ", result,
" -d data/fg1a-n-10k-1.dat.gz",
" --extrapol=data/fg1a-n-5k-1-test.dat.gz",
" --extrapol=data/fg1a-n-5k-1-1_5-test.dat.gz",
" --extrapol=data/fg1a-n-5k-1-2-test.dat.gz",
" --epochs=", str(epochs),
" --lr=", str(lr),
" --l1=", str(l1),
" --l2=", str(l2),
" --layers=", str(l_n),
" --nodes=", str(n_n),
" --batchsize=", str(batch),
" --iterNum=", str(iter),
c,
" --reg_start=", str(regstart),
" --reg_end=", str(regend)
]
if iter==1:
base_cmd.append(" -o")
cmd= "".join(base_cmd)
script_fname = ((name + str(i)) if normal else name + "000_test") + ".sh"
job_fname = ((name + str(i)) if normal else name + "000_test") + ".sh.sub"
if normal:
SUBMIT.write("condor_submit " + job_fname + "\n")
with open(script_fname, 'w') as FILE:
FILE.write("""\
#!/bin/bash
# %(jobname)s%(i)d
cd %(pwd)s
export OMP_NUM_THREADS=1
export PATH=${HOME}/bin:/usr/bin:${PATH}
if %(cmd)s; then
rm -f %(script_fname)s
rm -f %(job_fname)s
else
touch %(job_fname)s.dead
fi
""" % locals())
with open(job_fname, 'w') as FILE:
FILE.write("""\
executable = %(pwd)s/%(script_fname)s
error = %(script_fname)s.err
output = %(script_fname)s.out
log = %(script_fname)s.log
request_memory=%(mem)s
request_cpus=1
queue
""" % locals())
os.chmod(script_fname,0755)
i += 1
SUBMIT.close()
os.chmod(submitfile,0755)
print "Jobs:" , i
with open("re" + submitfile,'w') as RESUBMIT:
RESUBMIT.write("for F in " + name + "*.sh; do qsub $F; done;\n")
with open("finished_" + name + ".sh",'w') as FINISHED:
FINISHED.write("#!/bin/bash\nset -e\n" +
"cat " + res + '/*.res >' + res + '/all.dat\n' +
"mv " + name + "*.* " + res + '/\n' +
"cp " + __file__ + ' ' + res + '/\n' +
"rm finished_" + name+ '.sh\n' +
"rm re" + submitfile + ' \n')
os.chmod("finished_" + name + ".sh",0755)
| 3,626 | 32.897196 | 94 | py |
EQL | EQL-master/EQL-DIV-ICML/src/utils.py | """
Utility functions
"""
import csv
import numpy as np
import theano
from itertools import chain
import os
import gzip
import cPickle
__docformat__ = 'restructedtext en'
def softmax(x):
e_x = np.exp(x - np.max(x))
out = e_x / e_x.sum()
return out
def relative_prob(x):
e_x = (x - np.min(x))
out = e_x / e_x.sum()
return out
def sample_from_dist(pdf,rnd=None):
return samples_from_dist(pdf, 1, rnd)[0]
def samples_from_dist(pdf,n=1,rnd=None):
if rnd is None:
return np.random.choice(len(pdf),n,p=pdf)
else:
return rnd.choice(len(pdf),n,p=pdf)
def samples_distinct_from_dist(pdf,n=1,rnd=None):
samples=list(set(samples_from_dist(pdf,3*n,rnd)))
if len(samples)<n:
samples=list(set(samples_from_dist(pdf,50*n,rnd)))
if len(samples)<n:
return samples
else:
return samples[:n]
def is_sequence(obj):
return hasattr(obj, '__len__') and hasattr(obj, '__getitem__')
def flatten(l):
return list(chain.from_iterable(l))
def normalize(vec):
n = np.linalg.norm(vec)
if n > 0:
return vec / n
else:
return vec
def add_diagonal_limit(mat, val, max_size):
di = np.diag_indices(min(min(mat.shape), max_size), mat.ndim)
mat[di] += val
def wrap_mat_to_vec_func_3(func, *args):
return lambda p1, p2, p3: func(np.expand_dims(p1, axis=0),
np.expand_dims(p2, axis=0),
np.expand_dims(p3, axis=0), *args)
def wrap_mat_to_vec_func_3_0(func, *args):
return lambda p1, p2, p3: func(np.expand_dims(p1, axis=0),
np.expand_dims(p2, axis=0),
np.expand_dims(p3, axis=0), *args)[0]
def wrap_mat_to_vec_func_2(func, *args):
return lambda p1, p2: func(np.expand_dims(p1, axis=0),
np.expand_dims(p2, axis=0),
*args)
def wrap_mat_to_vec_func_2_0(func, *args):
return lambda p1, p2: func(np.expand_dims(p1, axis=0),
np.expand_dims(p2, axis=0),
*args)[0]
def cast_dataset_to_floatX(data_xy):
data_x, data_y = data_xy
return np.asarray(data_x,dtype=theano.config.floatX),np.asarray(data_y,dtype=theano.config.floatX)
def cast_to_floatX(array):
return np.asarray(array, dtype=theano.config.floatX)
def load_from_hold(name):
assert (not name is None)
if os.path.exists(name):
with open(name, 'rb') as f:
data = cPickle.load(f)
print "loaded data from the file " + name
else:
print "Initialising with an empty list"
data = []
return data
def dump_for_hold(data, name):
print "Dumping:", name
assert (not name is None) and (not data is None)
with open(name , 'wb') as f:
cPickle.dump(data, f, protocol=cPickle.HIGHEST_PROTOCOL)
def load_data(dataset):
''' Loads the dataset
:type dataset: string
:param dataset: the path to the dataset
'''
#############
# LOAD DATA #
#############
    # resolve the dataset path if the file is not found directly (nothing is downloaded)
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_path = os.path.join(
os.path.split(__file__)[0],
"..",
"data",
dataset
)
if os.path.isfile(new_path):
dataset = new_path
if not os.path.isfile(dataset):
print 'cannot find dataset', dataset
return
print '... loading data ' + dataset
# Load the dataset
f = gzip.open(dataset, 'rb')
datasets = cPickle.load(f)
f.close()
return datasets
def splitDataSet(inputs, outputs):
assert len(inputs) == len(outputs)
size = len(inputs)
ts = size * 80 / 100
vs = size * 10 / 100
train_set = (inputs[:ts], outputs[:ts])
valid_set = (inputs[ts:ts + vs], outputs[ts:ts + vs])
test_set = (inputs[ts + vs:], outputs[ts + vs:])
return train_set, valid_set, test_set
def splitDataSetShuffle(inputs, outputs,percent_val_test=10):
assert len(inputs) == len(outputs)
size = len(inputs)
shuffle = np.random.permutation(size)
inps = np.asarray(inputs)[shuffle]
outs = np.asarray(outputs)[shuffle]
ts = size * (100-2*percent_val_test) / 100
vs = size * percent_val_test / 100
train_set = (inps[:ts], outs[:ts])
valid_set = (inps[ts:ts + vs], outs[ts:ts + vs])
test_set = (inps[ts + vs:], outs[ts + vs:])
return train_set, valid_set, test_set
def splitDataSetNoTest(inputs,outputs):
assert len(inputs) == len(outputs)
size=len(inputs)
ts=size*90/100
train_set=(inputs[:ts],outputs[:ts])
valid_set=(inputs[ts:],outputs[ts:])
return train_set, valid_set
def addTestSet(train_val,test_set):
return train_val[0], train_val[1], test_set
# cuts dataset into those where the input vectors that have a maxnorm smaller or equal to cut and the rest
def cutDataSet(inputs,outputs, cut):
sel = np.linalg.norm(inputs,ord=np.inf,axis=1) <= cut
# sel = np.array([not(all(x<=cut) and all(x>=-cut)) for x in inputs])
return (inputs[sel], outputs[sel]),(inputs[np.logical_not(sel)], outputs[np.logical_not(sel)])
def splitTrainValSets(inputs,outputs,cut):
data_full=splitDataSetNoTest(inputs,outputs)
(train_all,val_all) = data_full
dat_sel = cutDataSet(train_all[0],train_all[1], cut)
return data_full, dat_sel
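# Hedged usage sketch (added for illustration, not used elsewhere in this module):
# split a toy dataset into the points whose max-norm lies inside the
# interpolation region (|x|_inf <= cut) and the remaining points; the toy
# arrays and the helper name are made up.
def _cut_dataset_example(cut=1.0):
    xs = np.array([[0.5, -0.2], [1.5, 0.1], [-0.3, 0.9]])
    ys = np.array([[1.0], [2.0], [3.0]])
    (xs_in, ys_in), (xs_out, ys_out) = cutDataSet(xs, ys, cut)
    # with cut=1.0 only the second row exceeds the max-norm threshold
    return (xs_in, ys_in), (xs_out, ys_out)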
def addNoise(data, size):
noise = np.random.normal(0,size,data.shape)
return data + noise
def loadState(filename):
with open(filename, "rb") as f:
return cPickle.load(f)
def readCSVTable(filename, dtype='|S40'):
data = []
comments = []
with open(filename, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter='\t')
for row in reader:
if row[0][0] != '#':
data.append(row)
else:
comments.append(row)
return (np.asarray(data, dtype=dtype), comments)
def getIdx(header, colname): return np.asscalar(np.where(np.asarray(header) == colname)[0]) | 6,220 | 27.277273 | 106 | py |
EQL | EQL-master/EQL-DIV-ICML/src/mlfg_final.py | """
Multilayer function graph for system identification.
This is able to learn typical algebraic expressions with
maximal multiplicative/application term length given by the number of layers.
We use regression with square error and
L1 norm on weights to get a sparse representation.
It follows the multilayer perceptron style, but has more complicated
nodes.
.. math:: Each layer is
y(x) = {f^{(1)}(W^{(1)} x), f^{(2)}(W^{(2)} x), .., f^{(k)}(W^{(k)} x), g^{(1)}(W^{(k+1)}x, W^{(k+2)}x) }
We groups the weight matrices W1-Wk etc.
The final layer contains a division
"""
import time
import sys
import timeit
import getopt
import random
import numpy
import theano.tensor as T
from theano import In
from theano.ifelse import ifelse
import lasagne.updates as Lupdates
# if problems with importing
# http://stackoverflow.com/questions/36088609/python-lasagne-importerror-cannot-import-batchnormlayer
from collections import OrderedDict
from utils import *
#from theano import config
#config.floatX = 'float64'
__docformat__ = 'restructedtext en'
class DivisionRegression(object):
"""Regression layer (linear regression with division (numerator/denomator)
"""
def __init__(self, rng, inp, n_in, n_out, div_thresh, W=None, b=None):
""" Initialize the parameters
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type inp: theano.tensor.TensorType
:param inp: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels/outputs lie
:type div_thresh: T.scalar
        :param div_thresh: threshold variable for the "soft" division
"""
# attention: Formula : x*W + b where x is a row vector
if W is None:
# initialize with random weights W as a matrix of shape (n_in, n_out)
W_values = numpy.asarray(
rng.normal(loc=0, scale=numpy.sqrt(1.0 / (n_in + 2*n_out)), size=(n_in, 2*n_out)),
dtype=theano.config.floatX
)
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
# initialize the biases b as a vector of 2 times n_out 1s
# (we use one to implement a more linear activation at init time)
b = theano.shared(value=numpy.ones((2*n_out,), dtype=theano.config.floatX), name='b', borrow=True)
self.W = W
self.b = b
node_inputs = T.dot(inp, self.W) + self.b
# node_inputs is composed of input 1 and input 2 after another
# input1 = node_inputs[0:n_out]; input2 = node_inputs[n_out:2*n_out]
numerator = node_inputs[:, 0:n_out]
denominator = node_inputs[:, n_out:2*n_out]
self.output = self.activation(denominator, div_thresh) * numerator
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = inp
self.L1 = abs(self.W).sum() + 0.01*abs(self.b).sum()
self.L2_sqr = T.sum(self.W ** 2) + 0.01*T.sum(self.b**2)
self.penalty = T.sum((div_thresh - denominator)*(denominator < div_thresh))
self.extrapol_loss = T.sum((abs(self.output)-10)*(abs(self.output)>10) + (div_thresh - denominator)*(denominator < div_thresh))
def activation(self,x,thresh):
return T.switch(x < thresh, 0.0, 1.0/x )
def get_params(self):
param_fun = theano.function(inputs=[], outputs=self.params)
return [np.asarray(p) for p in param_fun()]
def set_params(self, newParams):
newb = T.vector('newb')
newW = T.matrix('newW')
param_fun = theano.function(inputs=[newW, newb], outputs=None, updates=[(self.W, newW), (self.b, newb)])
return param_fun(newParams[0], newParams[1])
def get_state(self):
return self.get_params()
def set_state(self, newState):
self.set_params(newState)
def get_weights(self):
w_fun = theano.function(inputs=[], outputs=self.W)
return w_fun()
def set_out_weights(self, row, vec): # (row)
r = T.iscalar('row')
new = T.vector('new')
up_fun = theano.function(inputs=[r, new], outputs=self.W, updates=[(self.W, T.set_subtensor(self.W[r, :], new))])
up_fun(row, vec)
def loss(self, y):
"""Return the mean square error of the prediction.
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct value
"""
return T.mean(T.sqr(self.output - y))
class FGLayer(object):
def __init__(self, rng, inp, n_in, n_per_base, layer_idx,
basefuncs1=None, basefuncs2=None, W=None, b=None):
"""
Hidden layer of Multi layer function graph: units are fully-connected and have
the functions given by basefunc1 (arity 1) and basefunc2 (arity 2).
Weight matrix W is of shape (n_in+1,#f1*n_per_base+2*#f2*n_per_base),
where #f1=size(basefunc1), #f2=size(basefunc2)
output is computed as: basefunc1[i](dot(input,W))
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type inp: theano.tensor.matrix
:param inp: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_per_base: int
:param n_per_base: number of nodes per basis function
:type basefuncs1: [int]
        :param basefuncs1: index of base functions of arity 1 to use (may contain duplicates)
(list: [sin cos logistic identity])
:type basefuncs2: [int]
        :param basefuncs2: index of base functions of arity 2 to use (may contain duplicates)
(list: [mult condition])
"""
#TODO: get rid of rectlin and div2
if basefuncs1 is None:
basefuncs1 = [0, 1, 2]
if basefuncs2 is None:
basefuncs2 = [0]
self.basefuncs1 = basefuncs1
self.basefuncs2 = basefuncs2
self.basefuncs1_uniq = list(set(basefuncs1))
self.n_basefuncs1_uniq = len(self.basefuncs1_uniq)
self.n_per_base = n_per_base
self.funcs1 = ['id', 'sin', 'cos']
self.funcs2 = ['mult']
self.layer_idx = layer_idx
self.input = inp
self.n_base1 = len(basefuncs1)
self.n_base2 = len(basefuncs2)
n_out = (self.n_base1 + self.n_base2) * n_per_base
n_w_out = (self.n_base1 + 2 * self.n_base2) * n_per_base
self.n_out = n_out
# attention: Formula : g(x*W + b) where x is a row vector
        # `W` is initialized with `W_values`, drawn from a zero-mean normal
        # distribution with std sqrt(1/(n_in + n_w_out)) (see rng.normal below).
        # May need other values here.
if W is None:
W_values = numpy.asarray(
#rng.uniform(low=-numpy.sqrt(1. / (n_in + n_w_out)), high=numpy.sqrt(1. / (n_in + n_w_out)),
# size=(n_in, n_w_out)),
rng.normal(loc=0, scale=numpy.sqrt(1.0 / (n_in + n_w_out)), size=(n_in, n_w_out)),
dtype=theano.config.floatX
)
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_w_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
node_inputs = T.dot(inp, self.W) + self.b
# node_inputs.reshape((notes.shape[0],n_base1+2*n_base2,n_per_base))
z = node_inputs[:, :n_per_base * self.n_base1]
z1 = node_inputs[:, n_per_base * self.n_base1:n_per_base * (self.n_base1 + self.n_base2)]
z2 = node_inputs[:, n_per_base * (self.n_base1 + self.n_base2):]
node_type1_values = numpy.asarray(numpy.repeat(basefuncs1, n_per_base), dtype=np.int32)
self.nodes_type1 = theano.shared(value=node_type1_values,name='node_type1', borrow=False)
node_type2_values = numpy.asarray(numpy.repeat(basefuncs2, n_per_base), dtype=np.int32)
self.nodes_type2 = theano.shared(value=node_type2_values,name='node_type2', borrow=False)
fun1 = T.switch(T.eq(self.nodes_type1, 0), z, # identity
T.switch(T.eq(self.nodes_type1, 1), T.sin(z), # sine
T.cos(z))) # cosine
# further functions could be maxout, sqrt, exp?
fun2 = T.switch(T.eq(self.nodes_type2, 0), z1 * z2, # multiplication
# T.switch(T.eq(self.note_type2,1), z2 / (1 + T.exp(-z1)), # condition (does not work)
z1)
# StepOp(0.1)(z1) * z2, # if z1<0 then z2 else 0
self.output = T.concatenate([fun1, fun2], axis=1)
# parameters of the model
self.params = [self.W, self.b]
self.L1 = abs(self.W).sum() + 0.01*abs(self.b).sum()
self.L2_sqr = T.sum(self.W ** 2) + 0.01*T.sum(self.b**2)
def get_params(self):
fun = theano.function(inputs=[], outputs=self.params)
return [np.asarray(p) for p in fun()]
def set_params(self, newParams):
self.W.set_value(newParams[0])
self.b.set_value(newParams[1])
def get_state(self):
# fun=theano.function(inputs=[],outputs=[self.nodes_type1,self.nodes_type2])
return self.get_params() + [self.nodes_type1.get_value(), self.nodes_type2.get_value()]
def set_state(self, newState):
self.set_params(newState)
if len(newState) > 2:
self.nodes_type1.set_value(newState[2])
self.nodes_type2.set_value(newState[3])
else:
print "Not full reload: missing node-types"
def get_n_type1(self):
return self.n_base1 * self.n_per_base
def get_n_type2(self):
return self.n_base2 * self.n_per_base
def get_weights(self):
# w_fun=theano.function(inputs=[],outputs=self.W)
return self.W.get_value()
def get_in_weights(self, idx): # (column)
node_idx = T.iscalar('node-idx')
w_fun = theano.function(inputs=[node_idx], outputs=self.W[:, node_idx])
return w_fun(idx)
def set_out_weights(self, row, vec): # (row)
r = T.iscalar('row')
new = T.vector('new')
up_fun = theano.function(inputs=[r, new], outputs=self.W, updates=[(self.W, T.set_subtensor(self.W[r, :], new))])
up_fun(row, vec)
def set_in_weights(self, col, vec): # (col)
c = T.iscalar('col')
new = T.vector('new')
up_fun = theano.function(inputs=[c, new], outputs=self.W, updates=[(self.W, T.set_subtensor(self.W[:, c], new))])
up_fun(col, vec)
def get_bias(self, idx):
node_idx = T.iscalar('node-idx')
w_fun = theano.function(inputs=[node_idx], outputs=self.b[node_idx])
return w_fun(idx)
def set_bias(self, idx, value):
node_idx = T.iscalar('node-idx')
new = T.scalar('new')
up_fun = theano.function(inputs=[node_idx, new], outputs=None,
updates=[(self.b, T.set_subtensor(self.b[node_idx], new))])
up_fun(idx, value)
def get_nodes_type1(self):
# n_fun=theano.function(inputs=[], outputs=self.nodes_type1)
return self.nodes_type1.get_value()
def get_nodes_type2(self):
# n_fun=theano.function(inputs=[], outputs=self.nodes_type2)
return self.nodes_type2.get_value()
def get_node_type1(self, idx):
n_fun = theano.function(inputs=[], outputs=self.nodes_type1[idx])
return n_fun()
def set_node_type1(self, idx, typ):
node_idx = T.iscalar('node-idx')
new = T.iscalar('new')
up_fun = theano.function(inputs=[node_idx, new], outputs=None,
updates=[(self.nodes_type1, T.set_subtensor(self.nodes_type1[node_idx], new))])
up_fun(idx, typ)
def getNodeFunctions(self, withnumbers=True):
def name(func, idx):
if withnumbers:
return func + '-' + str(self.layer_idx) + '-' + str(idx)
else:
return func
return [name(self.funcs1[bf], i) for (i, bf) in
zip(range(1, len(self.get_nodes_type1()) + 1), self.get_nodes_type1())] + \
[name(self.funcs2[bf], i) for (i, bf) in
zip(range(1, len(self.get_nodes_type2()) + 1), self.get_nodes_type2())]
def getWeightCorrespondence(self):
def name(func, idx):
return func + '-' + str(self.layer_idx) + '-' + str(idx)
return [name(self.funcs1[bf], i) for (i, bf) in
zip(range(1, len(self.get_nodes_type1()) + 1), self.get_nodes_type1())] + \
[name(self.funcs2[bf], i) + ':' + '1' for (i, bf) in
zip(range(1, len(self.get_nodes_type2()) + 1), self.get_nodes_type2())] + \
[name(self.funcs2[bf], i) + ':' + '2' for (i, bf) in
zip(range(1, len(self.get_nodes_type2()) + 1), self.get_nodes_type2())]
class MLFG(object):
"""Multi-Layer Function Graph aka EQLDiv
    A multilayer function graph, like an artificial neural network model
that has one or more layers with hidden units of various activation functions.
"""
def __init__(self, rng, n_in, n_per_base, n_out, n_layer=1,
basefuncs1=None, basefuncs2=None, gradient=None, with_shortcuts=False):
"""Initialize the parameters for the multilayer function graph
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_layer: int
:param n_layer: number of hidden layers
:type n_per_base: int
:param n_per_base: number of nodes per basis function see FGLayer
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
:type basefuncs1: [int]
:param basefuncs1: see FGLayer
:type basefuncs2: [int]
:param basefuncs2: see FGLayer
:type gradient: string
        :param gradient: type of gradient descent algorithm (None=="sgd", "adam", "adadelta", "rmsprop", "nag")
:type with_shortcuts: bool
:param with_shortcuts: whether to use shortcut connections (output is connected to all units)
"""
self.input = T.matrix('input') # the data is presented as vector input
self.labels = T.matrix('labels') # the labels are presented as vector of continous values
self.rng = rng
self.n_layers = n_layer
self.hidden_layers = []
self.params = []
self.n_in = n_in
self.n_out = n_out
self.with_shortcuts = with_shortcuts
self.fixL0=False
for l in xrange(n_layer):
if l == 0:
layer_input = self.input
n_input = n_in
else:
layer_input = self.hidden_layers[l - 1].output
n_input = self.hidden_layers[l - 1].n_out
hiddenLayer = FGLayer(
rng=rng,
inp=layer_input,
n_in=n_input,
n_per_base=n_per_base,
basefuncs1=basefuncs1,
basefuncs2=basefuncs2,
layer_idx=l,
)
self.hidden_layers.append(hiddenLayer)
self.params.extend(hiddenLayer.params)
div_thresh = T.scalar("div_thresh")
# The linear output layer, either it gets as input the output of ALL previous layers
if self.with_shortcuts:
output_layer_inp = T.concatenate([l.output for l in reversed(self.hidden_layers)], axis=1)
output_layer_n_in = sum([l.n_out for l in self.hidden_layers])
else: # or just of the last hidden layer
output_layer_inp = self.hidden_layers[-1].output
output_layer_n_in = self.hidden_layers[-1].n_out
self.output_layer = DivisionRegression(
rng=rng,
inp=output_layer_inp,
n_in=output_layer_n_in,
n_out=n_out,
div_thresh=div_thresh
)
self.params.extend(self.output_layer.params)
self.evalfun = theano.function(inputs=[self.input, In(div_thresh, value=0.0001)], outputs=self.output_layer.output)
L1_reg = T.scalar('L1_reg')
L2_reg = T.scalar('L2_reg')
fixL0 = T.bscalar('fixL0')
self.L1 = self.output_layer.L1 + sum([l.L1 for l in self.hidden_layers])
self.L2_sqr = self.output_layer.L2_sqr + sum([l.L2_sqr for l in self.hidden_layers])
self.penalty = self.output_layer.penalty
self.loss = self.output_layer.loss
self.errors = self.loss
self.cost = (self.loss(self.labels) + L1_reg * self.L1 + L2_reg * self.L2_sqr + self.penalty)
#Extrapol penalty
self.extrapol_cost = self.output_layer.extrapol_loss
learning_rate = T.scalar('learning_rate')
def process_updates(par, newp):
# print par.name
if par.name == "W":
# if fixL0 is True, then keep small weights at 0
return par, ifelse(fixL0, T.switch(T.abs_(par) < 0.001, par*0, newp), newp)
return par, newp
print "Gradient:", gradient
update = None
if gradient=='sgd+' or gradient=='sgd' or gradient==None:
gparams = [T.grad(self.cost, param) for param in self.params]
update = OrderedDict([(param, param - (learning_rate * gparam).clip(-1.0, 1.0))
for param, gparam in zip(self.params, gparams)])
elif gradient=='adam':
update = Lupdates.adam(self.cost, self.params, learning_rate, epsilon=1e-04)
elif gradient == 'adadelta':
update = Lupdates.adadelta(self.cost, self.params,learning_rate)
elif gradient == 'rmsprop':
update = Lupdates.rmsprop(self.cost, self.params,learning_rate)
elif gradient == 'nag':
update = Lupdates.nesterov_momentum(self.cost,self.params,learning_rate)
else:
assert("unknown gradient " + gradient)
#Extrapol sanity gradient computation:
extrapol_updates = Lupdates.adam(self.extrapol_cost, self.params, learning_rate, epsilon=1e-04)
updates = [process_updates(*up) for up in update.items()]
self.train_model = theano.function(
inputs=[self.input, self.labels, L1_reg, L2_reg, fixL0, learning_rate, div_thresh],
outputs=self.cost,
updates=updates,
)
# avoid too large outputs in extrapolation domain
self.remove_extrapol_error = theano.function(
inputs=[self.input, learning_rate, div_thresh],
outputs=self.extrapol_cost,
updates=extrapol_updates,
)
self.test_model = theano.function(
inputs=[self.input, self.labels, In(div_thresh, value=0.0001)],
outputs=self.errors(self.labels),
)
self.validate_model = theano.function(
inputs=[self.input, self.labels, In(div_thresh, value=0.0001)],
outputs=self.errors(self.labels),
)
self.L1_loss = theano.function(
inputs=[],
outputs=self.L1,
)
self.MSE = theano.function(
inputs=[self.input, self.labels, In(div_thresh, value=0.0001)],
outputs=self.errors(self.labels),
)
@staticmethod
def vec_norm(vec):
return T.sqrt(T.sum(T.sqr(vec)))
@staticmethod
def vec_normalize(vec):
norm = MLFG.vec_norm(vec)
return vec / (norm + 1e-10)
def get_params(self):
paramfun = theano.function(inputs=[], outputs=self.params)
return paramfun()
def get_state(self):
return [l.get_state() for l in self.hidden_layers] + [self.output_layer.get_state()]
def set_state(self, newState):
for (s, l) in zip(newState, self.hidden_layers + [self.output_layer]):
l.set_state(s)
def evaluate(self, input):
return self.evalfun(cast_to_floatX(input))
def get_n_units_type1(self):
return sum([l.get_n_type1() for l in self.hidden_layers])
def get_n_units_type2(self):
return sum([l.get_n_type2() for l in self.hidden_layers])
# sparsity
def get_num_active_units(self, thresh=0.1):
# count units with nonzero input * output weights (only non-identity units)
# in principle one could make a backward scan and identify units without path to the output, but
# we keep it simpler.
total = 0
for layer_idx in range(0, self.n_layers):
layer = self.hidden_layers[layer_idx]
in_weights = layer.get_weights()
#bias = layer.get_biasForNumActive()
out_weights = self.hidden_layers[layer_idx + 1].get_weights() if layer_idx + 1 < self.n_layers \
else self.output_layer.get_weights()
# noinspection PyTypeChecker
in_weight_norm = np.linalg.norm(in_weights, axis=0, ord=1)
out_weight_norm = np.linalg.norm(out_weights, axis=1, ord=1)
            # counting non-identity unary units
# noinspection PyTypeChecker
for i in range(layer.get_n_type1()):
                if (out_weight_norm[i]*in_weight_norm[i] > thresh*thresh and layer.get_nodes_type1()[i] != 0):  # nodes_type1 is a vector like [0,..,0,1,..,1,2,..,2]
total += 1
#print layer_idx, layer.get_nodes_type1()[i], out_weight_norm[i], in_weight_norm[i]
            # Note that multiplication units can also act as linear units if one of their inputs is constant.
            # Here the norm of the input weight is set to 0 if one of the inputs is below thresh.
for i in range(layer.get_n_type2()):
if (in_weight_norm[layer.get_n_type1() + i] > thresh and \
in_weight_norm[layer.get_n_type1() + layer.get_n_type2() + i] > thresh):
in_weight_norm[layer.get_n_type1() + i] += in_weight_norm[layer.get_n_type1() + layer.get_n_type2() + i]
else:
in_weight_norm[layer.get_n_type1() + i] = 0
            # counting non-identity multiplicative units
for i in range(layer.get_n_type1(), layer.get_n_type1() + layer.get_n_type2()):
if (out_weight_norm[i]*in_weight_norm[i] > thresh*thresh):
total += 1
#print layer_idx, "mult", out_weight_norm[i], in_weight_norm[i]
return total
def get_active_units_old(self, thresh=0.05):
# quick hack: count units with nonzero output weights not counting the inputs
total = 0
for layer_idx in range(1, self.n_layers + 1):
layer = self.hidden_layers[layer_idx] if layer_idx < self.n_layers else self.output_layer
# noinspection PyTypeChecker
out_weight_norm = np.linalg.norm(layer.get_weights(), axis=1, ord=1)
total += sum(out_weight_norm > thresh)
return total
def test_mlfg(datasets, learning_rate=0.01, L1_reg=0.001, L2_reg=0.00, n_epochs=200,
batch_size=20, n_layer=1, n_per_base=5, basefuncs1=None, basefuncs2=None,
with_shortcuts=False, id=None,
classifier=None,
gradient=None,
init_state=None,
verbose=True, param_store=None,
reg_start=0, reg_end=None,
validate_every=50,
k=100
):
"""
:type datasets: ((matrix,matrix),(matrix,matrix),(matrix,matrix))
:param datasets: ((train-x,train-y),(valid-x,valid-y),(test-x,test-y))
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
    gradient)
:type L1_reg: float
:param L1_reg: L1-norm's weight when added to the cost (see
regularization)
:type L2_reg: float
:param L2_reg: L2-norm's weight when added to the cost (see
regularization)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type param_store: []
:param param_store: if not None then the weights of each episode are stored here
:type id: int
    :param id: id of run (also used as random seed); if None then the time is used as seed
:param init_state: initial state for classifier to use
"""
train_set_x, train_set_y = cast_dataset_to_floatX(datasets[0])
valid_set_x, valid_set_y = cast_dataset_to_floatX(datasets[1])
MAX_INPUT_VAL = np.max(abs(train_set_x))
print "Max input value is: ", MAX_INPUT_VAL
#extra_set_x, extra_set_y = cast_dataset_to_floatX(extrapol_dataset[0]) #0 has to be used and extrapol_dataset[1] has null entry
#extra_set_x has dimensions 5000x4 for cp_new dataset ... verified by the following print statement
#print "extrapol dim: ", len(extra_set_x), len(extra_set_x[0]), len(extra_set_y), len(extra_set_y[0])
if len(datasets) > 2:
test_set_x, test_set_y = cast_dataset_to_floatX(datasets[2])
n_test_batches = test_set_x.shape[0] / batch_size
else:
test_set_x = test_set_y = None
n_test_batches = 0
n_train_batches = train_set_x.shape[0] / batch_size
n_valid_batches = valid_set_x.shape[0] / batch_size
inputdim = len(datasets[0][0][0])
outputdim = len(datasets[0][1][0])
if verbose: print "Input/output dim:", (inputdim, outputdim)
if verbose: print "Training set, test set:", (train_set_x.shape[0], test_set_x.shape[0])
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
rng = numpy.random.RandomState(int(time.time()) if id is None else id)
if classifier is None:
classifier = MLFG(
rng=rng,
n_in=inputdim,
n_per_base=n_per_base,
n_out=outputdim,
n_layer=n_layer,
gradient=gradient,
basefuncs1=basefuncs1,
basefuncs2=basefuncs2,
with_shortcuts=with_shortcuts,
)
if init_state:
classifier.set_state(init_state)
###############
# TRAIN MODEL #
###############
print '... training'
sys.stdout.flush()
# early-stopping parameters
improvement_threshold = 0.99 # a relative improvement of this much is considered significant
best_validation_error = numpy.inf
this_validation_error = numpy.inf
best_epoch = 0
test_score = 0.
best_state = classifier.get_state()
start_time = timeit.default_timer()
epoch = 0
done_looping = False
train_errors = []
extrapol_train_errors = []
validation_errors = []
test_errors = []
MSE = []
L1 = []
if param_store is not None:
param_store.append(classifier.get_params())
while (epoch < n_epochs) and (not done_looping):
#print epoch #remove
special_penalty = 0
epoch = epoch + 1
reg_factor = 0.0
if reg_start < epoch <= reg_end:
reg_factor = 1.0
L1.append([epoch, np.asscalar(classifier.L1_loss())])
if (epoch - reg_start)%k == 0 and epoch < reg_end:
special_penalty = 1
temp = zip(list(train_set_x),list(train_set_y))
random.shuffle(temp)
train_set_x, train_set_y = zip(*temp)
train_set_x = numpy.asarray(train_set_x)
train_set_y = numpy.asarray(train_set_y)
del temp[:]
minibatch_avg_cost = 0.0
for minibatch_index in xrange(n_train_batches):
index = minibatch_index
minibatch_avg_cost += classifier.train_model(
input=train_set_x[index * batch_size: (index + 1) * batch_size],
labels=train_set_y[index * batch_size: (index + 1) * batch_size],
L1_reg=L1_reg * reg_factor,
L2_reg=L2_reg * reg_factor,
fixL0 = epoch > reg_end,
div_thresh = 1.0/np.sqrt(epoch + 1),
learning_rate=learning_rate,
)
if special_penalty == 1:
#max input val would ensure we don't have poles anywhere in twice the interpolation region
n_num, n_in = train_set_x.shape
extra_set_x = (2*np.random.rand(n_num, n_in)-1.0)*MAX_INPUT_VAL
assert extra_set_x.shape == train_set_x.shape
for x in range(n_num):
for y in range(n_in):
if (extra_set_x[x][y] >=0.0):
extra_set_x[x][y] += MAX_INPUT_VAL
else:
extra_set_x[x][y] -= MAX_INPUT_VAL
extrapol_error_training = 0.0
for minibatch_index in xrange(n_train_batches):
index = minibatch_index
extrapol_error_training += classifier.remove_extrapol_error(
input=extra_set_x[index * batch_size: (index + 1) * batch_size],
div_thresh = 1.0/np.sqrt(epoch + 1),
learning_rate=learning_rate,
)
extrapol_train_errors.append([epoch, extrapol_error_training/n_train_batches])
train_errors.append([epoch, minibatch_avg_cost/n_train_batches])
if param_store is not None:
param_store.append(classifier.get_params())
if epoch == 1 or epoch % validate_every == 0 or epoch == n_epochs:
this_validation_errors = [classifier.validate_model(
input=valid_set_x[index * batch_size:(index + 1) * batch_size],
labels=valid_set_y[index * batch_size:(index + 1) * batch_size])
for index in xrange(n_valid_batches)]
this_validation_error = np.asscalar(numpy.mean(this_validation_errors))
validation_errors.append([epoch, this_validation_error])
this_MSE = [classifier.MSE(input=train_set_x[index*batch_size:(index + 1)*batch_size],
labels=train_set_y[index*batch_size: (index + 1)*batch_size]) for index in xrange(n_train_batches)]
MSE.append([epoch, np.asscalar(np.mean(this_MSE))])
if verbose:
print(
'epoch %i, minibatch %i/%i, minibatch_avg_cost %f validation error %f' %
(
epoch,
minibatch_index + 1,
n_train_batches,
minibatch_avg_cost,
this_validation_error
)
)
# test it on the test set
if test_set_x is not None:
test_losses = [classifier.test_model(
input=test_set_x[index * batch_size:(index + 1) * batch_size],
labels=test_set_y[index * batch_size:(index + 1) * batch_size])
for index in xrange(n_test_batches)]
this_test_score = np.asscalar(numpy.mean(test_losses))
test_errors.append([epoch, this_test_score])
else:
this_test_score = np.inf
# if we got the best validation score until now
if this_validation_error < best_validation_error:
if this_validation_error < best_validation_error * improvement_threshold:
best_state = classifier.get_state()
best_validation_error = this_validation_error
best_epoch = epoch
test_score = this_test_score
if verbose:
print(('epoch %i, minibatch %i/%i, test error of '
'best model %f') %
(epoch, minibatch_index + 1, n_train_batches,
test_score))
if epoch % 100 == 0:
print "Epoch: ", epoch, "\tBest val error: ", best_validation_error, "\tcurrent val error: ", this_validation_error
sys.stdout.flush()
end_time = timeit.default_timer()
time_required = (end_time - start_time) / 60.
print(('Optimization complete. Best validation score of %f '
'obtained at epoch %i, with test performance %f ') %
(best_validation_error, best_epoch + 1, test_score))
print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % time_required)
if verbose:
numpy.set_printoptions(precision=4, suppress=True)
print(classifier.get_params())
return {'train_losses': numpy.asarray(train_errors),
'extrapol_train_losses':numpy.asarray(extrapol_train_errors),
'MSE':numpy.asarray(MSE),
'L1':numpy.asarray(L1),
'val_errors': numpy.asarray(validation_errors),
'test_errors': numpy.asarray(test_errors),
'classifier': classifier,
'test_score': test_score,
'val_score': this_validation_error,
'best_val_score': best_validation_error,
'best_epoch': best_epoch,
'best_state': best_state,
'num_active': classifier.get_num_active_units(),
'runtime': time_required
}
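# Hedged sketch (added for illustration, not part of the original script):
# a minimal call to test_mlfg on a toy regression problem y = x0 * x1.
# All sizes are deliberately tiny; this is illustrative, not a benchmark,
# and the helper name _toy_run is made up.
def _toy_run(epochs=20):
    toy_rng = numpy.random.RandomState(0)
    x = toy_rng.uniform(-1, 1, size=(200, 2))
    y = (x[:, 0] * x[:, 1]).reshape(-1, 1)
    toy_datasets = splitDataSetShuffle(x, y)  # (train, valid, test) from utils
    return test_mlfg(datasets=toy_datasets, n_epochs=epochs, n_layer=1,
                     n_per_base=2, basefuncs1=[0, 1, 2], basefuncs2=[0],
                     batch_size=20, learning_rate=0.01,
                     L1_reg=0.001, L2_reg=0.0, verbose=False)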
def usage():
print(sys.argv[0] + "[-i id -d dataset -p extrapolationdataset -l layers -e epochs -n nodes -r learningrate --initfile=file --batchsize=k --l1=l1reg --l2=l2reg --shortcut --reg_start=start --reg_end=end --resfolder -v]")
if __name__ == "__main__":
dataset_file = None
extra_pol_test_sets = []
extra_pols = []
n_epochs = 1200
n_layers = 3
n_nodes = 10
batch_size = 20
init_file = None
init_state = None
gradient = "sgd"
L1_reg = 0.001
L2_reg = 0.001
learning_rate = 0.01
with_shortcuts = False
reg_start = 0
reg_end = None
output = False
verbose = 0
k=99999999999
id = np.random.randint(0, 1000000)
result_folder = "./"
basefuncs1 = [0, 1, 2]
iterNum=0
theano.gof.compilelock.set_lock_status(False)
try:
opts, args = getopt.getopt(sys.argv[1:], "hv:i:d:p:l:e:n:f:co",
["help", "verbose=", "id=", "dataset=", "extrapol=", "layers=", "epochs=",
"nodes=", "l1=", "l2=", "lr=", "resfolder=",
"batchsize=", "initfile=", "gradient=",
"reg_start=", "reg_end=", "shortcut", "output","k_update=","iterNum="
])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-v", "--verbose"):
verbose = int(arg)
elif opt in ("-i", "--id"):
id = int(arg)
elif opt in ("-d", "--dataset"):
dataset_file = arg
elif opt in ("-p", "--extrapol"):
extra_pol_test_sets.append(arg)
elif opt in ("-l", "--layers"):
n_layers = int(arg)
elif opt in ("-e", "--epochs"):
n_epochs = int(arg)
elif opt in ("--batchsize"):
batch_size = int(arg)
elif opt in ("--l1"):
L1_reg = float(arg)
elif opt in ("--l2"):
L2_reg = float(arg)
elif opt in ("--lr"):
learning_rate = float(arg)
elif opt in ("-n", "--nodes"):
n_nodes = int(arg)
elif opt in ("-c", "--shortcut"):
with_shortcuts = True
elif opt in ("--initfile"):
init_file = arg
elif opt in ("--gradient"):
gradient= arg
elif opt in ("--reg_start"):
reg_start = int(arg)
elif opt in ("--reg_end"):
reg_end = int(arg)
elif opt in ("-o", "--output"):
output = True
elif opt in ("-f", "--resfolder"):
result_folder = arg
elif opt in ("--iterNum"):
iterNum = int(arg)
elif opt in ("--k_update"):
k = int(arg)
# load dataset
if not dataset_file:
print("provide datasetfile!")
usage()
exit(1)
dataset = load_data(dataset_file)
# load extrapolation test
if len(extra_pol_test_sets) > 0:
if verbose > 0:
print("do also extrapolation test(s)!")
extra_pols = [load_data(test_set) for test_set in extra_pol_test_sets]
if init_file:
with open(init_file, 'rb') as f:
init_state = cPickle.load(f)
print "load initial state from file " + init_file
if not os.path.exists(result_folder):
os.makedirs(result_folder)
name = result_folder + "/" + str(id)
print ("Results go into " + result_folder)
result = test_mlfg(datasets=dataset, k=k, n_epochs=n_epochs, verbose=verbose > 0, learning_rate=learning_rate,
L1_reg=L1_reg, L2_reg=L2_reg, basefuncs2=[0], basefuncs1=basefuncs1, n_layer=n_layers,
n_per_base=n_nodes, id=id, gradient=gradient,
batch_size=batch_size, init_state=init_state,
reg_start=reg_start, reg_end=reg_end, with_shortcuts=with_shortcuts,
)
classifier = result['classifier']
with open(name + '.best_state', 'wb') as f:
cPickle.dump(result['best_state'], f, protocol=cPickle.HIGHEST_PROTOCOL)
with open(name + '.last_state', 'wb') as f:
cPickle.dump(classifier.get_state(), f, protocol=cPickle.HIGHEST_PROTOCOL)
extra_scores = []
extra_scores_best = []
for extra in extra_pols:
extra_set_x, extra_set_y = cast_dataset_to_floatX(extra[0])
extra_scores.append(classifier.test_model(input=extra_set_x, labels=extra_set_y))
# also for best_state
classifier.set_state(result['best_state'])
for extra in extra_pols:
extra_set_x, extra_set_y = cast_dataset_to_floatX(extra[0])
extra_scores_best.append(classifier.test_model(input=extra_set_x, labels=extra_set_y))
result_line = ""
with open(name + '.res', 'w') as f:
f.write('#C k iter layers epochs nodes lr L1 L2 shortcut batchsize regstart regend' +
' id dataset gradient numactive bestnumactive bestepoch runtime' +
"".join([' extrapol' + str(i) for i in range(1, len(extra_scores) + 1)]) +
"".join([' extrapolbest' + str(i) for i in range(1, len(extra_scores_best) + 1)]) +
' valerror valerrorbest testerror\n')
f.write('# extra datasets: ' + " ".join(extra_pol_test_sets) + '\n')
result_line = [str(k), str(iterNum), str(n_layers), str(n_epochs), str(n_nodes), str(learning_rate),
str(L1_reg), str(L2_reg),
str(with_shortcuts), str(batch_size), str(reg_start), str(reg_end),
str(id), dataset_file, gradient,
str(result['num_active']), str(classifier.get_num_active_units()),
str(result['best_epoch']), str(result['runtime'])] + \
[str(e) for e in extra_scores] + \
[str(e) for e in extra_scores_best] + \
[str(result['val_score']), str(result['best_val_score']), str(result['test_score'])]
f.write(str.join('\t', result_line) + '\n')
with open(name + '.validerror', 'wb') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "val_error"]])
a.writerows([["# "] + result_line])
a.writerows(result['val_errors'])
with open(name + '.MSE', 'wb') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "MSE"]])
a.writerows([["# "] + result_line])
a.writerows(result['MSE'])
with open(name + '.L1', 'wb') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "L1"]])
a.writerows([["# "] + result_line])
a.writerows(result['L1'])
output=1
if output:
with open(name + '.trainloss', 'wb') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "train_loss"]])
a.writerows([["# "] + result_line])
a.writerows(result['train_losses'])
if len(result['test_errors']) > 0:
with open(name + '.testerrors', 'wb') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "test_error"]])
a.writerows([["# "] + result_line])
a.writerows(result['test_errors'])
with open(name + '.extrapoltrainloss', 'wb') as csvfile:
a = csv.writer(csvfile, delimiter='\t')
a.writerows([["#C epoch", "extrapol_train_loss"]])
a.writerows([["# "] + result_line])
a.writerows(result['extrapol_train_losses'])
| 35,587 | 33.384541 | 221 | py |
EQL | EQL-master/EQL-DIV-ICML/src/noise.py | # Copyright (c) 2011 Leif Johnson <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''Code for generating white and pink noise.'''
import numpy
import numpy.random as rng
import operator
def iterwhite():
'''Generate a sequence of samples of white noise.
Generates a never-ending sequence of floating-point values.
'''
while True:
for n in rng.randn(100):
yield n
def iterpink(depth=20):
'''Generate a sequence of samples of pink noise.
Based on the Voss-McCartney algorithm, discussion and code examples at
http://www.firstpr.com.au/dsp/pink-noise/
depth: Use this many samples of white noise to calculate the output. A
higher number is slower to run, but renders low frequencies with more
correct power spectra.
Generates a never-ending sequence of floating-point values. Any continuous
set of these samples will tend to have a 1/f power spectrum.
'''
values = rng.randn(depth)
smooth = rng.randn(depth)
source = rng.randn(depth)
sum = values.sum()
i = 0
while True:
yield sum + smooth[i]
# advance the index by 1. if the index wraps, generate noise to use in
# the calculations, but do not update any of the pink noise values.
i += 1
if i == depth:
i = 0
smooth = rng.randn(depth)
source = rng.randn(depth)
continue
# count trailing zeros in i
c = 0
while not (i >> c) & 1:
c += 1
# replace value c with a new source element
sum += source[i] - values[c]
values[c] = source[i]
def _asarray(source, shape):
noise = source()
if shape is None:
return noise.next()
count = reduce(operator.mul, shape)
return numpy.asarray([noise.next() for _ in range(count)]).reshape(shape)
def white(shape=None):
'''Generate white noise.
shape: If given, returns a numpy array of white noise with this shape. If
not given, return just one sample of white noise.
'''
return _asarray(iterwhite, shape)
def pink(shape=None, depth=20):
'''Generate an array of pink noise.
shape: If given, returns a numpy array of noise with this shape. If not
given, return just one sample of noise.
depth: Use this many samples of white noise to calculate pink noise. A
higher number is slower to run, but renders low frequencies with more
correct power spectra.
'''
return _asarray(lambda: iterpink(depth), shape)
# added by Georg
def pink_zero_mean_std(shape=None, depth=20):
'''Generate an array of pink noise.
shape: If given, returns a numpy array of noise with this shape. If not
given, return just one sample of noise.
depth: Use this many samples of white noise to calculate pink noise. A
higher number is slower to run, but renders low frequencies with more
correct power spectra.
'''
dat = _asarray(lambda: iterpink(depth), shape)
return (dat-numpy.mean(dat))/numpy.sqrt(numpy.log2(depth))
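# Hedged usage sketch (added for illustration; the helper name below is made up):
# draw a finite block of samples from the generators defined above.
def _noise_example(n_samples=1024, depth=16):
    w = white((n_samples,))        # flat (white) power spectrum
    p = pink((n_samples,), depth)  # approximately 1/f power spectrum
    return w, p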
if __name__ == '__main__':
from matplotlib import pylab
    k = numpy.ones(100) / 10.
def spectrum(s):
a = abs(numpy.fft.rfft(list(s))) ** 2
return numpy.convolve(a, k, 'valid')
ax = pylab.gca()
w = iterwhite()
ax.loglog(spectrum(w.next() for _ in range(10000)), 'k')
for p, a in enumerate(numpy.logspace(-0.5, 0, 7)):
print 2 ** (p + 1)
p = iterpink(2 ** (p + 1))
ax.loglog(spectrum(p.next() for _ in range(10000)), 'r', alpha=a)
ax.grid(linestyle=':')
ax.set_xlim(10., None)
ax.set_ylim(None, 1e8)
pylab.show() | 4,693 | 31.825175 | 80 | py |
EQL | EQL-master/EQL-DIV-ICML/src/model_selection_val_sparsity.py | import os, sys
import stat
import numpy as np
from operator import itemgetter
'''
Expects a file with one row per network and columns reporting the parameters, sparsity, and performance.
The first line should contain the column names: #C col1 col2 col3...
followed by one additional comment line: # extrapolation datasets etc.
A sample file is in example_parameter_scan_result.txt.
These are the typical columns in the file.
['k', 'iter', 'layers', 'epochs', 'nodes', 'lr', 'L1', 'L2', 'shortcut', 'batchsize', 'regstart', 'regend',
'id','dataset', 'gradient', 'numactive', 'bestnumactive', 'bestepoch','dups', 'inserts', 'runtime', 'extrapol1', 'extrapol2', 'extrapol3',
'extrapolbest1', 'extrapolbest2', 'extrapolbest3', 'valerror', 'valerrorbest', 'testerror']
'''
def select_instance(file):
value_dict = {}
with open(file ,'r') as file:
k = 0
lines = file.readlines()
keys = lines[0].split()[1:]
extrapolL = [x for x in keys if ("extrapol" in x and not "best" in x)]
for key in keys:
nums = []
            for l in lines[2:]:  # skip the '#C' header line and the '# extra datasets' comment line
nums.append(l.split()[k])
k += 1
value_dict[key] = nums
#print key , value_dict[key]
lines = 0
e = []
e_mean = []
e_var = []
for i in range(len(value_dict["id"])):
value_dict["id"][int(i)] = int(value_dict["id"][int(i)])
value_dict["nodes"][int(i)] = int(value_dict["nodes"][int(i)])
value_dict["numactive"][int(i)] = float(value_dict["numactive"][int(i)])
value_dict["iter"][int(i)] = int(value_dict["iter"][int(i)])
value_dict["valerror"][int(i)] = float(value_dict["valerror"][int(i)])
# value_dict["valextrapol"][int(i)] = float(value_dict["valextrapol"][int(i)])
for k in extrapolL:
value_dict[k][int(i)] = float(value_dict[k][int(i)])
lines += 1
print "lines: ", lines
active_ = []
validation_ = []
id_ = []
extrapol_ = []
#extrapol_val = []
for i in range(lines):
validation_.append(value_dict["valerror"][i])
active_.append(value_dict["numactive"][i])
if "extrapol2" in value_dict:
extrapol_.append(value_dict["extrapol2"][i])
id_.append(value_dict["id"][i])
#extrapol_val.append(value_dict["valextrapol"][i])
active = np.asarray(active_)
active = (active-np.min(active))/(np.max(active)-np.min(active)) # normalize
validation = np.asarray(validation_)
validation = (validation-np.min(validation))/(np.max(validation)-np.min(validation)) # normalize
norm_score = np.sqrt(active**2 + validation**2)
# only for information
if len(extrapol_) > 0:
best_extrapol = sorted(zip(id_, extrapol_), key=itemgetter(1))[0]
print (" best extrapolating model: (only for information):", best_extrapol)
score = zip(list(norm_score), id_, active_, validation_, extrapol_)
score.sort(key = itemgetter(0))
best_instance = score[0]
print ("selected instance model: score: {} id: {} #active: {}\t val-error: {}\t extra-pol2-error: {}".format(*best_instance))
# (best_instance[3], score)
return dict(zip(['score','id', 'num_active', 'valerror', 'extrapol2'], best_instance))
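if __name__ == "__main__":
    # Minimal usage sketch (added for illustration): the default file name is
    # the sample mentioned in the docstring above; pass your own
    # parameter-scan result file as the first argument instead.
    fname = sys.argv[1] if len(sys.argv) > 1 else "example_parameter_scan_result.txt"
    best = select_instance(fname)
    print "selected id:", best['id'], "val-error:", best['valerror']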
| 3,431 | 41.37037 | 138 | py |
EQL | EQL-master/EQL-DIV-ICML/src/graph_div.py | from graphviz import Digraph
import numpy as np
def getEdges(matrix,inputnames,outputnames,thresh=0.1):
edges=[]
it = np.nditer(matrix, flags=['multi_index'])
while not it.finished:
if np.abs(it[0])>thresh:
edges.append((inputnames[it.multi_index[0]],outputnames[it.multi_index[1]],np.round(it[0].item(),2)))
it.iternext()
return edges
def functionGraph1H(classifier,thresh=0.1):
functionGraph(classifier, thresh)
def functionGraph(classifier,thresh=0.1):
n_in,n_out = classifier.n_in, classifier.n_out
try:
shortcuts = classifier.with_shortcuts
except AttributeError:
shortcuts=False
names_in = [ 'x' + str(s) for s in range(1,n_in+1)]
names_out= [ 'div/y' + str(s) for s in range(1,n_out+1)]
alledges = []
allbiases = []
for l in xrange(len(classifier.hidden_layers)+1):
if l==0:
inp=names_in
else:
inp = classifier.hidden_layers[l-1].getNodeFunctions()
if l==len(classifier.hidden_layers): # last layer
if shortcuts:
inp = np.concatenate([ l.getNodeFunctions() for l in classifier.hidden_layers ])
out = [ n + ":1" for n in names_out] + [ n + ":2" for n in names_out]
ps = classifier.output_layer.get_params()
W = ps[0]
b = ps[1]
else:
out = classifier.hidden_layers[l].getWeightCorrespondence()
ps = classifier.hidden_layers[l].get_params()
W = ps[0]
b = ps[1]
alledges.extend(getEdges(W, inp, out ,thresh))
allbiases.extend(zip(out,b))
nodes=list(set([e[0] for e in alledges])) + list(set([e[1] for e in alledges]))
def isArgument(name):
return ':' in name
def arity2node(name,b1, b2):
op=None
if len(name.split('/')) > 1:
op, name = name.split('/')
operatorname = ('<TR><TD COLSPAN="2">' + op + '</TD></TR>' if op is not None else "")
return '''<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
<TR>
<TD PORT="1">''' + b1 + '''</TD>
<TD PORT="2">''' + b2 + '''</TD>
</TR>''' + operatorname + '<TR><TD COLSPAN="2">' + name + '</TD></TR></TABLE>>'
arity2set = set([n.split(':')[0] for n in nodes if isArgument(n)])
arity2 = list(arity2set)
arity1 = list(set([n for n in nodes if not isArgument(n)]) - arity2set)
bias_dict = dict(allbiases)
dot = Digraph(comment='Function Graph')
for n in arity1:
if n in bias_dict:
dot.node(n,str(np.round(bias_dict[n],2)) + '\n' + n.split('-')[0])
else:
dot.node(n,n.split('-')[0])
for n in arity2:
dot.node(n,arity2node(n.split('-')[0],
str(np.round(bias_dict.get(n+ ':1',0),2)),
str(np.round(bias_dict.get(n+ ':2',0),2)) ),shape='plaintext')
for e in alledges:
dot.edge(e[0], e[1], label=str(e[2]))
return dot
| 2,993 | 34.642857 | 113 | py |
EQL | EQL-master/EQL-DIV-ICML/src/__init__.py | 0 | 0 | 0 | py |
|
EQL | EQL-master/EQL-DIV-ICML/result_f1-EQLDIV/createjobs.py | #!/usr/bin/python
# sample Python script to create SGE jobs (Sun Grid Engine)
# for scanning a parameter space
import os
jobname = "F1_" # should be short
name = "" + jobname # name of shell scripts
res = "result_f1-EQLDIV"
mem = "2000"
#maxtime = "4:00:00"
submitfile = "submit_" + name + ".sh"
SUBMIT = open(submitfile,'w')
SUBMIT.write("#/bin/bash\n")
pwd=os.getcwd()
e=10000
regstart = e/4
regend = e-e/20
i = 0
for l1 in [10**(-l1exp/10.0) for l1exp in range(35,60)]:
for l_n in [2,3]:
for normal in ([True] if i > 0 else [False, True]):
epochs = e if normal else 1
result = res + "/" if normal else res + "test/"
base_cmd = ["python src/mlfg_final.py -i ", str(i),
" -f ", result,
" -d data/f1-n-10k-1.dat.gz",
" --extrapol=data/f1-n-5k-1-test.dat.gz",
" --extrapol=data/f1-n-5k-1-2-test.dat.gz",
" --epochs=", str(epochs),
" --l1=", str(l1),
" --layers=", str(l_n),
" --iterNum=1",
" --reg_start=", str(regstart),
" --reg_end=", str(regend),
" -o",
]
cmd= "".join(base_cmd)
script_fname = ((name + str(i)) if normal else name + "000_test") + ".sh"
if normal:
SUBMIT.write("./" + script_fname + "\n")
with open(script_fname, 'w') as FILE:
FILE.write("""\
#!/bin/bash
# %(jobname)s%(i)d
cd %(pwd)s
export OMP_NUM_THREADS=1
export PATH=${HOME}/bin:/usr/bin:${PATH}
if %(cmd)s; then
rm -f %(script_fname)s
else
touch %(script_fname)s.dead
fi
""" % locals())
os.chmod(script_fname,0755)
i += 1
SUBMIT.close()
os.chmod(submitfile,0755)
print "Jobs:" , i
with open("finished_" + name + ".sh",'w') as FINISHED:
FINISHED.write("#!/bin/bash\nset -e\n" +
'grep "#" $(ls *.res -1 | head -n 1) >' + res + '/all.dat\n' +
"cat " + res + '/*.res | grep -v "#" >>' + res + '/all.dat\n' +
"cp " + __file__ + ' ' + res + '/\n' +
"rm finished_" + name+ '.sh ' + submitfile + '\n')
os.chmod("finished_" + name + ".sh",0755)
| 2,191 | 27.467532 | 80 | py |
mkbe | mkbe-master/DesGAN/generate.py | import argparse
import numpy as np
import random
import torch
from torch.autograd import Variable
from models import load_models, generate
###############################################################################
# Generation methods
###############################################################################
def interpolate(ae, gg, z1, z2, vocab,
steps=5, sample=None, maxlen=None):
"""
Interpolating in z space
Assumes that type(z1) == type(z2)
"""
if type(z1) == Variable:
noise1 = z1
noise2 = z2
elif type(z1) == torch.FloatTensor or type(z1) == torch.cuda.FloatTensor:
noise1 = Variable(z1, volatile=True)
noise2 = Variable(z2, volatile=True)
elif type(z1) == np.ndarray:
noise1 = Variable(torch.from_numpy(z1).float(), volatile=True)
noise2 = Variable(torch.from_numpy(z2).float(), volatile=True)
else:
raise ValueError("Unsupported input type (noise): {}".format(type(z1)))
# interpolation weights
lambdas = [x*1.0/(steps-1) for x in range(steps)]
gens = []
for L in lambdas:
gens.append(generate(ae, gg, (1-L)*noise1 + L*noise2,
vocab, sample, maxlen))
interpolations = []
for i in range(len(gens[0])):
interpolations.append([s[i] for s in gens])
return interpolations
def main(args):
# Set the random seed manually for reproducibility.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
else:
print("Note that our pre-trained models require CUDA to evaluate.")
###########################################################################
# Load the models
###########################################################################
model_args, idx2word, autoencoder, gan_gen, gan_disc \
= load_models(args.load_path)
###########################################################################
# Generation code
###########################################################################
# Generate sentences
if args.ngenerations > 0:
noise = torch.ones(args.ngenerations, model_args['z_size'])
noise.normal_()
sentences = generate(autoencoder, gan_gen, z=noise,
vocab=idx2word, sample=args.sample,
maxlen=model_args['maxlen'])
if not args.noprint:
print("\nSentence generations:\n")
for sent in sentences:
print(sent)
with open(args.outf, "w") as f:
f.write("Sentence generations:\n\n")
for sent in sentences:
f.write(sent+"\n")
# Generate interpolations
if args.ninterpolations > 0:
noise1 = torch.ones(args.ninterpolations, model_args['z_size'])
noise1.normal_()
noise2 = torch.ones(args.ninterpolations, model_args['z_size'])
noise2.normal_()
interps = interpolate(autoencoder, gan_gen,
z1=noise1,
z2=noise2,
vocab=idx2word,
steps=args.steps,
sample=args.sample,
maxlen=model_args['maxlen'])
if not args.noprint:
print("\nSentence interpolations:\n")
for interp in interps:
for sent in interp:
print(sent)
print("")
with open(args.outf, "a") as f:
f.write("\nSentence interpolations:\n\n")
for interp in interps:
for sent in interp:
f.write(sent+"\n")
f.write('\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PyTorch ARAE for Text Eval')
parser.add_argument('--load_path', type=str, required=True,
help='directory to load models from')
parser.add_argument('--temp', type=float, default=1,
help='softmax temperature (lower --> more discrete)')
parser.add_argument('--ngenerations', type=int, default=10,
help='Number of sentences to generate')
parser.add_argument('--ninterpolations', type=int, default=5,
help='Number z-space sentence interpolation examples')
parser.add_argument('--steps', type=int, default=5,
help='Number of steps in each interpolation')
parser.add_argument('--outf', type=str, default='./generated.txt',
help='filename and path to write to')
parser.add_argument('--noprint', action='store_true',
help='prevents examples from printing')
parser.add_argument('--sample', action='store_true',
help='sample when decoding for generation')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
args = parser.parse_args()
print(vars(args))
main(args)
| 5,149 | 36.867647 | 79 | py |
mkbe | mkbe-master/DesGAN/utils.py | import os
import torch
import numpy as np
import random
def load_kenlm():
global kenlm
import kenlm
def to_gpu(gpu, var):
if gpu:
return var.cuda()
return var
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = {}
self.word2idx['<pad>'] = 0
self.word2idx['<sos>'] = 1
self.word2idx['<eos>'] = 2
self.word2idx['<oov>'] = 3
self.wordcounts = {}
# to track word counts
def add_word(self, word):
if word not in self.wordcounts:
self.wordcounts[word] = 1
else:
self.wordcounts[word] += 1
# prune vocab based on count k cutoff or most frequently seen k words
def prune_vocab(self, k=5, cnt=False):
# get all words and their respective counts
vocab_list = [(word, count) for word, count in self.wordcounts.items()]
if cnt:
# prune by count
self.pruned_vocab = \
{pair[0]: pair[1] for pair in vocab_list if pair[1] > k}
else:
# prune by most frequently seen words
vocab_list.sort(key=lambda x: (x[1], x[0]), reverse=True)
k = min(k, len(vocab_list))
self.pruned_vocab = [pair[0] for pair in vocab_list[:k]]
        # sort to make vocabulary deterministic
self.pruned_vocab.sort()
# add all chosen words to new vocabulary/dict
for word in self.pruned_vocab:
if word not in self.word2idx:
self.word2idx[word] = len(self.word2idx)
print("original vocab {}; pruned to {}".
format(len(self.wordcounts), len(self.word2idx)))
self.idx2word = {v: k for k, v in self.word2idx.items()}
def __len__(self):
return len(self.word2idx)
class Corpus(object):
def __init__(self, path, maxlen, vocab_size=11000, lowercase=False):
self.dictionary = Dictionary()
self.maxlen = maxlen
self.lowercase = lowercase
self.vocab_size = vocab_size
self.train_path = os.path.join(path, 'train.txt')
self.test_path = os.path.join(path, 'test.txt')
# make the vocabulary from training set
self.make_vocab()
self.train = self.tokenize(self.train_path)
self.test = self.tokenize(self.test_path)
def make_vocab(self):
assert os.path.exists(self.train_path)
# Add words to the dictionary
with open(self.train_path, 'r') as f:
for line in f:
if self.lowercase:
# -1 to get rid of \n character
words = line[:-1].lower().split(" ")
else:
words = line[:-1].split(" ")
for word in words:
self.dictionary.add_word(word)
# prune the vocabulary
self.dictionary.prune_vocab(k=self.vocab_size, cnt=False)
def tokenize(self, path):
"""Tokenizes a text file."""
dropped = 0
with open(path, 'r') as f:
linecount = 0
lines = []
for line in f:
linecount += 1
if self.lowercase:
words = line[:-1].lower().strip().split(" ")
else:
words = line[:-1].strip().split(" ")
if len(words) > self.maxlen:
dropped += 1
continue
words = ['<sos>'] + words
words += ['<eos>']
# vectorize
vocab = self.dictionary.word2idx
unk_idx = vocab['<oov>']
indices = [vocab[w] if w in vocab else unk_idx for w in words]
lines.append(indices)
print("Number of sentences dropped from {}: {} out of {} total".
format(path, dropped, linecount))
return lines
def batchify(data, bsz, shuffle=False, gpu=False):
#if shuffle:
# random.shuffle(data)
nbatch = len(data) // bsz
batches = []
for i in range(nbatch):
# Pad batches to maximum sequence length in batch
batch = data[i*bsz:(i+1)*bsz]
# subtract 1 from lengths b/c includes BOTH starts & end symbols
lengths = [len(x)-1 for x in batch]
# sort items by length (decreasing)
batch, lengths = length_sort(batch, lengths)
# source has no end symbol
source = [x[:-1] for x in batch]
# target has no start symbol
target = [x[1:] for x in batch]
# find length to pad to
maxlen = max(lengths)
for x, y in zip(source, target):
zeros = (maxlen-len(x))*[0]
x += zeros
y += zeros
source = torch.LongTensor(np.array(source))
target = torch.LongTensor(np.array(target)).view(-1)
if gpu:
source = source.cuda()
target = target.cuda()
batches.append((source, target, lengths))
return batches
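# A minimal sketch of the batch layout produced above (toy token ids, assuming
# <sos>=1 and <eos>=2 as in Dictionary): source drops the trailing <eos>, target
# drops the leading <sos>, and shorter sequences are zero-padded to the batch max.
def _batchify_demo():
    toy = [[1, 5, 6, 7, 2], [1, 8, 9, 2]]
    source, target, lengths = batchify(toy, bsz=2)[0]
    # source rows: [1, 5, 6, 7] and [1, 8, 9, 0]; lengths: [4, 3]
    return source, target, lengths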
def batchify_C(data, condition, bsz, shuffle=False, gpu=False):
#if shuffle:
# random.shuffle(data)
nbatch = len(data) // bsz
batches = []
cond_batch = []
for i in range(nbatch):
# Pad batches to maximum sequence length in batch
batch = data[i*bsz:(i+1)*bsz]
cond = condition[i*bsz:(i+1)*bsz]
# subtract 1 from lengths b/c includes BOTH starts & end symbols
lengths = [len(x)-1 for x in batch]
lengths_cond = [len(x) for x in cond]
# sort items by length (decreasing)
batch, lengths, cond = length_sort_c(batch, lengths, cond)
# source has no end symbol
source = [x[1:-1] for x in batch]
# target has no start symbol
target = [x[1:-1] for x in batch]
# source has no end symbol
source_cond = [x[1:-1] for x in cond]
# target has no start symbol
target_cond = [x[1:-1] for x in cond]
# find length to pad to
maxlen = max(lengths)
for x, y in zip(source, target):
zeros = (maxlen-len(x))*[0]
x += zeros
y += zeros
source_cond = torch.LongTensor(np.array(source_cond))
target_cond = torch.LongTensor(np.array(target_cond)).view(-1)
source = torch.LongTensor(np.array(source))
target = torch.LongTensor(np.array(target)).view(-1)
if gpu:
source_cond = source_cond.cuda()
target_cond = target_cond.cuda()
source = source.cuda()
target = target.cuda()
cond_batch.append((source_cond, target_cond, lengths_cond))
batches.append((source, target, lengths))
return batches, cond_batch
def length_sort(items, lengths, descending=True):
"""In order to use pytorch variable length sequence package"""
items = list(zip(items, lengths))
items.sort(key=lambda x: x[1], reverse=True)
items, lengths = zip(*items)
return list(items), list(lengths)
def length_sort_c(items, lengths, cond, descending=True):
"""In order to use pytorch variable length sequence package"""
items = list(zip(items, lengths, cond))
items.sort(key=lambda x: x[1], reverse=True)
items, lengths, cond = zip(*items)
return list(items), list(lengths), list(cond)
def train_ngram_lm(kenlm_path, data_path, output_path, N):
"""
Trains a modified Kneser-Ney n-gram KenLM from a text file.
Creates a .arpa file to store n-grams.
"""
# create .arpa file of n-grams
curdir = os.path.abspath(os.path.curdir)
#
command = "bin/lmplz -o "+str(N)+" <"+os.path.join(curdir, data_path) + \
" >"+os.path.join(curdir, output_path)
os.system("cd "+os.path.join(kenlm_path, 'build')+" && "+command)
load_kenlm()
# create language model
model = kenlm.Model(output_path)
return model
def get_ppl(lm, sentences):
"""
Assume sentences is a list of strings (space delimited sentences)
"""
total_nll = 0
total_wc = 0
for sent in sentences:
words = sent.strip().split()
score = lm.score(sent, bos=True, eos=False)
word_count = len(words)
total_wc += word_count
total_nll += score
ppl = 10**-(total_nll/total_wc)
return ppl
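# A hedged end-to-end sketch (all paths are placeholders): train a 5-gram KenLM on a
# file of generated sentences, then compute reverse perplexity on held-out text.
# Assumes a local KenLM build under `kenlm_path` and the `kenlm` Python bindings.
def _reverse_ppl_demo(kenlm_path="./kenlm", gen_path="generated.txt",
                      arpa_path="generated.arpa", eval_path="data/test.txt"):
    lm = train_ngram_lm(kenlm_path, gen_path, arpa_path, N=5)
    with open(eval_path) as f:
        sentences = [line.strip() for line in f]
    return get_ppl(lm, sentences)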
| 8,267 | 30.557252 | 79 | py |
mkbe | mkbe-master/DesGAN/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from utils import to_gpu
import json
import os
import numpy as np
class MLP_D(nn.Module):
def __init__(self, ninput, noutput, layers,
activation=nn.LeakyReLU(0.2), gpu=False):
super(MLP_D, self).__init__()
self.ninput = ninput
self.noutput = noutput
layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
self.layers = []
for i in range(len(layer_sizes)-1):
if i==0:
layer = nn.Linear(layer_sizes[i]+199, layer_sizes[i+1])
else:
layer = nn.Linear(layer_sizes[i], layer_sizes[i+1])
self.layers.append(layer)
self.add_module("layer"+str(i+1), layer)
# No batch normalization after first layer
if i != 0:
bn = nn.BatchNorm1d(layer_sizes[i+1], eps=1e-05, momentum=0.1)
self.layers.append(bn)
self.add_module("bn"+str(i+1), bn)
self.layers.append(activation)
self.add_module("activation"+str(i+1), activation)
layer = nn.Linear(layer_sizes[-1], noutput)
self.layers.append(layer)
self.add_module("layer"+str(len(self.layers)), layer)
self.init_weights()
def forward(self, x, c):
for i, layer in enumerate(self.layers):
if i==0:
x = torch.cat((x, c), 1)
x = layer(x)
y = torch.mean(torch.sigmoid(x))
x = torch.mean(x)
return x, y
def init_weights(self):
init_std = 0.02
for layer in self.layers:
try:
layer.weight.data.normal_(0, init_std)
layer.bias.data.fill_(0)
except:
pass
class MLP_G(nn.Module):
def __init__(self, ninput, noutput, layers,
activation=nn.ReLU(), gpu=False):
super(MLP_G, self).__init__()
self.ninput = ninput
self.noutput = noutput
layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
self.layers = []
for i in range(len(layer_sizes)-1):
layer = nn.Linear(layer_sizes[i], layer_sizes[i+1])
self.layers.append(layer)
self.add_module("layer"+str(i+1), layer)
bn = nn.BatchNorm1d(layer_sizes[i+1], eps=1e-05, momentum=0.1)
self.layers.append(bn)
self.add_module("bn"+str(i+1), bn)
self.layers.append(activation)
self.add_module("activation"+str(i+1), activation)
layer = nn.Linear(layer_sizes[-1], noutput)
self.layers.append(layer)
self.add_module("layer"+str(len(self.layers)), layer)
self.init_weights()
def forward(self, x):
for i, layer in enumerate(self.layers):
x = layer(x)
return x
def init_weights(self):
init_std = 0.02
for layer in self.layers:
try:
layer.weight.data.normal_(0, init_std)
layer.bias.data.fill_(0)
except:
pass
class Seq2Seq(nn.Module):
def __init__(self, emsize, nhidden, ntokens, nlayers, noise_radius=0.2,
hidden_init=False, dropout=0, gpu=False):
super(Seq2Seq, self).__init__()
self.nhidden = nhidden
self.emsize = emsize
self.ntokens = ntokens
self.nlayers = nlayers
self.noise_radius = noise_radius
self.hidden_init = hidden_init
self.dropout = dropout
self.gpu = gpu
self.start_symbols = to_gpu(gpu, Variable(torch.ones(10, 1).long()))
# Vocabulary embedding
self.embedding = nn.Embedding(ntokens, emsize)
self.embedding_decoder = nn.Embedding(ntokens, emsize)
# RNN Encoder and Decoder
self.encoder = nn.LSTM(input_size=emsize,
hidden_size=nhidden,
num_layers=nlayers,
dropout=dropout,
batch_first=True)
decoder_input_size = emsize+nhidden
self.decoder = nn.LSTM(input_size=decoder_input_size,
hidden_size=nhidden,
num_layers=1,
dropout=dropout,
batch_first=True)
# Initialize Linear Transformation
self.linear = nn.Linear(nhidden, ntokens)
self.init_weights()
def init_weights(self):
initrange = 0.1
# Initialize Vocabulary Matrix Weight
self.embedding.weight.data.uniform_(-initrange, initrange)
self.embedding_decoder.weight.data.uniform_(-initrange, initrange)
# Initialize Encoder and Decoder Weights
for p in self.encoder.parameters():
p.data.uniform_(-initrange, initrange)
for p in self.decoder.parameters():
p.data.uniform_(-initrange, initrange)
# Initialize Linear Weight
self.linear.weight.data.uniform_(-initrange, initrange)
self.linear.bias.data.fill_(0)
def init_hidden(self, bsz):
zeros1 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
zeros2 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
return (to_gpu(self.gpu, zeros1), to_gpu(self.gpu, zeros2))
def init_state(self, bsz):
zeros = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
return to_gpu(self.gpu, zeros)
def store_grad_norm(self, grad):
norm = torch.norm(grad, 2, 1)
self.grad_norm = norm.detach().data.mean()
return grad
def forward(self, indices, lengths, noise, encode_only=False):
batch_size, maxlen = indices.size()
hidden = self.encode(indices, lengths, noise)
if encode_only:
return hidden
if hidden.requires_grad:
hidden.register_hook(self.store_grad_norm)
decoded = self.decode(hidden, batch_size, maxlen,
indices=indices, lengths=lengths)
return decoded
def encode(self, indices, lengths, noise):
embeddings = self.embedding(indices)
packed_embeddings = pack_padded_sequence(input=embeddings,
lengths=lengths,
batch_first=True)
# Encode
packed_output, state = self.encoder(packed_embeddings)
hidden, cell = state
# batch_size x nhidden
hidden = hidden[-1] # get hidden state of last layer of encoder
# normalize to unit ball (l2 norm of 1) - p=2, dim=1
norms = torch.norm(hidden, 2, 1)
# For older versions of PyTorch use:
#hidden = torch.div(hidden, norms.expand_as(hidden))
# For newest version of PyTorch (as of 8/25) use this:
hidden = torch.div(hidden, norms.unsqueeze(1).expand_as(hidden))
if noise and self.noise_radius > 0:
gauss_noise = torch.normal(means=torch.zeros(hidden.size()),
std=self.noise_radius)
hidden = hidden + to_gpu(self.gpu, Variable(gauss_noise))
return hidden
def decode(self, hidden, batch_size, maxlen, indices=None, lengths=None):
# batch x hidden
all_hidden = hidden.unsqueeze(1).repeat(1, maxlen, 1)
if self.hidden_init:
# initialize decoder hidden state to encoder output
state = (hidden.unsqueeze(0), self.init_state(batch_size))
else:
state = self.init_hidden(batch_size)
embeddings = self.embedding_decoder(indices)
augmented_embeddings = torch.cat([embeddings, all_hidden], 2)
packed_embeddings = pack_padded_sequence(input=augmented_embeddings,
lengths=lengths,
batch_first=True)
packed_output, state = self.decoder(packed_embeddings, state)
output, lengths = pad_packed_sequence(packed_output, batch_first=True)
# reshape to batch_size*maxlen x nhidden before linear over vocab
decoded = self.linear(output.contiguous().view(-1, self.nhidden))
decoded = decoded.view(batch_size, maxlen, self.ntokens)
return decoded
def generate(self, hidden, maxlen, sample=True, temp=1.0):###changed
"""Generate through decoder; no backprop"""
batch_size = hidden.size(0)
if self.hidden_init:
# initialize decoder hidden state to encoder output
state = (hidden.unsqueeze(0), self.init_state(batch_size))
else:
state = self.init_hidden(batch_size)
# <sos>
self.start_symbols.data.resize_(batch_size, 1)
self.start_symbols.data.fill_(1)
embedding = self.embedding_decoder(self.start_symbols)
inputs = torch.cat([embedding, hidden.unsqueeze(1)], 2) ###
# unroll
all_indices = []
for i in range(maxlen):
change = 0
output, state = self.decoder(inputs, state)
overvocab = self.linear(output.squeeze(1))
if not sample:
vals, indices = torch.max(overvocab, 1)
else:
# sampling
change = 1
probs = F.softmax(overvocab/temp)
indices = torch.multinomial(probs, 1)
if change ==0:
indices = indices.unsqueeze(1)
all_indices.append(indices) ### indices -> indices.unsqueeze(1)
embedding = self.embedding_decoder(indices)
            #embedding = embedding.unsqueeze(1) ### added for the initial generation step
#print (embedding.shape, hidden.unsqueeze(1).shape)
####print (hidden.shape)
####print (hidden.unsqueeze(1).shape)
inputs = torch.cat([embedding, hidden.unsqueeze(1)], 2) ###
max_indices = torch.cat(all_indices, 1)##
return max_indices
def load_models(load_path):
model_args = json.load(open("{}/args.json".format(load_path), "r"))
word2idx = json.load(open("{}/vocab.json".format(load_path), "r"))
idx2word = {v: k for k, v in word2idx.items()}
autoencoder = Seq2Seq(emsize=model_args['emsize'],
nhidden=model_args['nhidden'],
ntokens=model_args['ntokens'],
nlayers=model_args['nlayers'],
hidden_init=model_args['hidden_init'])
gan_gen = MLP_G(ninput=model_args['z_size'],
noutput=model_args['nhidden'],
layers=model_args['arch_g'])
gan_disc = MLP_D(ninput=model_args['nhidden'],
noutput=1,
layers=model_args['arch_d'])
print('Loading models from'+load_path)
ae_path = os.path.join(load_path, "autoencoder_model.pt")
gen_path = os.path.join(load_path, "gan_gen_model.pt")
disc_path = os.path.join(load_path, "gan_disc_model.pt")
autoencoder.load_state_dict(torch.load(ae_path))
gan_gen.load_state_dict(torch.load(gen_path))
gan_disc.load_state_dict(torch.load(disc_path))
return model_args, idx2word, autoencoder, gan_gen, gan_disc
def generate(autoencoder, gan_gen, z, vocab, sample, maxlen):### changed
"""
Assume noise is batch_size x z_size
"""
if type(z) == Variable:
noise = z
elif type(z) == torch.FloatTensor or type(z) == torch.cuda.FloatTensor:
noise = Variable(z, volatile=True)
elif type(z) == np.ndarray:
noise = Variable(torch.from_numpy(z).float(), volatile=True)
else:
raise ValueError("Unsupported input type (noise): {}".format(type(z)))
gan_gen.eval()
autoencoder.eval()
# generate from random noise
fake_hidden = gan_gen(noise)
max_indices = autoencoder.generate(hidden=fake_hidden,
maxlen=maxlen,
sample=sample)
max_indices = max_indices.data.cpu().numpy()
sentences = []
for idx in max_indices:
# generated sentence
words = [vocab[x] for x in idx]
# truncate sentences to first occurrence of <eos>
truncated_sent = []
for w in words:
if w != '<eos>':
truncated_sent.append(w)
else:
break
sent = " ".join(truncated_sent)
sentences.append(sent)
return sentences
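# A minimal decoding sketch (hypothetical checkpoint directory): load networks saved
# by train.py and decode a few sentences from standard-normal noise. In this repo the
# generator is normally fed conditional vectors of dimension z_size, so random noise
# of the same size is only a stand-in.
def _decode_demo(load_path="./output/example", n=5):
    model_args, idx2word, autoencoder, gan_gen, gan_disc = load_models(load_path)
    z = np.random.normal(size=(n, model_args["z_size"])).astype("float32")
    return generate(autoencoder, gan_gen, z, vocab=idx2word,
                    sample=True, maxlen=model_args["maxlen"])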
| 12,561 | 33.991643 | 82 | py |
mkbe | mkbe-master/DesGAN/metrics.py | """
Computes the BLEU, ROUGE, METEOR, and CIDER
using the COCO metrics scripts
"""
import argparse
import logging
# this requires the coco-caption package, https://github.com/tylin/coco-caption
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.meteor.meteor import Meteor
parser = argparse.ArgumentParser(
description="""This takes two text files and a path the references (source, references),
computes bleu, meteor, rouge and cider metrics""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("hypothesis", type=argparse.FileType('r'),
help="The hypothesis files")
parser.add_argument("references", type=argparse.FileType('r'), nargs="+",
help="Path to all the reference files")
def read_file(file, type = "r"):
data = []
#list = []
with open(file) as f: #open(p)
data = f.readlines() + data
#for line in data:
# if type == "r":
# list += [[line.split()]]
# if type == "h":
# list += [line.split()]
return data
def load_textfiles(references, hypothesis):
print "The number of references is {}".format(len(references))
hypo = {idx: [lines.strip()] for (idx, lines) in enumerate(hypothesis)}
# take out newlines before creating dictionary
###raw_refs = [map(str.strip, r) for r in zip(*references)]
refs = {idx: [lines.strip()] for (idx, lines) in enumerate(references)}##{idx: rr for idx, rr in enumerate(raw_refs)}
# sanity check that we have the same number of references as hypothesis
    print(len(hypo), len(refs))
if len(hypo) != len(refs):
raise ValueError("There is a sentence number mismatch between the inputs")
return refs, hypo
def score(ref, hypo):
"""
ref, dictionary of reference sentences (id, sentence)
hypo, dictionary of hypothesis sentences (id, sentence)
score, dictionary of scores
"""
scorers = [
(Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
(Meteor(),"METEOR"),
(Rouge(), "ROUGE_L"),
(Cider(), "CIDEr")
]
final_scores = {}
for scorer, method in scorers:
score, scores = scorer.compute_score(ref, hypo)
if type(score) == list:
for m, s in zip(method, score):
final_scores[m] = s
else:
final_scores[method] = score
return final_scores
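# Shape of the inputs expected by the scorers above (a hypothetical two-sentence
# example; requires the pycocoevalcap dependencies, including Java for METEOR):
def _score_demo():
    refs = {0: ["a man is playing a guitar"], 1: ["a dog runs on the grass"]}
    hypo = {0: ["a man plays guitar"], 1: ["a dog is running"]}
    return score(refs, hypo)  # dict with Bleu_1..4, METEOR, ROUGE_L and CIDEr keys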
if __name__ == '__main__':
#logging.basicConfig(level=logging.INFO)
#logger = logging.getLogger('Computing Metrics:')
#args = parser.parse_args()
refer = "data/test.txt"
    for i in range(6, 50):
hypo = "output/example/end_of_epoch"+str(i)+"_lm_generations.txt"
ref = "data/test.txt"
ref = read_file(ref)
hypo = read_file(hypo, type ="h")
ref, hypo = load_textfiles(ref, hypo)#(args.references, args.hypothesis)
        print(score(ref, hypo))
| 2,904 | 32.390805 | 121 | py |
mkbe | mkbe-master/DesGAN/train.py | import argparse
import os
import time
import math
import numpy as np
import random
import sys
import json
from sklearn import preprocessing
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from utils import to_gpu, Corpus, batchify, train_ngram_lm, get_ppl, batchify_C
from models import Seq2Seq, MLP_D, MLP_G
parser = argparse.ArgumentParser(description='PyTorch ARAE for Text')
# Path Arguments
parser.add_argument('--data_path', type=str, required=True,
help='location of the data corpus')
parser.add_argument('--kenlm_path', type=str, default='./kenlm',
help='path to kenlm directory')
parser.add_argument('--outf', type=str, default='example',
help='output directory name')
# Data Processing Arguments
parser.add_argument('--vocab_size', type=int, default=11000,
help='cut vocabulary down to this size '
'(most frequently seen words in train)')
parser.add_argument('--maxlen', type=int, default=30, ### 30 -> 7
help='maximum sentence length')
parser.add_argument('--lowercase', action='store_true',
help='lowercase all text')
# Model Arguments
parser.add_argument('--emsize', type=int, default=300,
help='size of word embeddings')
parser.add_argument('--nhidden', type=int, default=300,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=1,
help='number of layers')
parser.add_argument('--noise_radius', type=float, default=0.2,
help='stdev of noise for autoencoder (regularizer)')
parser.add_argument('--noise_anneal', type=float, default=0.995,
help='anneal noise_radius exponentially by this'
'every 100 iterations')
parser.add_argument('--hidden_init', action='store_true',
help="initialize decoder hidden state with encoder's")
parser.add_argument('--arch_g', type=str, default='300-300',
help='generator architecture (MLP)')
parser.add_argument('--arch_d', type=str, default='300-300',
help='critic/discriminator architecture (MLP)')
parser.add_argument('--z_size', type=int, default=199,
help='dimension of random noise z to feed into generator')
parser.add_argument('--temp', type=float, default=1,
help='softmax temperature (lower --> more discrete)')
parser.add_argument('--enc_grad_norm', type=bool, default=True,
help='norm code gradient from critic->encoder')
parser.add_argument('--gan_toenc', type=float, default=-0.01,
help='weight factor passing gradient from gan to encoder')
parser.add_argument('--dropout', type=float, default=0.0,
help='dropout applied to layers (0 = no dropout)')
# Training Arguments
parser.add_argument('--epochs', type=int, default=15,
help='maximum number of epochs')
parser.add_argument('--min_epochs', type=int, default=6,
help="minimum number of epochs to train for")
parser.add_argument('--no_earlystopping', action='store_true',
help="won't use KenLM for early stopping")
parser.add_argument('--patience', type=int, default=5,
help="number of language model evaluations without ppl "
"improvement to wait before early stopping")
parser.add_argument('--batch_size', type=int, default=64, metavar='N',
help='batch size')
parser.add_argument('--niters_ae', type=int, default=1,
help='number of autoencoder iterations in training')
parser.add_argument('--niters_gan_d', type=int, default=5,
help='number of discriminator iterations in training')
parser.add_argument('--niters_gan_g', type=int, default=1,
help='number of generator iterations in training')
parser.add_argument('--niters_gan_schedule', type=str, default='2-4-6',
help='epoch counts to increase number of GAN training '
' iterations (increment by 1 each time)')
parser.add_argument('--lr_ae', type=float, default=1,
help='autoencoder learning rate')
parser.add_argument('--lr_gan_g', type=float, default=5e-05,
help='generator learning rate')
parser.add_argument('--lr_gan_d', type=float, default=1e-05,
help='critic/discriminator learning rate')
parser.add_argument('--lr_ae_l', type=float, default=5e-03,
help='autoencoder l1 rate')
parser.add_argument('--lr_gan_l', type=float, default=5e-03,
help='l1 learning rate')
parser.add_argument('--beta1', type=float, default=0.9,
help='beta1 for adam. default=0.9')
parser.add_argument('--clip', type=float, default=1,
help='gradient clipping, max norm')
parser.add_argument('--gan_clamp', type=float, default=0.01,
help='WGAN clamp')
# Evaluation Arguments
parser.add_argument('--sample', action='store_true',
help='sample when decoding for generation')
parser.add_argument('--N', type=int, default=5,
help='N-gram order for training n-gram language model')
parser.add_argument('--log_interval', type=int, default=200,
help='interval to log autoencoder training results')
# Other
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
args = parser.parse_args()
print(vars(args))
# make output directory if it doesn't already exist
if not os.path.isdir('./output'):
os.makedirs('./output')
if not os.path.isdir('./output/{}'.format(args.outf)):
os.makedirs('./output/{}'.format(args.outf))
# Set the random seed manually for reproducibility.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, "
"so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
# create corpus
corpus = Corpus(args.data_path,
maxlen=args.maxlen,
vocab_size=args.vocab_size,
lowercase=args.lowercase)
# dumping vocabulary
with open('./output/{}/vocab.json'.format(args.outf), 'w') as f:
json.dump(corpus.dictionary.word2idx, f)
# save arguments
ntokens = len(corpus.dictionary.word2idx)
print("Vocabulary Size: {}".format(ntokens))
args.ntokens = ntokens
with open('./output/{}/args.json'.format(args.outf), 'w') as f:
json.dump(vars(args), f)
with open("./output/{}/logs.txt".format(args.outf), 'w') as f:
f.write(str(vars(args)))
f.write("\n\n")
eval_batch_size = 10
#load conditonal information
test_C = np.load('data/test_weight-YAGO.npy')
train_C = np.load('data/train_weight-YAGO.npy')
test_C = preprocessing.normalize(test_C, norm='l2')
train_C = preprocessing.normalize(train_C, norm='l2')
test_data, test_c = batchify_C(corpus.test, test_C, eval_batch_size, shuffle=False)
train_data, train_c = batchify_C(corpus.train, train_C, args.batch_size, shuffle=False)
test_final = batchify(test_C, len(test_C), shuffle=False)
print("Loaded data!")
###############################################################################
# Build the models
###############################################################################
ntokens = len(corpus.dictionary.word2idx)
autoencoder = Seq2Seq(emsize=args.emsize,
nhidden=args.nhidden,
ntokens=ntokens,
nlayers=args.nlayers,
noise_radius=args.noise_radius,
hidden_init=args.hidden_init,
dropout=args.dropout,
gpu=args.cuda)
gan_gen = MLP_G(ninput=args.z_size, noutput=args.nhidden, layers=args.arch_g)
gan_disc = MLP_D(ninput=args.nhidden, noutput=1, layers=args.arch_d)
print(autoencoder)
print(gan_gen)
print(gan_disc)
optimizer_ae = optim.SGD(autoencoder.parameters(), lr=args.lr_ae)
optimizer_gan_g = optim.Adam(gan_gen.parameters(),
lr=args.lr_gan_g,
betas=(args.beta1, 0.999))
optimizer_gan_d = optim.Adam(gan_disc.parameters(),
lr=args.lr_gan_d,
betas=(args.beta1, 0.999))
#optimizer_gan_l = optim.Adam(gan_gen.parameters(),
# lr=args.lr_gan_l,
# betas=(args.beta1, 0.999))
#optimizer_ae_l = optim.Adam(autoencoder.parameters(), lr=args.lr_ae_l)
criterion_ce = nn.CrossEntropyLoss()
if args.cuda:
autoencoder = autoencoder.cuda()
gan_gen = gan_gen.cuda()
gan_disc = gan_disc.cuda()
criterion_ce = criterion_ce.cuda()
###############################################################################
# Training code
###############################################################################
def save_model():
print("Saving models")
with open('./output/{}/autoencoder_model.pt'.format(args.outf), 'wb') as f:
torch.save(autoencoder.state_dict(), f)
with open('./output/{}/gan_gen_model.pt'.format(args.outf), 'wb') as f:
torch.save(gan_gen.state_dict(), f)
with open('./output/{}/gan_disc_model.pt'.format(args.outf), 'wb') as f:
torch.save(gan_disc.state_dict(), f)
def evaluate_autoencoder(data_source, epoch):
# Turn on evaluation mode which disables dropout.
autoencoder.eval()
total_loss = 0
ntokens = len(corpus.dictionary.word2idx)
all_accuracies = 0
bcnt = 0
for i, batch in enumerate(data_source):
source, target, lengths = batch
source = to_gpu(args.cuda, Variable(source, volatile=True))
target = to_gpu(args.cuda, Variable(target, volatile=True))
mask = target.gt(0)
masked_target = target.masked_select(mask)
# examples x ntokens
output_mask = mask.unsqueeze(1).expand(mask.size(0), ntokens)
# output: batch x seq_len x ntokens
output = autoencoder(source, lengths, noise=True)
flattened_output = output.view(-1, ntokens)
masked_output = \
flattened_output.masked_select(output_mask).view(-1, ntokens)
total_loss += criterion_ce(masked_output/args.temp, masked_target).data
# accuracy
max_vals, max_indices = torch.max(masked_output, 1)
all_accuracies += \
torch.mean(max_indices.eq(masked_target).float()).data[0]
bcnt += 1
aeoutf = "./output/%s/%d_autoencoder.txt" % (args.outf, epoch)
# with open(aeoutf, "a") as f:
# max_values, max_indices = torch.max(output, 2)
# max_indices = \
# max_indices.view(output.size(0), -1).data.cpu().numpy()
# target = target.view(output.size(0), -1).data.cpu().numpy()
# for t, idx in zip(target, max_indices):
# # real sentence
# chars = " ".join([corpus.dictionary.idx2word[x] for x in t])
# f.write(chars)
# f.write("\n")
# autoencoder output sentence
# chars = " ".join([corpus.dictionary.idx2word[x] for x in idx])
# f.write(chars)
# f.write("\n\n")
return total_loss[0] / len(data_source), all_accuracies/bcnt
def evaluate_generator(noise, epoch):
gan_gen.eval()
autoencoder.eval()
# generate from fixed random noise
fake_hidden = gan_gen(noise)
max_indices = \
autoencoder.generate(fake_hidden, args.maxlen, sample=args.sample)
# with open("./output/%s/%s_generated.txt" % (args.outf, epoch), "w") as f:
# max_indices = max_indices.data.cpu().numpy()
# for idx in max_indices:
# generated sentence
# words = [corpus.dictionary.idx2word[x] for x in idx]
# truncate sentences to first occurrence of <eos>
# truncated_sent = []
# for w in words:
# if w != '<eos>':
# truncated_sent.append(w)
# else:
# break
# chars = " ".join(truncated_sent)
# f.write(chars)
# f.write("\n")
def train_lm(test, eval_path, save_path):#####(test, eval_path, save_path) or (eval_path, save_path)
# generate examples
indices = []
noise = to_gpu(args.cuda, Variable(torch.ones(100, args.z_size)))
test = to_gpu(args.cuda, Variable(test[0][0]))
for i in range(1):
noise.data.normal_(0, 1)
fake_hidden = gan_gen(test)
max_indices = autoencoder.generate(fake_hidden, args.maxlen)
indices.append(max_indices.data.cpu().numpy())
indices = np.concatenate(indices, axis=0)
# write generated sentences to text file
with open(save_path+".txt", "w") as f:
# laplacian smoothing
#for word in corpus.dictionary.word2idx.keys():
# f.write(word+"\n")
for idx in indices:
# generated sentence
words = [corpus.dictionary.idx2word[x] for x in idx]
# truncate sentences to first occurrence of <eos>
truncated_sent = []
for w in words:
if w != '<eos>':
truncated_sent.append(w)
else:
break
chars = " ".join(truncated_sent)
f.write(chars+"\n")
#save_path = "./snli_lm/dev_gen"
# train language model on generated examples
lm = train_ngram_lm(kenlm_path=args.kenlm_path,
data_path=save_path+".txt",
output_path=save_path+".arpa",
N=args.N)
# load sentences to evaluate on
with open(eval_path, 'r') as f:
lines = f.readlines()
sentences = [l.replace('\n', '') for l in lines]
ppl = get_ppl(lm, sentences)
return ppl
def train_ae(batch, total_loss_ae, start_time, i):
autoencoder.train()
autoencoder.zero_grad()
source, target, lengths = batch
source = to_gpu(args.cuda, Variable(source))
target = to_gpu(args.cuda, Variable(target))
# Create sentence length mask over padding
mask = target.gt(0)
masked_target = target.masked_select(mask)
# examples x ntokens
output_mask = mask.unsqueeze(1).expand(mask.size(0), ntokens)
# output: batch x seq_len x ntokens
output = autoencoder(source, lengths, noise=True)
# output_size: batch_size, maxlen, self.ntokens
flattened_output = output.view(-1, ntokens)
masked_output = \
flattened_output.masked_select(output_mask).view(-1, ntokens)
loss = criterion_ce(masked_output/args.temp, masked_target)
loss.backward()
# `clip_grad_norm` to prevent exploding gradient in RNNs / LSTMs
torch.nn.utils.clip_grad_norm(autoencoder.parameters(), args.clip)
optimizer_ae.step()
total_loss_ae += loss.data
accuracy = None
if i % args.log_interval == 0 and i > 0:
# accuracy
probs = F.softmax(masked_output)
max_vals, max_indices = torch.max(probs, 1)
accuracy = torch.mean(max_indices.eq(masked_target).float()).data[0]
cur_loss = total_loss_ae[0] / args.log_interval
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f} | acc {:8.2f}'
.format(epoch, i, len(train_data),
elapsed * 1000 / args.log_interval,
cur_loss, math.exp(cur_loss), accuracy))
with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
f.write('| epoch {:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f} | acc {:8.2f}\n'.
format(epoch, i, len(train_data),
elapsed * 1000 / args.log_interval,
cur_loss, math.exp(cur_loss), accuracy))
total_loss_ae = 0
start_time = time.time()
return total_loss_ae, start_time
def train_gan_l(batch, batch2):##(batch2) or ()
gan_gen.train()
gan_gen.zero_grad()
noise = to_gpu(args.cuda,
Variable(torch.ones(args.batch_size, args.z_size)))
noise.data.normal_(0, 1)
batch_C = to_gpu(args.cuda, Variable(batch2[0]))
fake_hidden = gan_gen(batch_C)
########## l1 loss
source, target, lengths = batch
source = to_gpu(args.cuda, Variable(source))
target = to_gpu(args.cuda, Variable(target))
# batch_size x nhidden
real_hidden = autoencoder(source, lengths, noise=False, encode_only=True)
err_l = torch.mean(torch.abs(fake_hidden - real_hidden))
err_l.backward( )
##########
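    # NOTE: optimizer_gan_l is only defined in the commented-out block near the top of
    # this script; restore that optimizer before calling train_gan_l, otherwise the
    # next line raises a NameError.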
optimizer_gan_l.step()
return err_l
def train_gan_g(batch, batch2):##(batch2) or ()
gan_gen.train()
gan_gen.zero_grad()
noise = to_gpu(args.cuda,
Variable(torch.ones(args.batch_size, args.z_size)))
noise.data.normal_(0, 1)
batch_C = to_gpu(args.cuda, Variable(batch2[0]))
fake_hidden = gan_gen(batch_C)
errG, y = gan_disc(fake_hidden, batch_C)
# loss / backprop
errG.backward(one)
optimizer_gan_g.step()
return errG
def grad_hook(grad):
# Gradient norm: regularize to be same
# code_grad_gan * code_grad_ae / norm(code_grad_gan)
if args.enc_grad_norm:
gan_norm = torch.norm(grad, 2, 1).detach().data.mean()
normed_grad = grad * autoencoder.grad_norm / gan_norm
else:
normed_grad = grad
# weight factor and sign flip
normed_grad *= -math.fabs(args.gan_toenc)
return normed_grad
def train_gan_d(batch, batch2):###(batch, batch2) or (batch)
# clamp parameters to a cube
for p in gan_disc.parameters():
p.data.clamp_(-args.gan_clamp, args.gan_clamp)
autoencoder.train()
autoencoder.zero_grad()
gan_disc.train()
gan_disc.zero_grad()
# positive samples ----------------------------
# generate real codes
source, target, lengths = batch
source = to_gpu(args.cuda, Variable(source))
target = to_gpu(args.cuda, Variable(target))
# batch_size x nhidden
real_hidden = autoencoder(source, lengths, noise=False, encode_only=True)
real_hidden_l = real_hidden
real_hidden.register_hook(grad_hook)
batch_C = to_gpu(args.cuda, Variable(batch2[0]))
# loss / backprop
errD_real, y1 = gan_disc(real_hidden, batch_C)
errD_real.backward(one)
# negative samples ----------------------------
# generate fake codes
noise = to_gpu(args.cuda,
Variable(torch.ones(args.batch_size, args.z_size)))
noise.data.normal_(0, 1)
fake_hidden = gan_gen(batch_C)
errD_fake, y2 = gan_disc(fake_hidden.detach(), batch_C)
errD_fake.backward(mone)
    # `clip_grad_norm` to prevent exploding gradient problem in RNNs / LSTMs
torch.nn.utils.clip_grad_norm(autoencoder.parameters(), args.clip)
optimizer_gan_d.step()
optimizer_ae.step()
errD = -(errD_real - errD_fake)
return errD, errD_real, errD_fake
print("Training...")
with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
f.write('Training...\n')
# schedule of increasing GAN training loops
if args.niters_gan_schedule != "":
gan_schedule = [int(x) for x in args.niters_gan_schedule.split("-")]
else:
gan_schedule = []
niter_gan = 1
print(gan_schedule)
fixed_noise = to_gpu(args.cuda,
Variable(torch.ones(args.batch_size, args.z_size)))
fixed_noise.data.normal_(0, 1)
print (len(fixed_noise))
one = to_gpu(args.cuda, torch.FloatTensor([1]))
mone = one * -1
best_ppl = None
impatience = 0
all_ppl = []
for epoch in range(1, args.epochs+1):
# update gan training schedule
if epoch in gan_schedule:
niter_gan += 1
print("GAN training loop schedule increased to {}".format(niter_gan))
with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
f.write("GAN training loop schedule increased to {}\n".
format(niter_gan))
total_loss_ae = 0
epoch_start_time = time.time()
start_time = time.time()
niter = 0
niter_global = 1
# loop through all batches in training data
while niter < len(train_data):
# train autoencoder ----------------------------
for i in range(args.niters_ae):
if niter == len(train_data):
break # end of epoch
            print(train_data[niter][0].shape)
total_loss_ae, start_time = \
train_ae(train_data[niter], total_loss_ae, start_time, niter)
niter += 1
# train gan ----------------------------------
for k in range(niter_gan):
# train discriminator/critic
for i in range(args.niters_gan_d):
# feed a seen sample within this epoch; good for early training
point = random.randint(0, len(train_data)-1)
errD, errD_real, errD_fake = \
train_gan_d(train_data[point], train_c[point])
# train generator
for i in range(args.niters_gan_g):
point = random.randint(0, len(train_data)-1)
errG = train_gan_g(train_data[point], train_c[point])
niter_global += 1
if niter_global % 100 == 0:
print('[%d/%d][%d/%d] Loss_D: %.8f (Loss_D_real: %.8f '
'Loss_D_fake: %.8f) Loss_G: %.8f'
% (epoch, args.epochs, niter, len(train_data),
errD.data[0], errD_real.data[0],
errD_fake.data[0], errG.data[0]))
with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
f.write('[%d/%d][%d/%d] Loss_D: %.8f (Loss_D_real: %.8f '
'Loss_D_fake: %.8f) Loss_G: %.8f\n'
% (epoch, args.epochs, niter, len(train_data),
errD.data[0], errD_real.data[0],
errD_fake.data[0], errG.data[0]))
# exponentially decaying noise on autoencoder
autoencoder.noise_radius = \
autoencoder.noise_radius*args.noise_anneal
if niter_global % 3000 == 0:
evaluate_generator(fixed_noise, "epoch{}_step{}".
format(epoch, niter_global))
# evaluate with lm
if not args.no_earlystopping and epoch > args.min_epochs:
                    ppl = train_lm(test_final,
                                   eval_path=os.path.join(args.data_path,
                                                          "test.txt"),
                                   save_path="output/{}/"
                                             "epoch{}_step{}_lm_generations".
                                   format(args.outf, epoch, niter_global))
print("Perplexity {}".format(ppl))
all_ppl.append(ppl)
print(all_ppl)
with open("./output/{}/logs.txt".
format(args.outf), 'a') as f:
f.write("\n\nPerplexity {}\n".format(ppl))
f.write(str(all_ppl)+"\n\n")
if best_ppl is None or ppl < best_ppl:
impatience = 0
best_ppl = ppl
print("New best ppl {}\n".format(best_ppl))
with open("./output/{}/logs.txt".
format(args.outf), 'a') as f:
f.write("New best ppl {}\n".format(best_ppl))
save_model()
else:
impatience += 1
# end training
if impatience > args.patience:
print("Ending training")
with open("./output/{}/logs.txt".
format(args.outf), 'a') as f:
f.write("\nEnding Training\n")
sys.exit()
# end of epoch ----------------------------
# evaluation
test_loss, accuracy = evaluate_autoencoder(test_data, epoch)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | test loss {:5.2f} | '
'test ppl {:5.2f} | acc {:3.3f}'.
format(epoch, (time.time() - epoch_start_time),
test_loss, math.exp(test_loss), accuracy))
print('-' * 89)
with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
f.write('-' * 89)
f.write('\n| end of epoch {:3d} | time: {:5.2f}s | test loss {:5.2f} |'
' test ppl {:5.2f} | acc {:3.3f}\n'.
format(epoch, (time.time() - epoch_start_time),
test_loss, math.exp(test_loss), accuracy))
f.write('-' * 89)
f.write('\n')
evaluate_generator(fixed_noise, "end_of_epoch_{}".format(epoch))
if not args.no_earlystopping and epoch >= args.min_epochs:
ppl = train_lm(test_final, eval_path=os.path.join(args.data_path, "test.txt"),
save_path="./output/{}/end_of_epoch{}_lm_generations".
format(args.outf, epoch))
print("Perplexity {}".format(ppl))
all_ppl.append(ppl)
print(all_ppl)
with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
f.write("\n\nPerplexity {}\n".format(ppl))
f.write(str(all_ppl)+"\n\n")
if best_ppl is None or ppl < best_ppl:
impatience = 0
best_ppl = ppl
print("New best ppl {}\n".format(best_ppl))
with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
f.write("New best ppl {}\n".format(best_ppl))
save_model()
else:
            pass  # early stopping is disabled at end of epoch; see the commented-out block below
# impatience += 1
# end training
# if impatience > args.patience:
# print("Ending training")
# with open("./output/{}/logs.txt".format(args.outf), 'a') as f:
# f.write("\nEnding Training\n")
# sys.exit()
# shuffle between epochs
train_data = batchify(corpus.train, args.batch_size, shuffle=False)
| 26,814 | 37.472023 | 100 | py |
mkbe | mkbe-master/MKBE/models/conv_units.py | """CNN building blocks derived from Inception-ResNet-v2
"""
import tensorflow as tf
def print_variable_info():
"""
auxiliary function to print trainable variable information
"""
var_list = tf.trainable_variables()
total = 0
layer_name = ""
layer_total = 0
for var in var_list:
num = 1
for dim in var.shape:
num *= int(dim)
var_name_list = str(var.name).split('/')
if var_name_list[0] != layer_name:
if layer_total != 0:
print("Layer {} total parameters: {}".format(layer_name, layer_total))
print("---layer {} parameters---".format(var_name_list[0]))
layer_total = 0
layer_name = var_name_list[0]
print("{}: {}, {}".format(var.name, str(var.shape), num))
total += num
layer_total += num
print("Total parameters: {}".format(total))
def conv_bn_act(op, shape, stride, name, init, training, bn_momentum, act, padding='SAME'):
"""
Build a convolution layer with batch normalization before activation
:param op: input node
:param shape: kernel shape
:param stride: convolution stride
:param name: node name
:param init: initializer
:param training: batch normalization training flag
:param bn_momentum: batch normalization momentum
:param act: activation function
:param padding: padding requirement
:return: post activation node
"""
kernel = tf.get_variable("kernel_weights" + name, shape, initializer=init)
conv = tf.nn.convolution(op, kernel, padding, strides=stride, name="conv" + name)
bn = tf.layers.batch_normalization(conv, momentum=bn_momentum, training=training)
post = act(bn)
return post
def conv_bn(op, shape, stride, name, init, training, bn_momentum, padding='SAME'):
"""
Build a convolution layer with batch normalization WITHOUT activation (i.e. affine linear layer)
:param op: input node
:param shape: kernel shape
:param stride: convolution stride
:param name: node name
:param init: initializer
:param training: batch normalization training flag
:param bn_momentum: batch normalization momentum
:param padding: padding requirement
:return: post activation node
"""
kernel = tf.get_variable("kernel_weights" + name, shape, initializer=init)
conv = tf.nn.convolution(op, kernel, padding, strides=stride, name="conv" + name)
bn = tf.layers.batch_normalization(conv, momentum=bn_momentum, training=training)
return bn
def branch4_avgpool_5x5(
ops,
scope='branch4_avgpool_5x5',
init=tf.contrib.layers.xavier_initializer_conv2d(uniform=False),
training=True,
bn_momentum=0.9,
act=tf.nn.relu,
out_channel=32
):
"""
:param ops: input node
:param scope: namescope of this layer
:param init: initializer for convolution kernels
:param training: training flag for batch_norm layer
:param bn_momentum: batch normalization momentum
:param act: activation function
:param out_channel: output channels for each branch, advised to be smaller than or equal to input channels
:return: output node
"""
# get input channel number
in_channel = ops.shape[-1]
# convolution branches
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
# 1x1 branch
branch0 = conv_bn_act(ops, [1, 1, in_channel, out_channel // 4], [1, 1], "_0_a_1x1", init, training,
bn_momentum, act)
# 1x1, 3x3 branch
branch1 = conv_bn_act(ops, [1, 1, in_channel, out_channel // 8], [1, 1], "_1_a_1x1", init, training,
bn_momentum, act)
branch1 = conv_bn_act(branch1, [3, 3, out_channel // 8, out_channel // 4], [1, 1], "_1_b_3x3", init, training,
bn_momentum, act)
# 1x1, 3x3, 3x3 branch
branch2 = conv_bn_act(ops, [1, 1, in_channel, out_channel // 8], [1, 1], "_2_a_1x1", init, training,
bn_momentum, act)
branch2 = conv_bn_act(branch2, [3, 3, out_channel // 8, out_channel // 4], [1, 1], "_2_b_3x3", init, training,
bn_momentum, act)
branch2 = conv_bn_act(branch2, [3, 3, out_channel // 4, out_channel // 4], [1, 1], "_2_c_3x3", init, training,
bn_momentum, act)
# 3x3 avg_pool, 1x1 branch
branch3 = tf.nn.avg_pool(ops, [1, 3, 3, 1], [1, 1, 1, 1], 'SAME')
branch3 = conv_bn_act(branch3, [1, 1, in_channel, out_channel // 4], [1, 1], "_3_a_1x1", init, training,
bn_momentum, act)
# channel concatenation
return tf.concat(axis=3, values=[branch0, branch1, branch2, branch3])
def branch4_res_5x5(
ops,
scope='branch4_res_5x5',
init=tf.contrib.layers.xavier_initializer_conv2d(uniform=False),
training=True,
bn_momentum=0.9,
act=tf.nn.relu,
):
"""
:param ops: input node
:param scope: namescope of this layer
:param init: initializer for convolution kernels
:param training: training flag for batch_norm layer
:param bn_momentum: batch normalization momentum
:param act: activation function
:return: output node
"""
# get input channel number
in_channel = ops.shape[-1]
# convolution tower
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
# 3 tower branches
# 1x1 branch
branch0 = conv_bn_act(ops, [1, 1, in_channel, in_channel // 8], [1, 1], "_0_a_1x1", init, training, bn_momentum,
act)
# 1x1, 3x3 branch
branch1 = conv_bn_act(ops, [1, 1, in_channel, in_channel // 8], [1, 1], "_1_a_1x1", init, training, bn_momentum,
act)
branch1 = conv_bn_act(branch1, [3, 3, in_channel // 8, in_channel // 8], [1, 1], "_1_b_3x3", init, training,
bn_momentum, act)
# 1x1, 3x3, 3x3 branch
branch2 = conv_bn_act(ops, [1, 1, in_channel, in_channel // 8], [1, 1], "_2_a_1x1", init, training, bn_momentum,
act)
branch2 = conv_bn_act(branch2, [3, 3, in_channel // 8, in_channel * 3 // 16], [1, 1], "_2_b_3x3", init,
training, bn_momentum, act)
branch2 = conv_bn_act(branch2, [3, 3, in_channel * 3 // 16, in_channel // 4], [1, 1], "_2_c_3x3", init,
training, bn_momentum, act)
# tower top convolution
concat = tf.concat(axis=3, values=[branch0, branch1, branch2])
tower = conv_bn(concat, [1, 1, in_channel // 2, in_channel], [1, 1], "_tower_a_1x1", init, training,
bn_momentum)
# residual summation
return ops + tower
def branch3_maxpool_downsample_5x5(
ops,
scope='branch3_maxpool_downsample_5x5',
init=tf.contrib.layers.xavier_initializer_conv2d(uniform=False),
training=True,
bn_momentum=0.9,
act=tf.nn.relu
):
"""
:param ops: input node
:param scope: namescope of this layer
:param init: initializer for convolution kernels
:param training: training flag for batch_norm layer
:param bn_momentum: batch normalization momentum
:param act: activation function
:return: output node
"""
# get input channel number
in_channel = ops.shape[-1]
# convolution branches
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
# 3x3 branch
branch0 = conv_bn_act(ops, [3, 3, in_channel, in_channel // 2], [2, 2], "_0_a_3x3", init, training, bn_momentum,
act, 'VALID')
# 1x1, 3x3, 3x3 branch
branch1 = conv_bn_act(ops, [1, 1, in_channel, in_channel // 4], [1, 1], "_1_a_1x1", init, training, bn_momentum,
act)
branch1 = conv_bn_act(branch1, [3, 3, in_channel // 4, in_channel * 3 // 8], [1, 1], "_1_b_3x3", init, training,
bn_momentum, act)
branch1 = conv_bn_act(branch1, [3, 3, in_channel * 3 // 8, in_channel // 2], [2, 2], "_1_c_3x3", init, training,
bn_momentum, act, 'VALID')
# max pooling branch
branch2 = tf.nn.max_pool(ops, [1, 3, 3, 1], [1, 2, 2, 1], padding='VALID')
# channel concatenation
return tf.concat(axis=3, values=[branch0, branch1, branch2])
def branch4_avgpool_13x13(
ops,
scope='branch3_avgpool_13x13',
init=tf.contrib.layers.xavier_initializer_conv2d(uniform=False),
training=True,
bn_momentum=0.9,
act=tf.nn.relu,
out_channel=64
):
"""
:param ops: input node
:param scope: namescope of this layer
:param init: initializer for convolution kernels
:param training: training flag for batch_norm layer
:param bn_momentum: batch normalization momentum
:param act: activation function
:param out_channel: output channel number
:return: output node
"""
# get input channel number
in_channel = ops.shape[-1]
# convolution branches
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
# 1x1 branch
branch0 = conv_bn_act(ops, [1, 1, in_channel, out_channel * 3 // 8], [1, 1], "_0_a_1x1", init, training,
bn_momentum, act)
# 1x1, 1x7, 7x1 branch
branch1 = conv_bn_act(ops, [1, 1, in_channel, out_channel // 8], [1, 1], "_1_a_1x1", init, training,
bn_momentum, act)
branch1 = conv_bn_act(branch1, [1, 7, out_channel // 8, out_channel * 3 // 16], [1, 1], "_1_b_1x7", init,
training, bn_momentum, act)
branch1 = conv_bn_act(branch1, [7, 1, out_channel * 3 // 16, out_channel // 4], [1, 1], "_1_c_7x1", init,
training, bn_momentum, act)
# 1x1, 7x1, 1x7, 7x1, 1x7 branch
branch2 = conv_bn_act(ops, [1, 1, in_channel, out_channel // 8], [1, 1], "_2_a_1x1", init, training,
bn_momentum, act)
branch2 = conv_bn_act(branch2, [7, 1, out_channel // 8, out_channel * 3 // 16], [1, 1], "_2_b_7x1", init,
training, bn_momentum, act)
branch2 = conv_bn_act(branch2, [1, 7, out_channel * 3 // 16, out_channel // 4], [1, 1], "_2_c_1x7", init,
training, bn_momentum, act)
branch2 = conv_bn_act(branch2, [7, 1, out_channel // 4, out_channel // 4], [1, 1], "_2_d_7x1", init,
training, bn_momentum, act)
branch2 = conv_bn_act(branch2, [1, 7, out_channel // 4, out_channel // 4], [1, 1], "_2_e_1x7", init,
training, bn_momentum, act)
# avgpool, 1x1 branch
branch3 = tf.nn.avg_pool(ops, [1, 3, 3, 1], [1, 1, 1, 1], 'SAME')
branch3 = conv_bn_act(branch3, [1, 1, in_channel, out_channel // 8], [1, 1], "_3_a_1x1", init, training,
bn_momentum, act)
# channel concatenation
return tf.concat(axis=3, values=[branch0, branch1, branch2, branch3])
def branch3_res_7x7(
ops,
scope='branch3_res_7x7',
init=tf.contrib.layers.xavier_initializer_conv2d(uniform=False),
training=True,
bn_momentum=0.9,
act=tf.nn.relu,
):
"""
:param ops: input node
:param scope: namescope of this layer
:param init: initializer for convolution kernels
:param training: training flag for batch_norm layer
:param bn_momentum: batch normalization momentum
:param act: activation function
:return: output node
"""
# get input channel number
in_channel = ops.shape[-1]
# convolution tower
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
# 3 tower branches
# 1x1 branch
branch0 = conv_bn_act(ops, [1, 1, in_channel, in_channel // 4], [1, 1], "_0_a_1x1", init, training, bn_momentum,
act)
# 1x1, 1x7, 7x1 branch
branch1 = conv_bn_act(ops, [1, 1, in_channel, in_channel // 8], [1, 1], "_1_a_1x1", init, training, bn_momentum,
act)
branch1 = conv_bn_act(branch1, [1, 7, in_channel // 8, in_channel * 3 // 16], [1, 1], "_1_b_1x7", init,
training, bn_momentum, act)
branch1 = conv_bn_act(branch1, [7, 1, in_channel * 3 // 16, in_channel // 4], [1, 1], "_1_c_7x1", init,
training, bn_momentum, act)
# 1x1 tower branch
concat = tf.concat(axis=3, values=[branch0, branch1])
tower = conv_bn(concat, [1, 1, in_channel // 2, in_channel], [1, 1], "_tower_a_1x1", init, training,
bn_momentum)
# residual summation
return ops + tower
def branch3_maxpool_downsample_9x9(
ops,
scope='branch3_maxpool_downsample_9x9',
init=tf.contrib.layers.xavier_initializer_conv2d(uniform=False),
training=True,
bn_momentum=0.9,
act=tf.nn.relu
):
"""
:param ops: input node
:param scope: namescope of this layer
:param init: initializer for convolution kernels
:param training: training flag for batch_norm layer
:param bn_momentum: batch normalization momentum
:param act: activation function
:return: output node
"""
# get input channel number
in_channel = ops.shape[-1]
# convolution branches
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
# 1x1, 3x3 branch
branch0 = conv_bn_act(ops, [1, 1, in_channel, in_channel // 8], [1, 1], "_0_a_1x1", init, training, bn_momentum,
act)
branch0 = conv_bn_act(branch0, [3, 3, in_channel // 8, in_channel // 8], [2, 2], "_0_b_3x3", init, training,
bn_momentum, act, 'VALID')
# 1x1, 1x7, 7x1, 3x3 branch
branch1 = conv_bn_act(ops, [1, 1, in_channel, in_channel // 4], [1, 1], "_1_a_1x1", init, training, bn_momentum,
act)
branch1 = conv_bn_act(branch1, [1, 7, in_channel // 4, in_channel // 4], [1, 1], "_1_b_1x7", init, training,
bn_momentum, act)
branch1 = conv_bn_act(branch1, [7, 1, in_channel // 4, in_channel * 3 // 8], [1, 1], "_1_c_7x1", init, training,
bn_momentum, act)
branch1 = conv_bn_act(branch1, [3, 3, in_channel * 3 // 8, in_channel * 3 // 8], [2, 2], "_1_d_3x3", init,
training, bn_momentum, act, 'VALID')
# max pooling branch
branch2 = tf.nn.max_pool(ops, [1, 3, 3, 1], [1, 2, 2, 1], padding='VALID')
# channel concatenation
return tf.concat(axis=3, values=[branch0, branch1, branch2])
def branch6_avgpool_5x5_downchannel(
ops,
scope='branch6_avgpool_5x5_downchannel',
init=tf.contrib.layers.xavier_initializer_conv2d(uniform=False),
training=True,
bn_momentum=0.9,
act=tf.nn.relu,
out_channel=24
):
"""
:param ops: input node
:param scope: namescope of this layer
:param init: initializer for convolution kernels
:param training: training flag for batch_norm layer
:param bn_momentum: batch normalization momentum
:param act: activation function
:param out_channel: output channel number
:return: output node
"""
# get input channel number
in_channel = ops.shape[-1]
# convolution tower
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
# 1x1 branch
branch0 = conv_bn_act(ops, [1, 1, in_channel, out_channel // 6], [1, 1], "_0_a_1x1", init, training,
bn_momentum, act)
# 1x1, (1x3, 3x1) branches
branch1_2 = conv_bn_act(ops, [1, 1, in_channel, out_channel // 4], [1, 1], "_1_2_a_1x1", init, training,
bn_momentum, act)
branch1 = conv_bn_act(branch1_2, [1, 3, out_channel // 4, out_channel // 6], [1, 1], "_1_b_1x3", init, training,
bn_momentum, act)
branch2 = conv_bn_act(branch1_2, [3, 1, out_channel // 4, out_channel // 6], [1, 1], "_2_b_3x1", init, training,
bn_momentum, act)
# 1x1, 3x1, 1x3, (1x3, 3x1) branches
branch3_4 = conv_bn_act(ops, [1, 1, in_channel, out_channel // 2], [1, 1], "_3_4_a_1x1", init, training,
bn_momentum, act)
branch3_4 = conv_bn_act(branch3_4, [3, 1, out_channel // 2, out_channel // 3], [1, 1], "_3_4_b_3x1", init,
training, bn_momentum, act)
branch3_4 = conv_bn_act(branch3_4, [1, 3, out_channel // 3, out_channel // 4], [1, 1], "_3_4_c_1x3", init,
training, bn_momentum, act)
branch3 = conv_bn_act(branch3_4, [1, 3, out_channel // 4, out_channel // 6], [1, 1], "_3_d_1x3", init, training,
bn_momentum, act)
branch4 = conv_bn_act(branch3_4, [3, 1, out_channel // 4, out_channel // 6], [1, 1], "_4_d_3x1", init, training,
bn_momentum, act)
# avgpool, 1x1 branch
branch5 = tf.nn.avg_pool(ops, [1, 3, 3, 1], [1, 1, 1, 1], 'SAME')
branch5 = conv_bn_act(branch5, [1, 1, in_channel, out_channel // 6], [1, 1], "_5_a_1x1", init, training,
bn_momentum, act)
# channel concatenation
return tf.concat(axis=3, values=[branch0, branch1, branch2, branch3, branch4, branch5])
def fc_dropout(
ops,
out_nodes,
scope='fully_connected',
init=tf.contrib.layers.xavier_initializer(uniform=False),
training=True,
bn_momentum=0.9,
act=tf.nn.relu,
keep_prob=0.9
):
"""
:param ops: input node
:param scope: namescope of this layer
:param out_nodes: output node number
:param init: initializer for weights
:param training: training flag for batch normalization layer
:param bn_momentum: batch normalization momentum
:param act: activation function
:param keep_prob: keeping probability for dropout layer
:return: output node
"""
in_nodes = ops.shape[-1]
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
weights = tf.get_variable("weights", [in_nodes, out_nodes], initializer=init)
fc = tf.matmul(ops, weights)
bn = tf.layers.batch_normalization(fc, momentum=bn_momentum, training=training)
activations = act(bn)
return tf.nn.dropout(activations, keep_prob)
def global_avg_dropout(
ops,
scope='global_avg_dropout',
keep_prob=0.9
):
"""
:param ops: input node
:param scope: namescope of this layer
:param keep_prob: keeping probability for dropout layer
:return: output node
"""
_, width, height, _ = ops.shape
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
avg = tf.nn.avg_pool(ops, [1, width, height, 1], [1, width, height, 1], 'VALID')
return tf.nn.dropout(avg, keep_prob)
if __name__ == '__main__':
input_node = tf.placeholder('float32', [16, 50, 50, 16])
output_node = branch4_avgpool_5x5(input_node, out_channel=32)
output_node = branch4_res_5x5(output_node)
output_node = branch3_maxpool_downsample_5x5(output_node)
output_node = branch4_avgpool_13x13(output_node)
output_node = branch3_res_7x7(output_node)
output_node = branch3_maxpool_downsample_9x9(output_node)
output_node = branch6_avgpool_5x5_downchannel(output_node)
print_variable_info()
print(output_node.shape)
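    # A hedged extension of the demo: a typical classification head on top of the
    # tower (global average pooling, flatten, then a fully-connected layer); the
    # 10-class output size is arbitrary.
    pooled = global_avg_dropout(output_node, keep_prob=1.0)
    flat = tf.reshape(pooled, [16, -1])
    logits = fc_dropout(flat, out_nodes=10, act=tf.identity, keep_prob=1.0)
    print(logits.shape)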
| 19,724 | 40.179541 | 120 | py |
mkbe | mkbe-master/MKBE/models/ml_distmult.py | # Relations used: age, gender, occupation, zip, title, release date, genre, rating(1-5)
import metrics
import tensorflow as tf
#from compact_bilinear_pooling import compact_bilinear_pooling_layer
def activation(x):
with tf.name_scope("selu") as scope:
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
return scale * tf.where(x >= 0.0, x, alpha * tf.nn.elu(x))
# return tf.nn.relu(x)
# return tf.tanh(x)
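# Numeric sanity check for the SELU above (values rounded):
# activation(tf.constant([-1.0, 0.0, 1.0])) evaluates to roughly [-1.111, 0.0, 1.051].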
def define_graph(hyperparams, nodes, config=None):
dtype = tf.float32 if config is None or "dtype" not in config else config["dtype"]
id_dtype = tf.int32 if config is None or "id_dtype" not in config else config["id_dtype"]
pos_user_e1 = tf.placeholder(tf.int32)
pos_user_r = tf.placeholder(tf.int32)
neg_user_e1 = tf.placeholder(tf.int32)
neg_user_r = tf.placeholder(tf.int32)
pos_movie_e1 = tf.placeholder(tf.int32)
pos_movie_r = tf.placeholder(tf.int32)
neg_movie_e1 = tf.placeholder(tf.int32)
neg_movie_r = tf.placeholder(tf.int32)
pos_age = tf.placeholder(tf.float32)
neg_age = tf.placeholder(tf.float32)
pos_gender = tf.placeholder(tf.int32)
neg_gender = tf.placeholder(tf.int32)
pos_occupation = tf.placeholder(tf.int32)
neg_occupation = tf.placeholder(tf.int32)
pos_zip = tf.placeholder(tf.int32)
neg_zip = tf.placeholder(tf.int32)
pos_title = tf.placeholder(tf.int32, shape=(None, None))
neg_title = tf.placeholder(tf.int32, shape=(None, None))
pos_title_len = tf.placeholder(tf.int32)
neg_title_len = tf.placeholder(tf.int32)
pos_date = tf.placeholder(tf.float32)
neg_date = tf.placeholder(tf.float32)
pos_genre = tf.placeholder(tf.float32)
neg_genre = tf.placeholder(tf.float32)
pos_userrating = tf.placeholder(tf.int32)
neg_userrating = tf.placeholder(tf.int32)
pos_relrating = tf.placeholder(tf.int32)
neg_relrating = tf.placeholder(tf.int32)
pos_movierating = tf.placeholder(tf.int32)
neg_movierating = tf.placeholder(tf.int32)
pos_poster_movie = tf.placeholder(tf.int32)
pos_poster_rel = tf.placeholder(tf.int32)
pos_poster_fm = tf.placeholder(tf.float32, shape=(None, None, None, 512))
neg_poster_movie = tf.placeholder(tf.int32)
neg_poster_rel = tf.placeholder(tf.int32)
neg_poster_fm = tf.placeholder(tf.float32, shape=(None, None, None, 512))
mlp_keepprob = tf.placeholder(tf.float32)
lstm_keepprob = tf.placeholder(tf.float32)
# Weights for embeddings
if hyperparams["emb_dim"] > 3:
user_weights = tf.get_variable(
"user_weights", shape=[hyperparams["user_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"],
mode="FAN_OUT", dtype=dtype))
rel_weights = tf.get_variable(
"relation_weights", shape=[hyperparams["relation_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"],
mode="FAN_OUT", dtype=dtype))
movie_weights = tf.get_variable(
"movie_weights", shape=[hyperparams["movie_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"],
mode="FAN_OUT", dtype=dtype))
gender_weights = tf.get_variable(
"gender_weights", shape=[hyperparams["gender_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"],
mode="FAN_OUT", dtype=dtype))
job_weights = tf.get_variable(
"job_weights", shape=[hyperparams["job_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"],
mode="FAN_OUT", dtype=dtype))
zip_weights = tf.get_variable(
"zip_weights", shape=[hyperparams["zip_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"],
mode="FAN_OUT", dtype=dtype))
char_weights = tf.get_variable(
"char_weights", shape=[hyperparams["char_size"], hyperparams["emb_dim"] // 2],
dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"],
mode="FAN_OUT", dtype=dtype))
else:
user_weights = tf.get_variable(
"user_weights", shape=[hyperparams["user_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
rel_weights = tf.get_variable(
"relation_weights", shape=[hyperparams["relation_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
movie_weights = tf.get_variable(
"movie_weights", shape=[hyperparams["movie_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
gender_weights = tf.get_variable(
"gender_weights", shape=[hyperparams["gender_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
job_weights = tf.get_variable(
"job_weights", shape=[hyperparams["job_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
zip_weights = tf.get_variable(
"zip_weights", shape=[hyperparams["zip_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
char_weights = tf.get_variable(
"char_weights", shape=[hyperparams["char_size"], hyperparams["emb_dim"] // 2],
dtype=dtype, initializer=tf.truncated_normal_initializer(dtype=dtype)
)
    # Bias embeddings
user_bias = tf.get_variable("user_bias", shape=[hyperparams["user_size"], 1], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
rel_bias = tf.get_variable("rel_bias", shape=[hyperparams["relation_size"], 1], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
movie_bias = tf.get_variable("movie_bias", shape=[hyperparams["movie_size"], 1], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
# Embedding lookup
pos_user_e1_emb = tf.nn.embedding_lookup(user_weights, pos_user_e1)
neg_user_e1_emb = tf.nn.embedding_lookup(user_weights, neg_user_e1)
pos_user_r_emb = tf.nn.embedding_lookup(rel_weights, pos_user_r)
neg_user_r_emb = tf.nn.embedding_lookup(rel_weights, neg_user_r)
pos_movie_e1_emb = tf.nn.embedding_lookup(movie_weights, pos_movie_e1)
neg_movie_e1_emb = tf.nn.embedding_lookup(movie_weights, neg_movie_e1)
pos_movie_r_emb = tf.nn.embedding_lookup(rel_weights, pos_movie_r)
neg_movie_r_emb = tf.nn.embedding_lookup(rel_weights, neg_movie_r)
pos_gender_emb = tf.nn.embedding_lookup(gender_weights, pos_gender)
neg_gender_emb = tf.nn.embedding_lookup(gender_weights, neg_gender)
pos_occupation_emb = tf.nn.embedding_lookup(job_weights, pos_occupation)
neg_occupation_emb = tf.nn.embedding_lookup(job_weights, neg_occupation)
pos_zip_emb = tf.nn.embedding_lookup(zip_weights, pos_zip)
neg_zip_emb = tf.nn.embedding_lookup(zip_weights, neg_zip)
pos_title_emb = tf.nn.embedding_lookup(char_weights, pos_title)
neg_title_emb = tf.nn.embedding_lookup(char_weights, neg_title)
pos_userrating_emb = tf.nn.embedding_lookup(user_weights, pos_userrating)
neg_userrating_emb = tf.nn.embedding_lookup(user_weights, neg_userrating)
pos_relrating_emb = tf.nn.embedding_lookup(rel_weights, pos_relrating)
neg_relrating_emb = tf.nn.embedding_lookup(rel_weights, neg_relrating)
pos_ratedmovie_emb = tf.nn.embedding_lookup(movie_weights, pos_movierating)
neg_ratedmovie_emb = tf.nn.embedding_lookup(movie_weights, neg_movierating)
pos_poster_movie_emb = tf.nn.embedding_lookup(movie_weights, pos_poster_movie)
neg_poster_movie_emb = tf.nn.embedding_lookup(movie_weights, neg_poster_movie)
pos_poster_rel_emb = tf.nn.embedding_lookup(rel_weights, pos_poster_rel)
neg_poster_rel_emb = tf.nn.embedding_lookup(rel_weights, neg_poster_rel)
pos_userrating_bias_emb = tf.nn.embedding_lookup(user_bias, pos_userrating)
neg_userrating_bias_emb = tf.nn.embedding_lookup(user_bias, neg_userrating)
pos_relrating_bias_emb = tf.nn.embedding_lookup(rel_bias, pos_relrating)
neg_relrating_bias_emb = tf.nn.embedding_lookup(rel_bias, neg_relrating)
pos_ratedmovie_bias_emb = tf.nn.embedding_lookup(movie_bias, pos_movierating)
neg_ratedmovie_bias_emb = tf.nn.embedding_lookup(movie_bias, neg_movierating)
# Collect Regularization variables
regularized_variables = []
# MLP Encoding
# For ages
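    # A small MLP lifts each scalar age to an emb_dim vector so it can be scored as an e2 embedding.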
age_weights = [tf.get_variable(
"age_weights_{:}".format(layer),
shape=[1 if layer == 0 else hyperparams["emb_dim"], hyperparams["emb_dim"]],
dtype=dtype, initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["down_scale"],
mode="FAN_AVG", dtype=dtype)
) for layer in range(hyperparams["MLPLayers"])]
age_bias = [tf.get_variable(
"age_bias_{:}".format(layer), shape=[hyperparams["emb_dim"]], dtype=dtype, initializer=tf.zeros_initializer
) for layer in range(hyperparams["MLPLayers"])]
regularized_variables += age_weights
regularized_variables += age_bias
# Broadcasting for scalar-vector multiplication in the first layer and vector-matrix multiplication for other layers
pos_age_node = [tf.reshape(pos_age, (-1, 1))]
neg_age_node = [tf.reshape(neg_age, (-1, 1))]
for w, b in zip(age_weights, age_bias):
pos_age_node.append(activation(tf.add(
b, tf.multiply(pos_age_node[-1], w) if len(pos_age_node) == 1 else tf.matmul(pos_age_node[-1], w))))
neg_age_node.append(activation(tf.add(
b, tf.multiply(neg_age_node[-1], w) if len(neg_age_node) == 1 else tf.matmul(neg_age_node[-1], w))))
# For dates
date_weights = [tf.get_variable(
"date_weights_{:}".format(layer),
shape=[1 if layer == 0 else hyperparams["emb_dim"], hyperparams["emb_dim"]],
dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["down_scale"], mode="FAN_AVG",
dtype=dtype)
) for layer in range(hyperparams["MLPLayers"])]
date_bias = [tf.get_variable(
"date_bias_{:}".format(layer), shape=[hyperparams["emb_dim"]], dtype=dtype, initializer=tf.zeros_initializer
) for layer in range(hyperparams["MLPLayers"])]
regularized_variables += date_weights
regularized_variables += date_bias
# Broadcasting for scalar-vector multiplication in the first layer and vector-matrix multiplication for other layers
pos_date_node = [tf.reshape(pos_date, (-1, 1))]
neg_date_node = [tf.reshape(neg_date, (-1, 1))]
for w, b in zip(date_weights, date_bias):
pos_date_node.append(activation(tf.add(
b, tf.multiply(pos_date_node[-1], w) if len(pos_date_node) == 1 else tf.matmul(pos_date_node[-1], w))))
neg_date_node.append(activation(tf.add(
b, tf.multiply(neg_date_node[-1], w) if len(neg_date_node) == 1 else tf.matmul(neg_date_node[-1], w))))
# For genres
genre_weights = [tf.get_variable(
"genre_weights_{:}".format(layer),
shape=[19 if layer == 0 else hyperparams["emb_dim"], hyperparams["emb_dim"]],
dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["down_scale"], mode="FAN_AVG",
dtype=dtype)
) for layer in range(hyperparams["MLPLayers"])]
genre_bias = [tf.get_variable(
"genre_bias_{:}".format(layer), shape=[hyperparams["emb_dim"]], dtype=dtype, initializer=tf.zeros_initializer
) for layer in range(hyperparams["MLPLayers"])]
regularized_variables += genre_weights
regularized_variables += genre_bias
pos_genre_node = [tf.reshape(pos_genre, (-1, 19))]
neg_genre_node = [tf.reshape(neg_genre, (-1, 19))]
for w, b in zip(genre_weights, genre_bias):
pos_genre_node.append(activation(tf.add(b, tf.matmul(pos_genre_node[-1], w))))
neg_genre_node.append(activation(tf.add(b, tf.matmul(neg_genre_node[-1], w))))
# GRU Encoding
GRU_base_units = hyperparams["emb_dim"] // 2
    # Layer 0 concatenates the forward/backward outputs (emb_dim // 2 each, giving emb_dim); deeper layers average them, so the width stays emb_dim
GRUCells = [tf.nn.rnn_cell.GRUCell(GRU_base_units if layer < 2 else hyperparams["emb_dim"])
for layer in range(hyperparams["GRULayers"] * 2)]
pos_title_nodes = [pos_title_emb]
neg_title_nodes = [neg_title_emb]
for layer in range(hyperparams["GRULayers"]):
with tf.variable_scope("GRUEncoder_{:}".format(layer)):
if layer > 0:
out_fw, out_bw = tf.nn.bidirectional_dynamic_rnn(
GRUCells[layer * 2], GRUCells[layer * 2 + 1], dtype=dtype,
inputs=pos_title_nodes[-1], sequence_length=pos_title_len, swap_memory=True)[0]
pos_title_nodes.append((out_fw + out_bw) / 2.0)
else:
pos_title_nodes.append(tf.concat(tf.nn.bidirectional_dynamic_rnn(
GRUCells[layer * 2], GRUCells[layer * 2 + 1], dtype=dtype,
inputs=pos_title_nodes[-1], sequence_length=pos_title_len, swap_memory=True)[0], 2))
# Share weights between encoders for positive samples and negative samples
with tf.variable_scope("GRUEncoder_{:}".format(layer), reuse=True):
if layer > 0:
out_fw, out_bw = tf.nn.bidirectional_dynamic_rnn(
GRUCells[layer * 2], GRUCells[layer * 2 + 1], dtype=dtype,
inputs=neg_title_nodes[-1], sequence_length=neg_title_len, swap_memory=True)[0]
neg_title_nodes.append((out_fw + out_bw) / 2.0)
else:
neg_title_nodes.append(tf.concat(tf.nn.bidirectional_dynamic_rnn(
GRUCells[layer * 2], GRUCells[layer * 2 + 1], dtype=dtype,
inputs=neg_title_nodes[-1], sequence_length=neg_title_len, swap_memory=True)[0], 2))
regularized_variables += tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope="GRUEncoder_{:}".format(layer))
gru_weights = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope="GRUEncoder_{:}".format(layer))
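            # Note: gru_weights is reassigned on every iteration, so after the loop it holds only
            # the variables of the last GRU layer (these are the ones later added to lr2_vars).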
    # CNN encoding of poster feature maps (the compact bilinear pooling variant below is kept but disabled)
"""
proj_matrix1 = tf.get_variable("proj_matrix1", shape=[1, 1, 512, 1024], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(
factor=hyperparams["down_scale"], mode="FAN_AVG", dtype=dtype))
proj_matrix2 = tf.get_variable("proj_matrix2", shape=[1, 1, 512, 1024], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(
factor=hyperparams["down_scale"], mode="FAN_AVG", dtype=dtype))
pos_branch1 = tf.nn.conv2d(pos_poster_fm, proj_matrix1, strides=[1, 1, 1, 1], padding='SAME')
pos_branch2 = tf.nn.conv2d(pos_poster_fm, proj_matrix2, strides=[1, 1, 1, 1], padding='SAME')
pos_poster_vec = compact_bilinear_pooling_layer(pos_branch1, pos_branch2, hyperparams["emb_dim"])
neg_branch1 = tf.nn.conv2d(neg_poster_fm, proj_matrix1, strides=[1, 1, 1, 1], padding='SAME')
neg_branch2 = tf.nn.conv2d(neg_poster_fm, proj_matrix2, strides=[1, 1, 1, 1], padding='SAME')
neg_poster_vec = compact_bilinear_pooling_layer(neg_branch1, neg_branch2, hyperparams["emb_dim"])
"""
with tf.variable_scope("cnn_encoder"):
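        # Poster encoder: the input placeholder carries pre-extracted conv feature maps
        # (H, W, 512 channels; the conv5_4 / pool5 names suggest a VGG-style backbone).
        # A dilated conv + global average pooling + FC projects each map to an emb_dim vector.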
ksize = hyperparams["ksize"]
depth = hyperparams["depth"]
drate = hyperparams["drate"]
cnn_weights = tf.get_variable("cnn_weights", shape=[ksize, ksize, 512, depth], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(
factor=hyperparams["cnn_scale"], mode="FAN_AVG", dtype=dtype))
cnn_bias = tf.get_variable("cnn_bias", shape=[depth], dtype=dtype, initializer=tf.zeros_initializer)
pos_conv5_4 = tf.nn.relu(tf.nn.bias_add(tf.nn.convolution(pos_poster_fm, cnn_weights, "VALID",
dilation_rate=[drate, drate]), cnn_bias))
neg_conv5_4 = tf.nn.relu(tf.nn.bias_add(tf.nn.convolution(neg_poster_fm, cnn_weights, "VALID",
dilation_rate=[drate, drate]), cnn_bias))
#print(pos_conv5_4.shape, neg_conv5_4.shape)
#pos_conv5_4_shape = tf.shape(pos_conv5_4)
#neg_conv5_4_shape = tf.shape(neg_conv5_4)
pos_pool5 = tf.reduce_mean(pos_conv5_4, axis=[1, 2])
neg_pool5 = tf.reduce_mean(neg_conv5_4, axis=[1, 2])
#print(pos_pool5.shape, neg_pool5.shape)
#pos_pool5_shape = tf.shape(pos_pool5)
#neg_pool5_shape = tf.shape(neg_pool5)
fc_weights = tf.get_variable("fc_weights", shape=[depth, hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(
factor=hyperparams["cnn_scale"], mode="FAN_AVG", dtype=dtype))
fc_bias = tf.get_variable("fc_bias", shape=[hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.zeros_initializer)
pos_poster_vec = activation(tf.add(tf.matmul(pos_pool5, fc_weights), fc_bias))
        neg_poster_vec = activation(tf.add(tf.matmul(neg_pool5, fc_weights), fc_bias))
#print(pos_poster_vec.shape, neg_poster_vec.shape)
#pos_poster_vec_shape = tf.shape(pos_poster_vec)
#neg_poster_vec_shape = tf.shape(neg_poster_vec)
regularized_variables += [cnn_weights, cnn_bias, fc_weights, fc_bias]
# Aggregate and normalize e1
pos_e1_list = [pos_user_e1_emb, pos_movie_e1_emb, pos_userrating_emb, pos_poster_movie_emb]
neg_e1_list = [neg_user_e1_emb, neg_movie_e1_emb, neg_userrating_emb, neg_poster_movie_emb]
if hyperparams["normalize_e1"]:
pos_e1 = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_e1_list], axis=0),
dim=1)
neg_e1 = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_e1_list], axis=0),
dim=1)
else:
pos_e1 = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_e1_list], axis=0)
neg_e1 = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_e1_list], axis=0)
regularized_variables += [user_weights]
# Aggregate r
pos_r_list = [pos_user_r_emb, pos_movie_r_emb, pos_relrating_emb, pos_poster_rel_emb]
neg_r_list = [neg_user_r_emb, neg_movie_r_emb, neg_relrating_emb, neg_poster_rel_emb]
if hyperparams["normalize_relation"]:
pos_r = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_r_list], axis=0),
dim=1)
neg_r = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_r_list], axis=0),
dim=1)
else:
pos_r = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_r_list], axis=0)
neg_r = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_r_list], axis=0)
regularized_variables += [rel_weights]
# Aggregate and normalize e2
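    # Each modality contributes a (batch_i, emb_dim) block and the blocks are concatenated along the
    # batch axis, so one DistMult scorer handles every relation type; the title is represented by the
    # GRU output at time step 0. The feeding code is expected to order the per-modality batches
    # consistently with the e1 / r concatenations above.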
pos_e2_list = [pos_age_node[-1], pos_gender_emb, pos_occupation_emb, pos_zip_emb, pos_title_nodes[-1][:, 0, :],
pos_date_node[-1], pos_genre_node[-1], pos_ratedmovie_emb, pos_poster_vec]
neg_e2_list = [neg_age_node[-1], neg_gender_emb, neg_occupation_emb, neg_zip_emb, neg_title_nodes[-1][:, 0, :],
neg_date_node[-1], neg_genre_node[-1], neg_ratedmovie_emb, neg_poster_vec]
if hyperparams["normalize_e2"]:
pos_e2 = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_e2_list], axis=0),
dim=1)
neg_e2 = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_e2_list], axis=0),
dim=1)
else:
pos_e2 = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_e2_list], axis=0)
neg_e2 = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_e2_list], axis=0)
regularized_variables += [movie_weights]
    # Apply the learned relation bias only when the bias flag is enabled
    # (consistent with how scoring_and_counting applies it at test time)
    if hyperparams["bias"]:
        pos_bias = pos_relrating_bias_emb
        neg_bias = neg_relrating_bias_emb
    else:
        pos_bias = 0
        neg_bias = 0
# Distmult link prediction
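    # score(e1, r, e2) = sum_d e1[d] * r[d] * e2[d] (+ relation bias), computed row-wise for
    # the positive and negative batches.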
pos = tf.reduce_sum(tf.multiply(tf.multiply(pos_e1, pos_r), pos_e2), 1, keep_dims=True) + pos_bias
neg = tf.reduce_sum(tf.multiply(tf.multiply(neg_e1, neg_r), neg_e2), 1, keep_dims=True) + neg_bias
# Regularization term
regularizer = tf.contrib.layers.l2_regularizer(hyperparams["regularization_coefficient"])
regularization_term = tf.contrib.layers.apply_regularization(regularizer, regularized_variables)
# Collect variables to be trained
lr1_vars = [user_weights, movie_weights, user_bias, movie_bias]
lr2_vars = [rel_weights, rel_bias]
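    # NOTE: the "True or ..." short-circuits below make every branch unconditional, so all
    # modality encoders are trained regardless of hyperparams["subset"].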
if True or "user" in hyperparams["subset"]:
lr1_vars += age_weights + age_bias + [gender_weights, job_weights, zip_weights]
if True or "title" in hyperparams["subset"]:
lr2_vars += gru_weights
if True or "movie" in hyperparams["subset"]:
lr1_vars += date_weights + date_bias + genre_weights + genre_bias
if True or "poster" in hyperparams["subset"]:
lr1_vars += [cnn_weights, cnn_bias, fc_weights, fc_bias]
#lr1_vars += [proj_matrix1, proj_matrix2]
# Minimize Hinge Loss
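    # Pairwise ranking loss: each negative should score at least `margin` below its positive,
    # i.e. max(0, neg - pos + margin); `loss` sums the violations (plus L2), `loss_to_show` averages them.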
loss = tf.reduce_sum((tf.maximum(neg - pos + hyperparams["margin"], 0))) + regularization_term
loss_to_show = tf.reduce_mean((tf.maximum(neg - pos + hyperparams["margin"], 0))) + regularization_term
training_op = tf.train.AdagradOptimizer(learning_rate=hyperparams["learning_rate"]).minimize(
loss, var_list=lr1_vars)
rlr_train_op = tf.train.AdagradOptimizer(learning_rate=hyperparams["learning_rate_reduced"]).minimize(
loss, var_list=lr2_vars
)
summary_nodes = [tf.summary.scalar("loss", loss_to_show),
tf.summary.scalar("regularization_term", regularization_term),
tf.summary.histogram("pos", pos),
tf.summary.histogram("neg", neg),
tf.summary.histogram("user_emb", user_weights),
tf.summary.histogram("relation_emb", rel_weights),
tf.summary.histogram("movie_emb", movie_weights)]
training_summary = tf.summary.merge_all()
return locals()
def scoring_and_counting(hyperparams, nodes, config=None):
# Input placeholders
rating_relations = tf.placeholder(tf.int32, shape=[5])
pos_user = tf.placeholder(tf.int32)
pos_r = tf.placeholder(tf.int32)
pos_movie = tf.placeholder(tf.int32)
# Weights to use
user_weights = nodes["user_weights"]
movie_weights = nodes["movie_weights"]
relation_weights = nodes["rel_weights"]
relation_bias = nodes["rel_bias"]
# Normalize e2 weights
if hyperparams["test_normalize_e2"]:
normalized_movie_weights = tf.nn.l2_normalize(movie_weights, dim=1)
else:
normalized_movie_weights = movie_weights
# Normalize r weights
if hyperparams["test_normalize_relation"]:
normalized_relation_weights = tf.nn.l2_normalize(relation_weights, dim=1)
else:
normalized_relation_weights = relation_weights
# Normalize e1 weights
if hyperparams["test_normalize_e1"]:
normalized_user_weights = tf.nn.l2_normalize(user_weights, dim=1)
else:
normalized_user_weights = user_weights
# Embedding positive and negative samples
pos_user_emb = tf.nn.embedding_lookup(normalized_user_weights, pos_user)
pos_r_emb = tf.nn.embedding_lookup(normalized_relation_weights, pos_r)
pos_movie_emb = tf.nn.embedding_lookup(normalized_movie_weights, pos_movie)
rating_relation_weights = tf.nn.embedding_lookup(normalized_relation_weights, rating_relations)
if hyperparams["bias"]:
pos_score_bias = tf.reshape(tf.nn.embedding_lookup(relation_bias, pos_r), (-1, 1))
neg_score_bias = tf.transpose(tf.nn.embedding_lookup(relation_bias, rating_relations))
else:
pos_score_bias = 0
neg_score_bias = 0
# Reshape and transpose the movie weights and rating weights to a (1, dim, depth) tensor
neg_movie_emb = tf.transpose(tf.reshape(
normalized_movie_weights, (-1, hyperparams["emb_dim"], 1)),
(2, 1, 0))
neg_r_emb = tf.transpose(tf.reshape(
rating_relation_weights, (-1, hyperparams["emb_dim"], 1)),
(2, 1, 0))
# Scoring positive samples
pos_scoring = tf.reduce_sum(
tf.multiply(tf.multiply(pos_user_emb, pos_r_emb), pos_movie_emb), axis=1, keep_dims=True) + pos_score_bias
# Scoring movie negative samples with broadcasting
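    # Broadcast (batch, dim, 1) * (1, dim, N) and sum over dim: every positive (user, relation) pair
    # is scored against all N candidate movies at once, giving a (batch, N) score matrix.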
pos_user_r_mul = tf.multiply(pos_user_emb, pos_r_emb)
neg_scoring_movie = tf.squeeze(tf.reduce_sum(
tf.multiply(tf.reshape(pos_user_r_mul, (-1, hyperparams["emb_dim"], 1)), neg_movie_emb),
axis=1
)) + pos_score_bias
# Scoring rating negative samples with broadcasting
pos_user_movie_mul = tf.multiply(pos_user_emb, pos_movie_emb)
neg_scoring_rating = tf.squeeze(tf.reduce_sum(
tf.multiply(tf.reshape(pos_user_movie_mul, (-1, hyperparams["emb_dim"], 1)), neg_r_emb),
axis=1
)) + neg_score_bias
movie_higher_values = tf.reduce_sum(tf.cast(neg_scoring_movie > pos_scoring, tf.float32), axis=1)
rating_higher_values = tf.reduce_sum(tf.cast(neg_scoring_rating > pos_scoring, tf.float32), axis=1)
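    # Each count is the number of corrupted candidates that outscore the true triple,
    # i.e. a 0-based rank used downstream for MRR / HITS@k.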
return locals()
def test_graph(hyperparams, nodes, config=None):
nodes = scoring_and_counting(hyperparams, nodes, config=config)
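    # metrics.mrr / metrics.hits_n come from the separate metrics module (not shown here);
    # presumably they turn these "higher-scoring negatives" counts into reciprocal ranks
    # (rank = count + 1) and hit rates.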
metric_values = {
"MRR_movie": metrics.mrr(nodes["movie_higher_values"]),
"HITS@10_movie": metrics.hits_n(nodes["movie_higher_values"], 10),
"HITS@3_movie": metrics.hits_n(nodes["movie_higher_values"], 3),
"HITS@1_movie": metrics.hits_n(nodes["movie_higher_values"], 1),
"MRR_r": metrics.mrr(nodes["rating_higher_values"]),
"HITS@5_r": metrics.hits_n(nodes["rating_higher_values"], 5),
"HITS@3_r": metrics.hits_n(nodes["rating_higher_values"], 3),
"HITS@2_r": metrics.hits_n(nodes["rating_higher_values"], 2),
"HITS@1_r": metrics.hits_n(nodes["rating_higher_values"], 1)
}
nodes.update(metric_values)
summaries = [tf.summary.scalar(k, v) for k, v in metric_values.items()] + [
tf.summary.histogram("rating score rankings", nodes["rating_higher_values"]),
tf.summary.histogram("movie score rankings", nodes["movie_higher_values"])
]
nodes["test_summary"] = tf.summary.merge(summaries)
return nodes
def debug_graph(hyperparams, nodes, config=None):
    rating_rankings_min = tf.reduce_min(nodes["rating_higher_values"])
rating_rankings_max = tf.reduce_max(nodes["rating_higher_values"])
neg_score_rating_shape = tf.shape(nodes["neg_scoring_rating"])
neg_r_emb_shape = tf.shape(nodes["neg_r_emb"])
pos_u_m_mul_shape = tf.shape(nodes["pos_user_movie_mul"])
pos_scoring_shape = tf.shape(nodes["pos_scoring"])
return locals()
| 28,863 | 50.359431 | 120 | py |
mkbe | mkbe-master/MKBE/models/yago_convE_kb.py | import tensorflow as tf
def activation(x):
"""
with tf.name_scope("selu") as scope:
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
return scale * tf.where(x >= 0.0, x, alpha * tf.nn.elu(x))
"""
return tf.tanh(x)
def define_graph(hyperparams, config=None):
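    # ConvE-style training graph for the YAGO split: entity/relation embeddings, an MLP encoder for
    # numeric literals, and a ConvE interaction between e1 and r; the GRU and CNN text encoders are
    # only present as disabled (commented-out) code below.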
dtype = tf.float32 if config is None or "dtype" not in config else config["dtype"]
id_dtype = tf.int32 if config is None or "id_dtype" not in config else config["id_dtype"]
pos_e1 = tf.placeholder(tf.int32, name="pos_e1")
pos_r = tf.placeholder(tf.int32, name="pos_r")
pos_e2 = tf.placeholder(tf.int32, name="pos_e2")
neg_e1 = tf.placeholder(tf.int32, name="neg_e1")
neg_r = tf.placeholder(tf.int32, name="neg_r")
neg_e2 = tf.placeholder(tf.int32, name="neg_e2")
pos_num = tf.placeholder(tf.float32, name="pos_num")
neg_num = tf.placeholder(tf.float32, name="neg_num")
pos_text = tf.placeholder(tf.int32, shape=(None, None), name="pos_text")
neg_text = tf.placeholder(tf.int32, shape=(None, None), name="neg_text")
pos_text_len = tf.placeholder(tf.int32, name="pos_text_len")
neg_text_len = tf.placeholder(tf.int32, name="neg_text_len")
mlp_keepprob = tf.placeholder(tf.float32, name="mlp_keepprob")
enc_keepprob = tf.placeholder(tf.float32, name="enc_keepprob")
emb_keepprob = tf.placeholder(tf.float32, name="emb_keepprob")
fm_keepprob = tf.placeholder(tf.float32, name="fm_keepprob")
# Weights for embeddings
if hyperparams["emb_dim"] > 3:
entity_weights = tf.get_variable(
"user_weights", shape=[hyperparams["entity_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"], mode="FAN_OUT",
dtype=dtype))
rel_weights = tf.get_variable(
"relation_weights", shape=[hyperparams["relation_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"], mode="FAN_OUT",
dtype=dtype))
word_weights = tf.get_variable(
"word_weights", shape=[hyperparams["word_size"], hyperparams["emb_dim"] // 2],
dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"], mode="FAN_OUT",
dtype=dtype))
else:
entity_weights = tf.get_variable(
"entity_weights", shape=[hyperparams["entity_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
rel_weights = tf.get_variable(
"relation_weights", shape=[hyperparams["relation_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
word_weights = tf.get_variable(
"word_weights", shape=[hyperparams["word_size"], hyperparams["emb_dim"] // 2],
dtype=dtype, initializer=tf.truncated_normal_initializer(dtype=dtype)
)
    # Bias embeddings
entity_bias = tf.get_variable("entity_bias", shape=[hyperparams["entity_size"], 1], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
rel_bias = tf.get_variable("rel_bias", shape=[hyperparams["relation_size"], 1], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
# Embedding lookup
pos_e1_emb = tf.nn.embedding_lookup(entity_weights, pos_e1)
neg_e1_emb = tf.nn.embedding_lookup(entity_weights, neg_e1)
pos_r_emb = tf.nn.embedding_lookup(rel_weights, pos_r)
neg_r_emb = tf.nn.embedding_lookup(rel_weights, neg_r)
pos_e2_emb = tf.nn.embedding_lookup(entity_weights, pos_e2)
neg_e2_emb = tf.nn.embedding_lookup(entity_weights, neg_e2)
pos_text_emb = tf.nn.embedding_lookup(word_weights, pos_text)
neg_text_emb = tf.nn.embedding_lookup(word_weights, neg_text)
pos_e1_bias_emb = tf.nn.embedding_lookup(entity_bias, pos_e1)
neg_e1_bias_emb = tf.nn.embedding_lookup(entity_bias, neg_e1)
pos_rel_bias_emb = tf.nn.embedding_lookup(rel_bias, pos_r)
neg_rel_bias_emb = tf.nn.embedding_lookup(rel_bias, neg_r)
pos_e2_bias_emb = tf.nn.embedding_lookup(entity_bias, pos_e2)
neg_e2_bias_emb = tf.nn.embedding_lookup(entity_bias, neg_e2)
# Collect Regularization variables
regularized_variables = []
# MLP Encoding
# For num
num_weights = [tf.get_variable(
"num_weights_{:}".format(layer),
shape=[1 if layer == 0 else hyperparams["emb_dim"], hyperparams["emb_dim"]],
dtype=dtype, initializer=tf.contrib.layers.variance_scaling_initializer(factor=6.0, mode="FAN_OUT", dtype=dtype)
) for layer in range(hyperparams["MLPLayers"])]
num_bias = [tf.get_variable(
"num_bias_{:}".format(layer), shape=[hyperparams["emb_dim"]], dtype=dtype, initializer=tf.zeros_initializer
) for layer in range(hyperparams["MLPLayers"])]
regularized_variables += num_weights
regularized_variables += num_bias
# Broadcasting for scalar-vector multiplication in the first layer and vector-matrix multiplication for other layers
pos_num_node = [tf.reshape(pos_num, (-1, 1))]
neg_num_node = [tf.reshape(neg_num, (-1, 1))]
for w, b in zip(num_weights, num_bias):
pos_num_node.append(activation(tf.add(
b, tf.multiply(pos_num_node[-1], w) if len(pos_num_node) == 1 else tf.matmul(pos_num_node[-1], w))))
neg_num_node.append(activation(tf.add(
b, tf.multiply(neg_num_node[-1], w) if len(neg_num_node) == 1 else tf.matmul(neg_num_node[-1], w))))
"""
# GRU Encoding
GRU_base_units = hyperparams["emb_dim"] // (2 ** hyperparams["GRULayers"])
# With bidirectional GRU and concatenation of outputs, the dimension of input vectors times 2 after each layer
GRUCells = [tf.nn.rnn_cell.GRUCell(GRU_base_units * 2 ** (layer // 2))
for layer in range(hyperparams["GRULayers"] * 2)]
pos_text_nodes = [pos_text_emb]
neg_text_nodes = [neg_text_emb]
for layer in range(hyperparams["GRULayers"]):
with tf.variable_scope("GRUEncoder_{:}".format(layer)):
pos_text_nodes.append(tf.concat(tf.nn.bidirectional_dynamic_rnn(
GRUCells[layer * 2], GRUCells[layer * 2 + 1], dtype=dtype,
inputs=pos_text_nodes[-1], sequence_length=pos_text_len, swap_memory=True)[0], 2))
# Share weights between encoders for positive samples and negative samples
with tf.variable_scope("GRUEncoder_{:}".format(layer), reuse=True):
neg_text_nodes.append(tf.concat(tf.nn.bidirectional_dynamic_rnn(
GRUCells[layer * 2], GRUCells[layer * 2 + 1], dtype=dtype,
inputs=neg_text_nodes[-1], sequence_length=neg_text_len, swap_memory=True)[0], 2))
regularized_variables += tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope="GRUEncoder_{:}".format(layer))
"""
"""
# CNN Encoding for text
text_weights = [tf.get_variable(
"text_weights_{:}".format(layer),
shape=[hyperparams["CNNTextKernel"], 1,
hyperparams["emb_dim"] // 2 if layer == 0 else hyperparams["emb_dim"], hyperparams["emb_dim"]],
dtype=dtype, initializer=tf.contrib.layers.variance_scaling_initializer(factor=6.0, mode="FAN_OUT", dtype=dtype)
) for layer in range(hyperparams["CNNTextLayers"])]
text_bias = [tf.get_variable(
"text_bias_{:}".format(layer), shape=[hyperparams["emb_dim"]], dtype=dtype, initializer=tf.zeros_initializer
) for layer in range(hyperparams["CNNTextLayers"])]
regularized_variables += text_weights
regularized_variables += text_bias
pos_text_nodes = [tf.expand_dims(pos_text_emb, 2)]
neg_text_nodes = [tf.expand_dims(neg_text_emb, 2)]
for w, b in zip(text_weights, text_bias):
pos_text_nodes.append(tf.nn.relu(tf.nn.bias_add(
tf.nn.conv2d(pos_text_nodes[-1], w, [1, 1, 1, 1], 'SAME'), b)))
neg_text_nodes.append(tf.nn.relu(tf.nn.bias_add(
tf.nn.conv2d(neg_text_nodes[-1], w, [1, 1, 1, 1], 'SAME'), b)))
pos_text_vec = tf.reduce_mean(pos_text_nodes[-1][:, :, 0, :], axis=1)
neg_text_vec = tf.reduce_mean(neg_text_nodes[-1][:, :, 0, :], axis=1)
"""
# Aggregate and normalize e1
pos_e1_list = [pos_e1_emb]
neg_e1_list = [neg_e1_emb]
if hyperparams["normalize_e1"]:
Pos_e1 = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_e1_list], axis=0),
dim=1)
Neg_e1 = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_e1_list], axis=0),
dim=1)
else:
Pos_e1 = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_e1_list], axis=0)
Neg_e1 = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_e1_list], axis=0)
regularized_variables += [entity_weights]
# Aggregate r
pos_r_list = [pos_r_emb]
neg_r_list = [neg_r_emb]
if hyperparams["normalize_relation"]:
Pos_r = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_r_list], axis=0),
dim=1)
Neg_r = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_r_list], axis=0),
dim=1)
else:
Pos_r = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_r_list], axis=0)
Neg_r = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_r_list], axis=0)
regularized_variables += [rel_weights]
# Aggregate and normalize e2
pos_e2_list = [pos_e2_emb, pos_num_node[-1]]
neg_e2_list = [neg_e2_emb, neg_num_node[-1]]
if hyperparams["normalize_e2"]:
Pos_e2 = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_e2_list], axis=0),
dim=1)
Neg_e2 = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_e2_list], axis=0),
dim=1)
else:
Pos_e2 = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_e2_list], axis=0)
Neg_e2 = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_e2_list], axis=0)
regularized_variables += [entity_weights]
    # Apply the learned relation bias only when the bias flag is enabled
    if hyperparams["bias"]:
        pos_bias = pos_rel_bias_emb
        neg_bias = neg_rel_bias_emb
    else:
        pos_bias = 0
        neg_bias = 0
# ConvE link prediction
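    # ConvE interaction: each emb_dim vector is reshaped into an (emb_dim // 16, 16) map, the e1 and
    # r maps are concatenated along the width, passed through a 3x3 conv (32 filters, SAME padding),
    # flattened to emb_dim * 32 * 2 units, projected back to emb_dim, and finally dotted with e2.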
with tf.variable_scope("convE"):
emb_dim = hyperparams["emb_dim"]
pose1_img = tf.reshape(Pos_e1, (-1, emb_dim // 16, 16, 1))
nege1_img = tf.reshape(Neg_e1, (-1, emb_dim // 16, 16, 1))
posr_img = tf.reshape(Pos_r, (-1, emb_dim // 16, 16, 1))
negr_img = tf.reshape(Neg_r, (-1, emb_dim // 16, 16, 1))
pos_stack = tf.layers.batch_normalization(tf.concat([pose1_img, posr_img], 2), training=True)
neg_stack = tf.layers.batch_normalization(tf.concat([nege1_img, negr_img], 2), training=True)
pos_indrop = tf.nn.dropout(pos_stack, emb_keepprob)
neg_indrop = tf.nn.dropout(neg_stack, emb_keepprob)
convE_ker = tf.get_variable("convE_ker", shape=[3, 3, 1, 32], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(
factor=hyperparams["cnn_scale"], mode="FAN_AVG", dtype=dtype))
convE_bias = tf.get_variable("convE_bias", shape=[32], dtype=dtype, initializer=tf.zeros_initializer)
pos_convE_conv = tf.nn.relu(tf.layers.batch_normalization(
tf.nn.bias_add(tf.nn.convolution(pos_indrop, convE_ker, "SAME"), convE_bias), training=True))
neg_convE_conv = tf.nn.relu(tf.layers.batch_normalization(
tf.nn.bias_add(tf.nn.convolution(neg_indrop, convE_ker, "SAME"), convE_bias), training=True))
pos_flat = tf.reshape(tf.nn.dropout(pos_convE_conv, fm_keepprob), (-1, emb_dim * 32 * 2))
neg_flat = tf.reshape(tf.nn.dropout(neg_convE_conv, fm_keepprob), (-1, emb_dim * 32 * 2))
pos_flat_shape = tf.shape(pos_flat, name="pos_flat_shape")
convE_fc_w = tf.get_variable("convE_fc_w", shape=[hyperparams["emb_dim"] * 32 * 2, hyperparams["emb_dim"]],
dtype=dtype, initializer=tf.contrib.layers.variance_scaling_initializer(
factor=hyperparams["init_scale"], mode="FAN_AVG", dtype=dtype))
pos_fc = tf.nn.relu(tf.layers.batch_normalization(tf.nn.dropout(tf.matmul(pos_flat, convE_fc_w), mlp_keepprob),
training=True))
neg_fc = tf.nn.relu(tf.layers.batch_normalization(tf.nn.dropout(tf.matmul(neg_flat, convE_fc_w), mlp_keepprob),
training=True))
pos = tf.reduce_sum(tf.multiply(pos_fc, Pos_e2), 1, keep_dims=True) + pos_bias
neg = tf.reduce_sum(tf.multiply(neg_fc, Neg_e2), 1, keep_dims=True) + neg_bias
regularized_variables += [convE_ker, convE_bias, convE_fc_w]
# Regularization term
regularizer = tf.contrib.layers.l2_regularizer(hyperparams["regularization_coefficient"])
regularization_term = tf.contrib.layers.apply_regularization(regularizer, regularized_variables)
# Minimize Hinge Loss
loss = tf.reduce_sum((tf.maximum(neg - pos + hyperparams["margin"], 0))) + regularization_term
loss_to_show = tf.reduce_mean((tf.maximum(neg - pos + hyperparams["margin"], 0))) + regularization_term
training_op = tf.train.AdagradOptimizer(learning_rate=hyperparams["learning_rate"]).minimize(
loss, var_list=[entity_weights, entity_bias] + num_weights + num_bias + [convE_ker, convE_bias, convE_fc_w])
rlr_train_op = tf.train.AdagradOptimizer(learning_rate=hyperparams["learning_rate_reduced"]).minimize(
loss, var_list=[rel_weights, rel_bias]
)
summary_nodes = [tf.summary.scalar("loss", loss_to_show),
tf.summary.scalar("regularization_term", regularization_term),
tf.summary.histogram("pos", pos),
tf.summary.histogram("neg", neg),
tf.summary.histogram("entity_emb", entity_weights),
tf.summary.histogram("relation_emb", rel_weights)]
training_summary = tf.summary.merge_all()
return locals()
def scoring_and_counting(hyperparams, nodes, config=None):
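    # Test-time ranking is currently stubbed out: the body below is kept inside a string literal,
    # so this function only returns locals().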
"""
# Input placeholders
pos_e1 = tf.placeholder(tf.int32)
pos_r = tf.placeholder(tf.int32)
pos_e2 = tf.placeholder(tf.int32)
# Weights to use
entity_weights = nodes["entity_weights"]
relation_weights = nodes["rel_weights"]
relation_bias = nodes["rel_bias"]
# Normalize e2 weights
if hyperparams["test_normalize_e2"]:
normalized_entity_weights = tf.nn.l2_normalize(entity_weights, dim=1)
else:
normalized_entity_weights = entity_weights
# Normalize r weights
if hyperparams["test_normalize_relation"]:
normalized_relation_weights = tf.nn.l2_normalize(relation_weights, dim=1)
else:
normalized_relation_weights = relation_weights
# Normalize e1 weights
if hyperparams["test_normalize_e1"]:
normalized_entity_weights = tf.nn.l2_normalize(entity_weights, dim=1)
else:
normalized_entity_weights = entity_weights
# Embedding positive and negative samples
pos_e1_emb = tf.nn.embedding_lookup(normalized_entity_weights, pos_e1)
pos_r_emb = tf.nn.embedding_lookup(normalized_relation_weights, pos_r)
pos_e2_emb = tf.nn.embedding_lookup(normalized_entity_weights, pos_e2)
if hyperparams["bias"]:
pos_score_bias = tf.reshape(tf.nn.embedding_lookup(relation_bias, pos_r), (-1, 1))
else:
pos_score_bias = 0
neg_score_bias = 0
# Reshape and transpose the movie weights and rating weights to a (1, dim, depth) tensor
neg_e2_emb = tf.transpose(tf.reshape(
normalized_entity_weights, (-1, hyperparams["emb_dim"], 1)),
(2, 1, 0))
neg_r_emb = tf.transpose(tf.reshape(
relation_weights, (-1, hyperparams["emb_dim"], 1)),
(2, 1, 0))
# Scoring positive samples
pos_scoring = tf.reduce_sum(
tf.multiply(tf.multiply(pos_e1_emb, pos_r_emb), pos_e2_emb), axis=1, keep_dims=True) + pos_score_bias
# Scoring negative samples with broadcasting
pos_e1_r_mul = tf.multiply(pos_e1_emb, pos_r_emb)
neg_scoring = tf.squeeze(tf.reduce_sum(
tf.multiply(tf.reshape(pos_e1_r_mul, (-1, hyperparams["emb_dim"], 1)), neg_e2_emb),
axis=1
)) + pos_score_bias
higher_values = tf.reduce_sum(tf.cast(neg_scoring > pos_scoring, tf.float32), axis=1)
"""
return locals()
def test_graph(hyperparams, nodes, config=None):
"""
nodes = scoring_and_counting(hyperparams, nodes, config=config)
metric_values = {
"MRR": metrics.mrr(nodes["higher_values"]),
"HITS@10": metrics.hits_n(nodes["higher_values"], 10),
"HITS@3": metrics.hits_n(nodes["higher_values"], 3),
"HITS@1": metrics.hits_n(nodes["higher_values"], 1),
}
nodes.update(metric_values)
summaries = [tf.summary.scalar(k, v) for k, v in metric_values.items()] + [
tf.summary.histogram("score rankings", nodes["higher_values"])
]
nodes["test_summary"] = tf.summary.merge(summaries)
"""
return nodes
def debug_graph(hyperparams, nodes, config=None):
"""
neg_r_emb_shape = tf.shape(nodes["neg_r_emb"])
pos_scoring_shape = tf.shape(nodes["pos_scoring"])
"""
return locals()
| 18,131 | 44.672544 | 120 | py |
mkbe | mkbe-master/MKBE/models/__init__.py | #from .ml_ConvE import *
from .ml_distmult import * | 51 | 25 | 26 | py |
mkbe | mkbe-master/MKBE/models/ml_convE_kb.py | # Relations used: age, gender, occupation, zip, title, release date, genre, rating(1-5)
import tensorflow as tf
import metrics
def activation(x):
"""
with tf.name_scope("selu") as scope:
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
return scale * tf.where(x >= 0.0, x, alpha * tf.nn.elu(x))
"""
return tf.nn.relu(x)
# return tf.tanh(x)
def define_graph(hyperparams, nodes, config=None):
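    # Same multimodal encoders as the DistMult graph (MLPs for age/date/genre, a GRU for titles,
    # a CNN for poster feature maps), but e1 and r interact through a ConvE block instead of the
    # trilinear DistMult product.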
dtype = tf.float32 if config is None or "dtype" not in config else config["dtype"]
id_dtype = tf.int32 if config is None or "id_dtype" not in config else config["id_dtype"]
pos_user_e1 = tf.placeholder(tf.int32)
pos_user_r = tf.placeholder(tf.int32)
neg_user_e1 = tf.placeholder(tf.int32)
neg_user_r = tf.placeholder(tf.int32)
pos_movie_e1 = tf.placeholder(tf.int32)
pos_movie_r = tf.placeholder(tf.int32)
neg_movie_e1 = tf.placeholder(tf.int32)
neg_movie_r = tf.placeholder(tf.int32)
pos_age = tf.placeholder(tf.float32)
neg_age = tf.placeholder(tf.float32)
pos_gender = tf.placeholder(tf.int32)
neg_gender = tf.placeholder(tf.int32)
pos_occupation = tf.placeholder(tf.int32)
neg_occupation = tf.placeholder(tf.int32)
pos_zip = tf.placeholder(tf.int32)
neg_zip = tf.placeholder(tf.int32)
pos_title = tf.placeholder(tf.int32, shape=(None, None))
neg_title = tf.placeholder(tf.int32, shape=(None, None))
pos_title_len = tf.placeholder(tf.int32)
neg_title_len = tf.placeholder(tf.int32)
pos_date = tf.placeholder(tf.float32)
neg_date = tf.placeholder(tf.float32)
pos_genre = tf.placeholder(tf.float32)
neg_genre = tf.placeholder(tf.float32)
pos_userrating = tf.placeholder(tf.int32)
neg_userrating = tf.placeholder(tf.int32)
pos_relrating = tf.placeholder(tf.int32)
neg_relrating = tf.placeholder(tf.int32)
pos_movierating = tf.placeholder(tf.int32)
neg_movierating = tf.placeholder(tf.int32)
pos_poster_movie = tf.placeholder(tf.int32)
pos_poster_rel = tf.placeholder(tf.int32)
pos_poster_fm = tf.placeholder(tf.float32, shape=(None, None, None, 512))
neg_poster_movie = tf.placeholder(tf.int32)
neg_poster_rel = tf.placeholder(tf.int32)
neg_poster_fm = tf.placeholder(tf.float32, shape=(None, None, None, 512))
mlp_keepprob = tf.placeholder(tf.float32, name="mlp_keepprob")
enc_keepprob = tf.placeholder(tf.float32, name="enc_keepprob")
emb_keepprob = tf.placeholder(tf.float32, name="emb_keepprob")
fm_keepprob = tf.placeholder(tf.float32, name="fm_keepprob")
is_training = tf.placeholder(tf.bool, name="is_training")
# Weights for embeddings
if hyperparams["emb_dim"] > 3:
user_weights = tf.get_variable(
"user_weights", shape=[hyperparams["user_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"],
mode="FAN_OUT", dtype=dtype))
rel_weights = tf.get_variable(
"relation_weights", shape=[hyperparams["relation_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"],
mode="FAN_OUT", dtype=dtype))
movie_weights = tf.get_variable(
"movie_weights", shape=[hyperparams["movie_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"],
mode="FAN_OUT", dtype=dtype))
gender_weights = tf.get_variable(
"gender_weights", shape=[hyperparams["gender_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"],
mode="FAN_OUT", dtype=dtype))
job_weights = tf.get_variable(
"job_weights", shape=[hyperparams["job_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"],
mode="FAN_OUT", dtype=dtype))
zip_weights = tf.get_variable(
"zip_weights", shape=[hyperparams["zip_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"],
mode="FAN_OUT", dtype=dtype))
char_weights = tf.get_variable(
"char_weights", shape=[hyperparams["char_size"], hyperparams["emb_dim"] // 2],
dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["init_scale"],
mode="FAN_OUT", dtype=dtype))
else:
user_weights = tf.get_variable(
"user_weights", shape=[hyperparams["user_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
rel_weights = tf.get_variable(
"relation_weights", shape=[hyperparams["relation_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
movie_weights = tf.get_variable(
"movie_weights", shape=[hyperparams["movie_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
gender_weights = tf.get_variable(
"gender_weights", shape=[hyperparams["gender_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
job_weights = tf.get_variable(
"job_weights", shape=[hyperparams["job_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
zip_weights = tf.get_variable(
"zip_weights", shape=[hyperparams["zip_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
char_weights = tf.get_variable(
"char_weights", shape=[hyperparams["char_size"], hyperparams["emb_dim"] // 2],
dtype=dtype, initializer=tf.truncated_normal_initializer(dtype=dtype)
)
    # Bias embeddings
user_bias = tf.get_variable("user_bias", shape=[hyperparams["user_size"], 1], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
rel_bias = tf.get_variable("rel_bias", shape=[hyperparams["relation_size"], 1], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
movie_bias = tf.get_variable("movie_bias", shape=[hyperparams["movie_size"], 1], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
# Embedding lookup
pos_user_e1_emb = tf.nn.embedding_lookup(user_weights, pos_user_e1)
neg_user_e1_emb = tf.nn.embedding_lookup(user_weights, neg_user_e1)
pos_user_r_emb = tf.nn.embedding_lookup(rel_weights, pos_user_r)
neg_user_r_emb = tf.nn.embedding_lookup(rel_weights, neg_user_r)
pos_movie_e1_emb = tf.nn.embedding_lookup(movie_weights, pos_movie_e1)
neg_movie_e1_emb = tf.nn.embedding_lookup(movie_weights, neg_movie_e1)
pos_movie_r_emb = tf.nn.embedding_lookup(rel_weights, pos_movie_r)
neg_movie_r_emb = tf.nn.embedding_lookup(rel_weights, neg_movie_r)
pos_gender_emb = tf.nn.embedding_lookup(gender_weights, pos_gender)
neg_gender_emb = tf.nn.embedding_lookup(gender_weights, neg_gender)
pos_occupation_emb = tf.nn.embedding_lookup(job_weights, pos_occupation)
neg_occupation_emb = tf.nn.embedding_lookup(job_weights, neg_occupation)
pos_zip_emb = tf.nn.embedding_lookup(zip_weights, pos_zip)
neg_zip_emb = tf.nn.embedding_lookup(zip_weights, neg_zip)
pos_title_emb = tf.nn.embedding_lookup(char_weights, pos_title)
neg_title_emb = tf.nn.embedding_lookup(char_weights, neg_title)
pos_userrating_emb = tf.nn.embedding_lookup(user_weights, pos_userrating)
neg_userrating_emb = tf.nn.embedding_lookup(user_weights, neg_userrating)
pos_relrating_emb = tf.nn.embedding_lookup(rel_weights, pos_relrating)
neg_relrating_emb = tf.nn.embedding_lookup(rel_weights, neg_relrating)
pos_ratedmovie_emb = tf.nn.embedding_lookup(movie_weights, pos_movierating)
neg_ratedmovie_emb = tf.nn.embedding_lookup(movie_weights, neg_movierating)
pos_poster_movie_emb = tf.nn.embedding_lookup(movie_weights, pos_poster_movie)
neg_poster_movie_emb = tf.nn.embedding_lookup(movie_weights, neg_poster_movie)
pos_poster_rel_emb = tf.nn.embedding_lookup(rel_weights, pos_poster_rel)
neg_poster_rel_emb = tf.nn.embedding_lookup(rel_weights, neg_poster_rel)
pos_userrating_bias_emb = tf.nn.embedding_lookup(user_bias, pos_userrating)
neg_userrating_bias_emb = tf.nn.embedding_lookup(user_bias, neg_userrating)
pos_relrating_bias_emb = tf.nn.embedding_lookup(rel_bias, pos_relrating)
neg_relrating_bias_emb = tf.nn.embedding_lookup(rel_bias, neg_relrating)
pos_ratedmovie_bias_emb = tf.nn.embedding_lookup(movie_bias, pos_movierating)
neg_ratedmovie_bias_emb = tf.nn.embedding_lookup(movie_bias, neg_movierating)
# Collect Regularization variables
regularized_variables = []
# MLP Encoding
# For ages
age_weights = [tf.get_variable(
"age_weights_{:}".format(layer),
shape=[1 if layer == 0 else hyperparams["emb_dim"], hyperparams["emb_dim"]],
dtype=dtype, initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["down_scale"],
mode="FAN_AVG", dtype=dtype)
) for layer in range(hyperparams["MLPLayers"])]
age_bias = [tf.get_variable(
"age_bias_{:}".format(layer), shape=[hyperparams["emb_dim"]], dtype=dtype, initializer=tf.zeros_initializer
) for layer in range(hyperparams["MLPLayers"])]
regularized_variables += age_weights
regularized_variables += age_bias
# Broadcasting for scalar-vector multiplication in the first layer and vector-matrix multiplication for other layers
pos_age_node = [tf.reshape(pos_age, (-1, 1))]
neg_age_node = [tf.reshape(neg_age, (-1, 1))]
for w, b in zip(age_weights, age_bias):
pos_age_node.append(tf.nn.dropout(activation(
tf.add(b, tf.multiply(pos_age_node[-1], w) if len(pos_age_node) == 1 else tf.matmul(pos_age_node[-1], w))),
enc_keepprob))
neg_age_node.append(tf.nn.dropout(activation(
tf.add(b, tf.multiply(neg_age_node[-1], w) if len(neg_age_node) == 1 else tf.matmul(neg_age_node[-1], w))),
enc_keepprob))
# For dates
date_weights = [tf.get_variable(
"date_weights_{:}".format(layer),
shape=[1 if layer == 0 else hyperparams["emb_dim"], hyperparams["emb_dim"]],
dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["down_scale"], mode="FAN_AVG",
dtype=dtype)
) for layer in range(hyperparams["MLPLayers"])]
date_bias = [tf.get_variable(
"date_bias_{:}".format(layer), shape=[hyperparams["emb_dim"]], dtype=dtype, initializer=tf.zeros_initializer
) for layer in range(hyperparams["MLPLayers"])]
regularized_variables += date_weights
regularized_variables += date_bias
# Broadcasting for scalar-vector multiplication in the first layer and vector-matrix multiplication for other layers
pos_date_node = [tf.reshape(pos_date, (-1, 1))]
neg_date_node = [tf.reshape(neg_date, (-1, 1))]
for w, b in zip(date_weights, date_bias):
pos_date_node.append(tf.nn.dropout(activation(tf.add(b, tf.multiply(pos_date_node[-1], w) if len(
pos_date_node) == 1 else tf.matmul(pos_date_node[-1], w))), enc_keepprob))
neg_date_node.append(tf.nn.dropout(activation(tf.add(b, tf.multiply(neg_date_node[-1], w) if len(
neg_date_node) == 1 else tf.matmul(neg_date_node[-1], w))), enc_keepprob))
# For genres
genre_weights = [tf.get_variable(
"genre_weights_{:}".format(layer),
shape=[19 if layer == 0 else hyperparams["emb_dim"], hyperparams["emb_dim"]],
dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=hyperparams["down_scale"], mode="FAN_AVG",
dtype=dtype)
) for layer in range(hyperparams["MLPLayers"])]
genre_bias = [tf.get_variable(
"genre_bias_{:}".format(layer), shape=[hyperparams["emb_dim"]], dtype=dtype, initializer=tf.zeros_initializer
) for layer in range(hyperparams["MLPLayers"])]
regularized_variables += genre_weights
regularized_variables += genre_bias
pos_genre_node = [tf.reshape(pos_genre, (-1, 19))]
neg_genre_node = [tf.reshape(neg_genre, (-1, 19))]
for w, b in zip(genre_weights, genre_bias):
pos_genre_node.append(tf.nn.dropout(activation(tf.add(b, tf.matmul(pos_genre_node[-1], w))), enc_keepprob))
neg_genre_node.append(tf.nn.dropout(activation(tf.add(b, tf.matmul(neg_genre_node[-1], w))), enc_keepprob))
# GRU Encoding
GRU_base_units = hyperparams["emb_dim"] // 2
    # Layer 0 concatenates the forward/backward outputs (emb_dim // 2 each, giving emb_dim); deeper layers average them, so the width stays emb_dim
GRUCells = [tf.nn.rnn_cell.GRUCell(GRU_base_units if layer < 2 else hyperparams["emb_dim"])
for layer in range(hyperparams["GRULayers"] * 2)]
pos_title_nodes = [pos_title_emb]
neg_title_nodes = [neg_title_emb]
for layer in range(hyperparams["GRULayers"]):
with tf.variable_scope("GRUEncoder_{:}".format(layer)):
if layer > 0:
out_fw, out_bw = tf.nn.bidirectional_dynamic_rnn(
GRUCells[layer * 2], GRUCells[layer * 2 + 1], dtype=dtype,
inputs=pos_title_nodes[-1], sequence_length=pos_title_len, swap_memory=True)[0]
pos_title_nodes.append(tf.nn.dropout((out_fw + out_bw) / 2.0, enc_keepprob))
else:
pos_title_nodes.append(tf.concat(tf.nn.bidirectional_dynamic_rnn(
GRUCells[layer * 2], GRUCells[layer * 2 + 1], dtype=dtype,
inputs=pos_title_nodes[-1], sequence_length=pos_title_len, swap_memory=True)[0], 2))
# Share weights between encoders for positive samples and negative samples
with tf.variable_scope("GRUEncoder_{:}".format(layer), reuse=True):
if layer > 0:
out_fw, out_bw = tf.nn.bidirectional_dynamic_rnn(
GRUCells[layer * 2], GRUCells[layer * 2 + 1], dtype=dtype,
inputs=neg_title_nodes[-1], sequence_length=neg_title_len, swap_memory=True)[0]
neg_title_nodes.append(tf.nn.dropout((out_fw + out_bw) / 2.0, enc_keepprob))
else:
neg_title_nodes.append(tf.concat(tf.nn.bidirectional_dynamic_rnn(
GRUCells[layer * 2], GRUCells[layer * 2 + 1], dtype=dtype,
inputs=neg_title_nodes[-1], sequence_length=neg_title_len, swap_memory=True)[0], 2))
regularized_variables += tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope="GRUEncoder_{:}".format(layer))
gru_weights = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope="GRUEncoder_{:}".format(layer))
    # CNN encoding of poster feature maps (the compact bilinear pooling variant below is kept but disabled)
"""
proj_matrix1 = tf.get_variable("proj_matrix1", shape=[1, 1, 512, 1024], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(
factor=hyperparams["down_scale"], mode="FAN_AVG", dtype=dtype))
proj_matrix2 = tf.get_variable("proj_matrix2", shape=[1, 1, 512, 1024], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(
factor=hyperparams["down_scale"], mode="FAN_AVG", dtype=dtype))
pos_branch1 = tf.nn.conv2d(pos_poster_fm, proj_matrix1, strides=[1, 1, 1, 1], padding='SAME')
pos_branch2 = tf.nn.conv2d(pos_poster_fm, proj_matrix2, strides=[1, 1, 1, 1], padding='SAME')
pos_poster_vec = compact_bilinear_pooling_layer(pos_branch1, pos_branch2, hyperparams["emb_dim"])
neg_branch1 = tf.nn.conv2d(neg_poster_fm, proj_matrix1, strides=[1, 1, 1, 1], padding='SAME')
neg_branch2 = tf.nn.conv2d(neg_poster_fm, proj_matrix2, strides=[1, 1, 1, 1], padding='SAME')
neg_poster_vec = compact_bilinear_pooling_layer(neg_branch1, neg_branch2, hyperparams["emb_dim"])
"""
with tf.variable_scope("cnn_encoder"):
ksize = hyperparams["ksize"]
depth = hyperparams["depth"]
drate = hyperparams["drate"]
cnn_weights = tf.get_variable("cnn_weights", shape=[ksize, ksize, 512, depth], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(
factor=hyperparams["cnn_scale"], mode="FAN_AVG", dtype=dtype))
cnn_bias = tf.get_variable("cnn_bias", shape=[depth], dtype=dtype, initializer=tf.zeros_initializer)
pos_conv5_4 = tf.nn.dropout(tf.nn.relu(
tf.nn.bias_add(tf.nn.convolution(pos_poster_fm, cnn_weights, "VALID", dilation_rate=[drate, drate]),
cnn_bias)), enc_keepprob)
neg_conv5_4 = tf.nn.dropout(tf.nn.relu(
tf.nn.bias_add(tf.nn.convolution(neg_poster_fm, cnn_weights, "VALID", dilation_rate=[drate, drate]),
cnn_bias)), enc_keepprob)
# print(pos_conv5_4.shape, neg_conv5_4.shape)
# pos_conv5_4_shape = tf.shape(pos_conv5_4)
# neg_conv5_4_shape = tf.shape(neg_conv5_4)
pos_pool5 = tf.nn.dropout(tf.reduce_mean(pos_conv5_4, axis=[1, 2]), enc_keepprob)
neg_pool5 = tf.nn.dropout(tf.reduce_mean(neg_conv5_4, axis=[1, 2]), enc_keepprob)
# print(pos_pool5.shape, neg_pool5.shape)
# pos_pool5_shape = tf.shape(pos_pool5)
# neg_pool5_shape = tf.shape(neg_pool5)
fc_weights = tf.get_variable("fc_weights", shape=[depth, hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(
factor=hyperparams["cnn_scale"], mode="FAN_AVG", dtype=dtype))
fc_bias = tf.get_variable("fc_bias", shape=[hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.zeros_initializer)
pos_poster_vec = tf.nn.dropout(activation(tf.add(tf.matmul(pos_pool5, fc_weights), fc_bias)), enc_keepprob)
        neg_poster_vec = tf.nn.dropout(activation(tf.add(tf.matmul(neg_pool5, fc_weights), fc_bias)), enc_keepprob)
# print(pos_poster_vec.shape, neg_poster_vec.shape)
# pos_poster_vec_shape = tf.shape(pos_poster_vec)
# neg_poster_vec_shape = tf.shape(neg_poster_vec)
regularized_variables += [cnn_weights, cnn_bias, fc_weights, fc_bias]
# Aggregate and normalize e1
pos_e1_list = [pos_user_e1_emb, pos_movie_e1_emb, pos_userrating_emb, pos_poster_movie_emb]
neg_e1_list = [neg_user_e1_emb, neg_movie_e1_emb, neg_userrating_emb, neg_poster_movie_emb]
if hyperparams["normalize_e1"]:
pos_e1 = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_e1_list], axis=0),
dim=1)
neg_e1 = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_e1_list], axis=0),
dim=1)
else:
pos_e1 = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_e1_list], axis=0)
neg_e1 = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_e1_list], axis=0)
regularized_variables += [user_weights, movie_weights]
# Aggregate r
pos_r_list = [pos_user_r_emb, pos_movie_r_emb, pos_relrating_emb, pos_poster_rel_emb]
neg_r_list = [neg_user_r_emb, neg_movie_r_emb, neg_relrating_emb, neg_poster_rel_emb]
if hyperparams["normalize_relation"]:
pos_r = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_r_list], axis=0),
dim=1)
neg_r = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_r_list], axis=0),
dim=1)
else:
pos_r = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_r_list], axis=0)
neg_r = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_r_list], axis=0)
regularized_variables += [rel_weights]
# Aggregate and normalize e2
pos_e2_list = [pos_age_node[-1], pos_gender_emb, pos_occupation_emb, pos_zip_emb, pos_title_nodes[-1][:, 0, :],
pos_date_node[-1], pos_genre_node[-1], pos_ratedmovie_emb, pos_poster_vec]
neg_e2_list = [neg_age_node[-1], neg_gender_emb, neg_occupation_emb, neg_zip_emb, neg_title_nodes[-1][:, 0, :],
neg_date_node[-1], neg_genre_node[-1], neg_ratedmovie_emb, neg_poster_vec]
if hyperparams["normalize_e2"]:
pos_e2 = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_e2_list], axis=0),
dim=1)
neg_e2 = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_e2_list], axis=0),
dim=1)
else:
pos_e2 = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in pos_e2_list], axis=0)
neg_e2 = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in neg_e2_list], axis=0)
regularized_variables += [movie_weights]
    # Apply the learned relation bias only when the bias flag is enabled
    if hyperparams["bias"]:
        pos_bias = pos_relrating_bias_emb
        neg_bias = neg_relrating_bias_emb
    else:
        pos_bias = 0
        neg_bias = 0
pose1_shape = tf.shape(pos_e1, name="pose1_shape")
# ConvE link prediction
with tf.variable_scope("convE"):
emb_dim = hyperparams["emb_dim"]
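        # ConvE-style scoring: the e1 and relation embeddings are reshaped into
        # (emb_dim // 16) x 16 feature maps, stacked side by side along the width,
        # convolved, and projected back to emb_dim before being matched against e2.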
pose1_img = tf.reshape(pos_e1, (-1, emb_dim // 16, 16, 1))
nege1_img = tf.reshape(neg_e1, (-1, emb_dim // 16, 16, 1))
posr_img = tf.reshape(pos_r, (-1, emb_dim // 16, 16, 1))
negr_img = tf.reshape(neg_r, (-1, emb_dim // 16, 16, 1))
pos_stack = tf.layers.batch_normalization(tf.concat([pose1_img, posr_img], 2), training=is_training)
neg_stack = tf.layers.batch_normalization(tf.concat([nege1_img, negr_img], 2), training=is_training)
pos_indrop = tf.nn.dropout(pos_stack, emb_keepprob)
neg_indrop = tf.nn.dropout(neg_stack, emb_keepprob)
convE_ker = tf.get_variable("convE_ker", shape=[3, 3, 1, 32], dtype=dtype,
initializer=tf.contrib.layers.variance_scaling_initializer(
factor=hyperparams["cnn_scale"], mode="FAN_AVG", dtype=dtype))
convE_bias = tf.get_variable("convE_bias", shape=[32], dtype=dtype, initializer=tf.zeros_initializer)
pos_convE_conv = tf.nn.relu(tf.layers.batch_normalization(
tf.nn.bias_add(tf.nn.convolution(pos_indrop, convE_ker, "SAME"), convE_bias), training=is_training))
neg_convE_conv = tf.nn.relu(tf.layers.batch_normalization(
tf.nn.bias_add(tf.nn.convolution(neg_indrop, convE_ker, "SAME"), convE_bias), training=is_training))
pos_flat = tf.reshape(tf.nn.dropout(pos_convE_conv, fm_keepprob), (-1, emb_dim * 32 * 2))
neg_flat = tf.reshape(tf.nn.dropout(neg_convE_conv, fm_keepprob), (-1, emb_dim * 32 * 2))
pos_flat_shape = tf.shape(pos_flat, name="pos_flat_shape")
convE_fc_w = tf.get_variable("convE_fc_w", shape=[hyperparams["emb_dim"] * 32 * 2, hyperparams["emb_dim"]],
dtype=dtype, initializer=tf.contrib.layers.variance_scaling_initializer(
factor=hyperparams["init_scale"], mode="FAN_AVG", dtype=dtype))
pos_fc = tf.nn.relu(tf.layers.batch_normalization(tf.nn.dropout(tf.matmul(pos_flat, convE_fc_w), mlp_keepprob),
training=is_training))
neg_fc = tf.nn.relu(tf.layers.batch_normalization(tf.nn.dropout(tf.matmul(neg_flat, convE_fc_w), mlp_keepprob),
training=is_training))
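        # Final score: dot product between the ConvE output for (e1, r) and the e2
        # embedding, plus an optional per-relation bias; the hinge loss below pushes
        # positive scores above negative ones by at least the margin.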
pos = tf.reduce_sum(tf.multiply(pos_fc, pos_e2), 1, keep_dims=True) + pos_bias
neg = tf.reduce_sum(tf.multiply(neg_fc, neg_e2), 1, keep_dims=True) + neg_bias
regularized_variables += [convE_ker, convE_bias, convE_fc_w]
# Regularization term
regularizer = tf.contrib.layers.l2_regularizer(hyperparams["regularization_coefficient"])
regularization_term = tf.contrib.layers.apply_regularization(regularizer, regularized_variables)
# Collect variables to be trained
lr1_vars = [user_weights, movie_weights, user_bias, movie_bias]
lr2_vars = [rel_weights, rel_bias, convE_ker, convE_bias, convE_fc_w]
lr1_vars += age_weights + age_bias + [gender_weights, job_weights, zip_weights]
lr2_vars += gru_weights
lr1_vars += date_weights + date_bias + genre_weights + genre_bias
lr1_vars += [cnn_weights, cnn_bias, fc_weights, fc_bias]
# lr1_vars += [proj_matrix1, proj_matrix2]
# Minimize Hinge Loss
loss = tf.reduce_sum((tf.maximum(neg - pos + hyperparams["margin"], 0))) + regularization_term
loss_to_show = tf.reduce_mean((tf.maximum(neg - pos + hyperparams["margin"], 0))) + regularization_term
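    # Two optimizers over disjoint variable lists: lr1_vars use the base Adagrad
    # learning rate, while the relation/GRU/ConvE weights in lr2_vars use the reduced rate.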
training_op = tf.train.AdagradOptimizer(learning_rate=hyperparams["learning_rate"]).minimize(
loss, var_list=lr1_vars)
rlr_train_op = tf.train.AdagradOptimizer(learning_rate=hyperparams["learning_rate_reduced"]).minimize(
loss, var_list=lr2_vars)
summary_nodes = [tf.summary.scalar("loss", loss_to_show),
tf.summary.scalar("regularization_term", regularization_term),
tf.summary.histogram("pos", pos),
tf.summary.histogram("neg", neg),
tf.summary.histogram("user_emb", user_weights),
tf.summary.histogram("relation_emb", rel_weights),
tf.summary.histogram("movie_emb", movie_weights)]
training_summary = tf.summary.merge_all()
return locals()
def scoring_and_counting(hyperparams, nodes, config=None):
# Input placeholders
rating_relations = tf.placeholder(tf.int32, shape=[5])
pos_user = tf.placeholder(tf.int32)
pos_r = tf.placeholder(tf.int32)
pos_movie = tf.placeholder(tf.int32)
emb_dim = hyperparams["emb_dim"]
bsize = tf.shape(pos_user)[0]
# Weights to use
user_weights = nodes["user_weights"]
movie_weights = nodes["movie_weights"]
relation_weights = nodes["rel_weights"]
relation_bias = nodes["rel_bias"]
is_training = nodes["is_training"]
mlp_keepprob = nodes["mlp_keepprob"]
emb_keepprob = nodes["emb_keepprob"]
fm_keepprob = nodes["fm_keepprob"]
# Normalize e2 weights
if hyperparams["test_normalize_e2"]:
normalized_movie_weights = tf.nn.l2_normalize(movie_weights, dim=1)
else:
normalized_movie_weights = movie_weights
# Normalize r weights
if hyperparams["test_normalize_relation"]:
normalized_relation_weights = tf.nn.l2_normalize(relation_weights, dim=1)
else:
normalized_relation_weights = relation_weights
# Normalize e1 weights
if hyperparams["test_normalize_e1"]:
normalized_user_weights = tf.nn.l2_normalize(user_weights, dim=1)
else:
normalized_user_weights = user_weights
# Embedding positive and negative samples
pos_user_emb = tf.nn.embedding_lookup(normalized_user_weights, pos_user)
pos_r_emb = tf.nn.embedding_lookup(normalized_relation_weights, pos_r)
pos_movie_emb = tf.nn.embedding_lookup(normalized_movie_weights, pos_movie)
rating_relation_weights = tf.nn.embedding_lookup(normalized_relation_weights, rating_relations)
if hyperparams["bias"]:
pos_score_bias = tf.reshape(tf.nn.embedding_lookup(relation_bias, pos_r), (-1, 1))
neg_score_bias = tf.reshape(tf.nn.embedding_lookup(relation_bias, rating_relations), (1, 5, 1))
else:
pos_score_bias = 0
neg_score_bias = 0
# ConvE link prediction
with tf.variable_scope("convE", reuse=tf.AUTO_REUSE):
pose1_img = tf.reshape(pos_user_emb, (-1, emb_dim // 16, 16, 1))
posr_img = tf.reshape(pos_r_emb, (-1, emb_dim // 16, 16, 1))
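        # At test time each (user, movie) pair is scored against all five rating
        # relations at once: the user image is tiled 5 times and paired with the five
        # relation embeddings before the shared ConvE stack is applied.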
negr_img = tf.reshape(rating_relation_weights, (-1, 5, emb_dim // 16, 16, 1))
pos_stack = tf.layers.batch_normalization(tf.concat([pose1_img, posr_img], 2), training=False)
neg_stack = tf.layers.batch_normalization(
tf.concat([tf.tile(tf.expand_dims(pose1_img, axis=1), [1, 5, 1, 1, 1]),
tf.tile(negr_img, [bsize, 1, 1, 1, 1])], 3),
training=False)
pos_indrop = tf.nn.dropout(pos_stack, 1.0)
neg_indrop = tf.nn.dropout(neg_stack, 1.0)
convE_ker = tf.get_variable("convE_ker")
convE_bias = tf.get_variable("convE_bias")
pos_convE_conv = tf.nn.relu(tf.layers.batch_normalization(
tf.nn.bias_add(tf.nn.convolution(pos_indrop, convE_ker, "SAME"), convE_bias), training=False))
neg_convE_conv = tf.nn.relu(tf.layers.batch_normalization(
tf.nn.bias_add(tf.nn.convolution(
neg_indrop, tf.expand_dims(convE_ker, axis=0), "SAME"), convE_bias),
training=False))
pos_flat = tf.reshape(tf.nn.dropout(pos_convE_conv, 1), (-1, emb_dim * 32 * 2))
neg_flat = tf.reshape(tf.nn.dropout(neg_convE_conv, 1), (-1, emb_dim * 32 * 2))
convE_fc_w = tf.get_variable("convE_fc_w")
pos_fc = tf.nn.relu(tf.layers.batch_normalization(tf.nn.dropout(tf.matmul(pos_flat, convE_fc_w), 1),
training=False))
neg_fc = tf.nn.relu(tf.layers.batch_normalization(tf.nn.dropout(tf.matmul(neg_flat, convE_fc_w), 1),
training=False))
neg_e1_r = tf.reshape(neg_fc, (bsize, 5, emb_dim))
pos_movie_tile = tf.tile(tf.expand_dims(pos_movie_emb, 1), [1, 5, 1])
pos = tf.reduce_sum(tf.multiply(pos_fc, pos_movie_emb), 1, keep_dims=True) + pos_score_bias
neg = tf.reduce_sum(tf.multiply(neg_e1_r, pos_movie_tile), 2, keep_dims=True) + neg_score_bias
pos_scoring = tf.reshape(pos, (-1, 1, 1))
neg_scoring_rating = tf.reshape(neg, (-1, 5, 1))
# movie_higher_values = tf.reduce_sum(tf.cast(neg_scoring_movie > pos_scoring, tf.float32), axis=1)
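    # Number of rating relations that score strictly higher than the true rating;
    # this count (rank - 1) feeds the MRR/HITS metrics in test_graph.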
rating_higher_values = tf.reduce_sum(tf.cast(neg_scoring_rating > pos_scoring, tf.float32), axis=1)
return locals()
def test_graph(hyperparams, nodes, config=None):
nodes = scoring_and_counting(hyperparams, nodes, config=config)
metric_values = {
# "MRR_movie": metrics.mrr(nodes["movie_higher_values"]),
# "HITS@10_movie": metrics.hits_n(nodes["movie_higher_values"], 10),
# "HITS@3_movie": metrics.hits_n(nodes["movie_higher_values"], 3),
# "HITS@1_movie": metrics.hits_n(nodes["movie_higher_values"], 1),
"MRR_r": metrics.mrr(nodes["rating_higher_values"]),
"HITS@5_r": metrics.hits_n(nodes["rating_higher_values"], 5),
"HITS@3_r": metrics.hits_n(nodes["rating_higher_values"], 3),
"HITS@2_r": metrics.hits_n(nodes["rating_higher_values"], 2),
"HITS@1_r": metrics.hits_n(nodes["rating_higher_values"], 1)
}
nodes.update(metric_values)
summaries = [tf.summary.scalar(k, v) for k, v in metric_values.items()] + [
tf.summary.histogram("rating score rankings", nodes["rating_higher_values"]),
# tf.summary.histogram("movie score rankings", nodes["movie_higher_values"])
]
nodes["test_summary"] = tf.summary.merge(summaries)
return nodes
def debug_graph(hyperparams, nodes, config=None):
"""
rating_rankings_min = tf.reduce_max(nodes["rating_higher_values"])
rating_rankings_max = tf.reduce_max(nodes["rating_higher_values"])
neg_score_rating_shape = tf.shape(nodes["neg_scoring_rating"])
neg_r_emb_shape = tf.shape(nodes["neg_r_emb"])
pos_u_m_mul_shape = tf.shape(nodes["pos_user_movie_mul"])
pos_scoring_shape = tf.shape(nodes["pos_scoring"])
"""
return locals()
| 33,237 | 51.842607 | 120 | py |
mkbe | mkbe-master/MKBE/models/yago_convE_kb_model.py | import tensorflow as tf
from tensorpack import *
from tensorflow.contrib.keras import backend as K
class YAGOConveMultimodel(ModelDesc):
def __init__(self, hyperparams):
super(YAGOConveMultimodel, self).__init__()
self.hyperparams = hyperparams
def _get_inputs(self):
return [InputDesc(tf.int32, (None,), "e1"),
InputDesc(tf.int32, (None,), "r"),
InputDesc(tf.int8, (None, self.hyperparams["entity_size"]), "e2_multihot"),
InputDesc(tf.int32, (None,), "e2_ind")]
def generate_onehot(self, indices):
entity_size = self.hyperparams["entity_size"]
return tf.one_hot(
indices, entity_size, dtype=tf.float32, on_value=1.0 - self.hyperparams["label_smoothing"],
off_value=self.hyperparams["label_smoothing"] / (entity_size - 1.0))
def label_smoothing(self, onehots, lambd):
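        # Blend the multi-hot targets with a uniform term:
        # (1 - lambd) * y + 10 * lambd / entity_size (the extra factor of 10 is as written here).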
e2 = tf.cast(onehots, tf.float32)
e2_multi = (1.0 - lambd) * e2 + (10 * lambd / self.hyperparams["entity_size"])
return e2_multi
def _build_graph(self, inputs):
hyperparams = self.hyperparams
dtype = tf.float32
id_dtype = tf.int32
e1, r, e2_multihot, e2_ind = inputs
label_smooth = tf.placeholder(tf.float32, name="label_smoothing", shape=())
mlp_keepprob = tf.placeholder(tf.float32, name="mlp_keepprob")
enc_keepprob = tf.placeholder(tf.float32, name="enc_keepprob")
emb_keepprob = tf.placeholder(tf.float32, name="emb_keepprob")
fm_keepprob = tf.placeholder(tf.float32, name="fm_keepprob")
is_training = tf.placeholder(tf.bool, name="is_training")
# Weights for embeddings
if hyperparams["emb_dim"] > 3:
self.entity_weights = tf.get_variable(
"entity_weights", shape=[hyperparams["entity_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(uniform=False, dtype=dtype))
self.rel_weights = tf.get_variable(
"relation_weights", shape=[hyperparams["relation_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(uniform=False, dtype=dtype))
self.word_weights = tf.get_variable(
"word_weights", shape=[hyperparams["word_size"], hyperparams["emb_dim"] // 2],
dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(uniform=False, dtype=dtype))
else:
self.entity_weights = tf.get_variable(
"entity_weights", shape=[hyperparams["entity_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
self.rel_weights = tf.get_variable(
"relation_weights", shape=[hyperparams["relation_size"], hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.truncated_normal_initializer(dtype=dtype))
self.word_weights = tf.get_variable(
"word_weights", shape=[hyperparams["word_size"], hyperparams["emb_dim"] // 2],
dtype=dtype, initializer=tf.truncated_normal_initializer(dtype=dtype)
)
# Encode e1 and r
e1_emb = tf.nn.embedding_lookup(self.entity_weights, e1)
r_emb = tf.nn.embedding_lookup(self.rel_weights, r)
# Collect Regularization variables
regularized_variables = []
# Aggregate and normalize e1
e1_list = [e1_emb]
if hyperparams["normalize_e1"]:
Pos_e1 = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in e1_list], axis=0),
dim=1)
else:
Pos_e1 = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in e1_list], axis=0)
regularized_variables += [self.entity_weights]
# Aggregate r
r_list = [r_emb]
if hyperparams["normalize_relation"]:
Pos_r = tf.nn.l2_normalize(
tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in r_list], axis=0),
dim=1)
else:
Pos_r = tf.concat([tf.reshape(emb, (-1, hyperparams["emb_dim"])) for emb in r_list], axis=0)
regularized_variables += [self.rel_weights]
# ConvE link prediction
with tf.variable_scope("convE"):
emb_dim = hyperparams["emb_dim"]
pose1_img = tf.reshape(Pos_e1, (-1, emb_dim // 10, 10, 1))
posr_img = tf.reshape(Pos_r, (-1, emb_dim // 10, 10, 1))
pos_stack = tf.layers.batch_normalization(tf.concat(
[pose1_img, posr_img], 2), training=is_training, epsilon=1e-5, momentum=0.1)
pos_indrop = tf.nn.dropout(pos_stack, emb_keepprob)
convE_ker = tf.get_variable("convE_ker", shape=[3, 3, 1, 32], dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(uniform=False, dtype=dtype))
convE_bias = tf.get_variable("convE_bias", shape=[32], dtype=dtype, initializer=tf.zeros_initializer)
pos_convE_conv = tf.nn.relu(tf.layers.batch_normalization(tf.nn.bias_add(tf.nn.convolution(
pos_indrop, convE_ker, "VALID"), convE_bias), training=is_training, epsilon=1e-5, momentum=0.1))
fm_dropout = tf.contrib.keras.layers.SpatialDropout2D(1.0 - fm_keepprob)
pos_flat = tf.reshape(fm_dropout(pos_convE_conv, training=is_training), (-1, 10368))
self.convE_fc_w = tf.get_variable(
"convE_fc_w", shape=[10368, hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(uniform=False, dtype=dtype))
self.convE_fc_b = tf.get_variable(
"convE_fc_b", shape=[hyperparams["emb_dim"]], dtype=dtype,
initializer=tf.constant_initializer(value=0.0))
pos_fc = tf.nn.relu(tf.layers.batch_normalization(tf.nn.dropout(tf.nn.bias_add(tf.matmul(
pos_flat, self.convE_fc_w), self.convE_fc_b), mlp_keepprob), training=is_training, epsilon=1e-5,
momentum=0.1))
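            # 1-N scoring: the (e1, r) representation is compared against every entity
            # embedding in a single matmul, giving logits of shape (batch, entity_size).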
self.pred = tf.matmul(pos_fc, self.entity_weights, transpose_b=True)
regularized_variables += [convE_ker, convE_bias, self.convE_fc_w, self.convE_fc_b]
# Generate e2 labels
e2_label = self.label_smoothing(e2_multihot, label_smooth)
# Sigmoid BCE loss/ sigmoid cross entropy
#self.ll_loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=e2_label, logits=self.pred))
self.ll_loss = tf.reduce_mean(
tf.losses.sigmoid_cross_entropy(e2_label, self.pred, reduction=tf.losses.Reduction.NONE))
# Regularization term
regularizer = tf.contrib.layers.l2_regularizer(hyperparams["regularization_coefficient"])
regularization_term = tf.contrib.layers.apply_regularization(regularizer, regularized_variables)
# Aggregate loss
self.loss = tf.add(self.ll_loss, regularization_term, name="loss")
self.cost = self.loss
# Learning rate decay
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.maximum(tf.train.exponential_decay(
hyperparams["learning_rate"], global_step / 15000, 1, hyperparams["lr_decay"]), 1e-7, name="lr")
# Training op
self.train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss, global_step=global_step)
# Testing Graph
self.test_graph(e2_ind)
# Summaries
self.summaries()
return self.cost
def test_graph(self, pos_e2):
self.likelihood = tf.nn.sigmoid(self.pred)
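        # Raw (unfiltered) rank of the true e2: one plus the number of entities whose
        # predicted likelihood is strictly higher than the positive's.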
pos_score = tf.diag_part(tf.nn.embedding_lookup(tf.transpose(self.likelihood), pos_e2))
        cmp = tf.expand_dims(pos_score, axis=1) < self.likelihood
self.rank = tf.reduce_sum(tf.cast(cmp, tf.int32), axis=1) + 1
mrr = tf.reduce_mean(1.0 / tf.cast(self.rank, tf.float32), name="mrr")
hits_10 = tf.reduce_mean(tf.cast(self.rank <= 10, tf.float32), name="hits_10")
hits_3 = tf.reduce_mean(tf.cast(self.rank <= 3, tf.float32), name="hits_3")
hits_1 = tf.reduce_mean(tf.cast(self.rank <= 1, tf.float32), name="hits_1")
        invalid_e2 = tf.reduce_mean(tf.cast(tf.equal(pos_e2, 0), tf.float32), name="inv_e2")
return mrr, hits_1, hits_3, hits_10
def summaries(self):
tf.summary.scalar("loss", self.loss)
tf.summary.scalar("bce_loss", self.ll_loss)
tf.summary.histogram("logits", self.pred)
tf.summary.histogram("rank", self.rank)
tf.summary.histogram("probability", self.likelihood)
tf.summary.histogram("entity weights", self.entity_weights)
tf.summary.histogram("relation weights", self.rel_weights)
tf.summary.histogram("dense weights", self.convE_fc_w)
def _get_optimizer(self):
return self.train_op, self.loss, self.ll_loss
| 9,042 | 45.137755 | 120 | py |
mkbe | mkbe-master/MKBE/test/test_runner.py | import sys, tqdm
from tensorpack import *
from tensorpack.callbacks.inference_runner import _inference_context
class TestRunner(callbacks.InferenceRunner):
feed = {
"InferenceTower/emb_keepprob:0": 1.0,
"InferenceTower/fm_keepprob:0": 1.0,
"InferenceTower/mlp_keepprob:0": 1.0,
"InferenceTower/enc_keepprob:0": 1.0,
"InferenceTower/is_training:0": False,
"InferenceTower/label_smoothing:0": 0.0
}
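    # Overrides the default inference trigger so that dropout keep-probabilities are
    # forced to 1.0 and is_training/label_smoothing are disabled while the hooked
    # session runs over the test dataflow.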
def _trigger(self):
for inf in self.infs:
inf.before_epoch()
self._input_source.reset_state()
# iterate over the data, and run the hooked session
with _inference_context(), \
tqdm.tqdm(total=self._size, **utils.utils.get_tqdm_kwargs()) as pbar:
num_itr = self._size if self._size > 0 else sys.maxsize
for _ in range(num_itr):
self._hooked_sess.run(fetches=[], feed_dict=self.feed)
pbar.update()
for inf in self.infs:
inf.trigger_epoch() | 1,034 | 33.5 | 85 | py |
mkbe | mkbe-master/MKBE/metrics/metrics.py | import tensorflow as tf
def mrr(higher_values):
pos_index = higher_values + 1
return tf.reduce_mean(1.0/ pos_index)
def hits_n(higher_values, n):
hits_times = tf.cast(higher_values <= (n - 1), tf.float32)
return tf.reduce_mean(hits_times) | 257 | 24.8 | 62 | py |
mkbe | mkbe-master/MKBE/metrics/__init__.py | from .metrics import * | 22 | 22 | 22 | py |
mkbe | mkbe-master/MKBE/train/yago_training.py | from tensorpack import *
class SingleGPUTrainer(train.SimpleTrainer):
def __init__(self, hyperparams):
super(SingleGPUTrainer, self).__init__()
mutable_params = ["emb_keepprob", "fm_keepprob", "mlp_keepprob", "label_smoothing"]
self.feed = dict((k + ":0", hyperparams[k]) for k in mutable_params)
self.feed["is_training:0"] = True
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
with tfutils.tower.TowerContext("", is_training=True):
assert input.setup_done()
get_cost_fn(*input.get_input_tensors())
self.train_op = get_opt_fn()
return []
def run_step(self):
_ = self.hooked_sess.run(self.train_op, feed_dict=self.feed)
| 736 | 35.85 | 91 | py |
mkbe | mkbe-master/MKBE/preprocess/ml100k_preprocess.py | import itertools
import numpy as np
import pandas as pd
# subset can be ["movie_user_rating", "movie_title_rating", "movie_rating", "user_rating", "rating"]
fold = 1
subset = "movie_title_poster_user_rating"
#subset = "movie_title_user_rating"
in_files = {
"user-train": "../code/movielens/ml-100k/u.user",
"movie-train": "../code/movielens/ml-100k/u.item",
"rating-train": "../code/movielens/ml-100k/u{:}.base".format(fold),
"rating-test": "../code/movielens/ml-100k/u{:}.test".format(fold),
"cached-posters": "../code/movielens/ml-100k/feature_maps.npy"
}
out_files = {
"scale": "../assets/ml100k-processed/u{:}-{:}-scale.npy".format(fold, subset),
"train": "../assets/ml100k-processed/u{:}-{:}-train.npy".format(fold, subset),
"test": "../assets/ml100k-processed/u{:}-{:}-test.npy".format(fold, subset),
"idencoders": "../assets/ml100k-processed/u{:}-{:}-idencoder.npy".format(fold, subset),
"titles": "../assets/ml100k-processed/u{:}-{:}-titles.npy".format(fold, subset),
"title_dict": "../assets/ml100k-processed/u{:}-{:}-title-dict.npy".format(fold, subset)
}
user_headers = ["userid", "age", "gender", "occupation", "zip"]
user_r = ["is of_" + h for h in ["age", "gender", "occupation", "zip"]]
movie_headers = ["movieid", "title", "release date", "video release date", "IMDb URL", "unknown", "action", "adventure",
"animation", "childrens", "comedy", "crime", "documentary", "drama", "fantasy", "film-noir", "horror",
"musical", "mystery", "romance", "sci-fi", "thriller", "war", "western"]
movie_r = ["is of_" + h for h in ["title", "release date", "genre", "poster"]]
rating_headers = ["userid", "movieid", "rating", "timestamp"]
def read_and_filter():
userdf = pd.read_csv(in_files["user-train"], engine="c", names=user_headers, sep="|")
moviedf = pd.read_csv(in_files["movie-train"], engine="c", names=movie_headers, sep="|", encoding="latin1")
rating_train = pd.read_csv(in_files["rating-train"], engine="c", names=rating_headers, sep="\t")
rating_test = pd.read_csv(in_files["rating-test"], engine="c", names=rating_headers, sep="\t")
# Normalize user ages
age_scale_params = {
"mean": userdf.mean()["age"],
"std": userdf.std()["age"]
}
userdf["age"] -= age_scale_params["mean"]
userdf["age"] /= age_scale_params["std"]
# Slice first 2 digits of zip codes
userdf["zip"] = userdf["zip"].str.slice(0, 2)
# Normalize movie release dates
moviedf["release date"] = pd.to_datetime(moviedf["release date"]).astype("int64")
date_scale_params = {
"mean": moviedf.mean()["release date"],
"std": moviedf.std()["release date"]
}
moviedf["release date"] -= date_scale_params["mean"]
moviedf["release date"] /= date_scale_params["std"]
# Remove year from movie titles
moviedf["title"] = moviedf["title"].str.replace(r" \([0-9]+\)$", "")
scale_params = {
"age": age_scale_params,
"date": date_scale_params
}
np.save(out_files["scale"], np.array(scale_params))
return userdf, moviedf, rating_train, rating_test, scale_params
def build_dict(userdf, moviedf, rating_train, rating_test):
genders = set(userdf["gender"])
gender2id = dict(zip(genders, range(len(genders))))
occupations = set(userdf["occupation"])
job2id = dict(zip(occupations, range(len(occupations))))
zipcodes = set(userdf["zip"])
zip2id = dict(zip(zipcodes, range(len(zipcodes))))
chars = set(itertools.chain.from_iterable(moviedf["title"].values))
chars.update(["<go>", "<eos>"])
char2id = dict(zip(chars, range(len(chars))))
relations = set("rate_{:}".format(rating) for rating in set(rating_train["rating"]))
relations.update(user_r)
relations.update(movie_r)
rel2id = dict(zip(relations, range(len(relations))))
idenc = {
"gender2id": gender2id,
"job2id": job2id,
"zip2id": zip2id,
"char2id": char2id,
"rel2id": rel2id,
"maxuserid": max(userdf["userid"]),
"maxmovieid": max(moviedf["movieid"])
}
np.save(out_files["idencoders"], np.array(idenc))
return gender2id, job2id, zip2id, char2id, rel2id
def encode(userdf, moviedf, rating_train, rating_test, gender2id, job2id, zip2id, char2id, rel2id):
train_triplets = []
test_triplets = []
title_symlist = []
title_idlist = []
attr2enc = {
"gender": gender2id,
"occupation": job2id,
"zip": zip2id
}
af = "is of_"
# Encode user attributes
if "user" in subset:
for attribute in ["age", "gender", "occupation", "zip"]:
userids = userdf["userid"]
attrs = userdf[attribute]
for e1, e2 in zip(userids, attrs):
encoded_e2 = attr2enc[attribute][e2] if attribute in attr2enc else e2
train_triplets.append((e1, rel2id[af + attribute], encoded_e2))
if "movie" in subset:
movieids = moviedf["movieid"]
if "title" in subset:
# Encode movie titles
titles = moviedf["title"]
for e1, e2 in zip(movieids, titles):
encoded_e2 = [char2id["<go>"]] + [char2id[c] for c in e2] + [char2id["<eos>"]]
train_triplets.append((e1, rel2id[af + "title"], encoded_e2))
title_symlist.append(encoded_e2)
title_idlist.append(e1)
# Encode movie release dates
release_date = moviedf["release date"]
for e1, e2 in zip(movieids, release_date):
train_triplets.append((e1, rel2id[af + "release date"], e2))
# Encode movie genres
genre = moviedf[["unknown", "action", "adventure", "animation", "childrens", "comedy", "crime", "documentary",
"drama", "fantasy", "film-noir", "horror", "musical", "mystery", "romance", "sci-fi", "thriller",
"war", "western"]]
for e1, e2 in zip(movieids, genre.values):
train_triplets.append((e1, rel2id[af + "genre"], e2))
if "poster" in subset:
poster_dict = np.load(in_files["cached-posters"]).item()
for e1, e2 in poster_dict.items():
train_triplets.append((e1, rel2id[af + "poster"], e2))
# Encode training ratings
for e1, e2, r, _ in rating_train.values:
encoded_r = rel2id["rate_{:}".format(r)]
train_triplets.append((e1, encoded_r, e2))
# Encode test ratings
for e1, e2, r, _ in rating_test.values:
encoded_r = rel2id["rate_{:}".format(r)]
test_triplets.append((e1, encoded_r, e2))
training_set = np.array(train_triplets, dtype=tuple)
test_set = np.array(test_triplets, dtype=tuple)
title_set = np.array(title_symlist, dtype=list)
title_dict = dict(zip(title_idlist, title_symlist))
print(len(title_dict))
np.random.shuffle(training_set)
np.random.shuffle(test_set)
np.save(out_files["test"], test_set)
np.save(out_files["train"], training_set)
np.save(out_files["titles"], title_set)
np.save(out_files["title_dict"], title_dict)
if __name__ == "__main__":
userdf, moviedf, rating_train, rating_test, scale_params = read_and_filter()
gender2id, job2id, zip2id, char2id, rel2id = build_dict(userdf, moviedf, rating_train, rating_test)
encode(userdf, moviedf, rating_train, rating_test, gender2id, job2id, zip2id, char2id, rel2id)
| 7,432 | 38.748663 | 122 | py |
mkbe | mkbe-master/MKBE/preprocess/yago_preprocess.py | from collections import defaultdict
import numpy as np
import msgpack, msgpack_numpy, os, lmdb
msgpack_numpy.patch()
in_files = {
"train": "../code/YAGO/data/YAGO3-10/train.txt",
"test": "../code/YAGO/data/YAGO3-10/test.txt",
"valid": "../code/YAGO/data/YAGO3-10/valid.txt",
"numerical": "../code/YAGO/Multi-Model/data/num.txt",
"text": "../code/YAGO/Multi-Model/data/text.txt"
}
out_files = {
"train_S": "../assets/yago-processed/train_s.mdb",
"train_N": "../assets/yago-processed/train_n.mdb",
"train_I": "../assets/yago-processed/train_i.mdb",
"train_D": "../assets/yago-processed/train_d.mdb",
"test": "../assets/yago-processed/test.mdb",
"meta": "../assets/yago-processed/meta.npy"
}
class LMDB_Writer:
def __init__(self, path, write_frequency=1024):
isdir = os.path.isdir(path)
self.lmdb_env = lmdb.open(
path, subdir=isdir, map_size=1099511627776 * 2, readonly=False, meminit=False, map_async=True)
self.txn = self.lmdb_env.begin(write=True)
self.write_f = write_frequency
self.counter = 0
def write_kv(self, k_bytes, v_bytes):
self.txn.put(k_bytes, v_bytes)
self.counter += 1
if self.counter % self.write_f == 0:
self.txn.commit()
self.txn = self.lmdb_env.begin(write=True)
def close(self):
self.txn.commit()
self.lmdb_env.sync()
self.lmdb_env.close()
def __del__(self):
try:
self.txn.commit()
self.lmdb_env.sync()
self.lmdb_env.close()
except:
pass
def parseline(line):
return [s.strip() for s in line.split("\t")]
def read_filter_dict(infiles):
entity2id = {}
relation2id = {}
entity_counter = 0
relation_counter = 0
unreadable_lines = []
    # Build entity and relation dictionaries from the relational triples
for sub in ["train", "valid", "test"]:
with open(infiles[sub], encoding="latin") as file:
for linenum, line in enumerate(file):
triplet = parseline(line)
if len(triplet) != 3:
unreadable_lines.append((linenum, line))
else:
e1, r, e2 = triplet
r_rev = r + "_reverse"
if e1 not in entity2id:
entity_counter += 1
entity2id[e1] = entity_counter
if e2 not in entity2id:
entity_counter += 1
entity2id[e2] = entity_counter
if r not in relation2id:
relation_counter += 1
relation2id[r] = relation_counter
if r_rev not in relation2id:
relation_counter += 1
relation2id[r_rev] = relation_counter
print("Unreadable lines:", len(unreadable_lines))
# Normalize numerical values
num_raw = []
with open(infiles["numerical"], encoding="utf-8") as file:
for line in file:
e1, r, e2 = parseline(line)
num_raw.append(int(e2))
if r not in relation2id:
relation_counter += 1
relation2id[r] = relation_counter
scale_params = {
"mean": np.mean(num_raw),
"std": np.std(num_raw)
}
e2_num = np.array(num_raw)
e2_num = np.subtract(e2_num, scale_params["mean"])
e2_num /= scale_params["std"]
# Encode text
words = set()
with open(infiles["text"], encoding="utf-8") as file:
for line in file:
e1, e2 = parseline(line)
words.update(e2.split())
words.update(["<go>", "<eos>"])
word2id = dict(zip(words, range(len(words))))
relation_counter += 1
relation2id['bio'] = relation_counter
meta = {
"entity2id": entity2id,
"rel2id": relation2id,
"word2id": word2id,
"maxentityid": max(entity2id.values()),
"scale_params": scale_params
}
return meta
def encode_int_kv_tobyte(int_k, record):
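    # Keys are zero-padded to a fixed width so that LMDB's lexicographic key order
    # matches the numeric order of the integer ids.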
byte_k = u"{0:0>10}".format(int_k).encode("utf-8")
byte_v = msgpack.dumps(record)
return byte_k, byte_v
def encode_store_S(infiles, outfiles, meta):
triplets = defaultdict(dict)
writer = LMDB_Writer(outfiles["train_S"])
e2id = meta["entity2id"]
r2id = meta["rel2id"]
with open(infiles["train"], encoding="latin") as file:
counter = 0
for line in file:
splits = parseline(line)
if len(splits) == 3:
counter += 2
raw_e1, raw_r, raw_e2 = parseline(line)
e1, r, e2, r_rev = e2id[raw_e1], r2id[raw_r], e2id[raw_e2], r2id[raw_r + "_reverse"]
if r not in triplets[e1]:
triplets[e1][r] = [e2]
else:
triplets[e1][r].append(e2)
if r_rev not in triplets[e2]:
triplets[e2][r_rev] = [e1]
else:
triplets[e2][r_rev].append(e1)
meta["train_size"] = counter
for e1, record in triplets.items():
byte_k, byte_v = encode_int_kv_tobyte(e1, record)
writer.write_kv(byte_k, byte_v)
writer.close()
return meta
def encode_store_test_S(infiles, outfiles, meta):
triplets = defaultdict(dict)
writer = LMDB_Writer(outfiles["test"])
e2id = meta["entity2id"]
r2id = meta["rel2id"]
with open(infiles["test"], encoding="latin") as file:
counter = 0
for line in file:
counter += 1
raw_e1, raw_r, raw_e2 = parseline(line)
e1, r, e2 = e2id[raw_e1], r2id[raw_r], e2id[raw_e2]
if r not in triplets[e1]:
triplets[e1][r] = [e2]
else:
triplets[e1][r].append(e2)
meta["test_size"] = counter
for e1, record in triplets.items():
byte_k, byte_v = encode_int_kv_tobyte(e1, record)
writer.write_kv(byte_k, byte_v)
writer.close()
return meta
meta = read_filter_dict(in_files)
meta = encode_store_S(in_files, out_files, meta)
meta = encode_store_test_S(in_files, out_files, meta)
np.save(out_files["meta"], meta)
print(meta["train_size"], meta["test_size"])
| 6,281 | 28.632075 | 106 | py |
mkbe | mkbe-master/MKBE/Evaluation/Evaluation.py | import numpy as np
def Mrr(Score_N, Score):
    """Reciprocal rank of one positive score among its negative scores.

    Negative scores that are numerically equal to the positive score (within 1e-4)
    are discarded first so that ties do not count against the rank.
    Returns (1 / rank, rank), with rank starting at 1.
    """
    Score_N = np.array([s for s in Score_N if np.absolute(s - Score) >= 0.0001])
    ranked = np.append(Score_N, Score).tolist()
    ranked.sort(reverse=True)
    rank = ranked.index(Score) + 1
    return 1. / rank, rank
def hits(Score_N, Score, is_print=True):
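    # Aggregates per-sample reciprocal ranks into MRR and HITS@{1,2,3}.
    # Returns only HITS@1 when is_print is True, otherwise (HITS@1, [MRR, HITS@1, HITS@2, HITS@3]);
    # train_ml.py calls hits(..., False) to receive both values.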
MRR = 0
hit1 = 0
hit2 = 0
hit3 = 0
for i in range(len(Score)):
mrr, hit = Mrr(Score_N[i], Score[i])
MRR = MRR + mrr
if hit == 1:
hit1 += 1
if hit < 3:
hit2 += 1
if hit < 4:
hit3 += 1
MRR = MRR * (1. / len(Score))
if is_print:
print('MRR:', MRR)
print('hit1:', hit1 * (1. / len(Score)))
print('hit2:', hit2 * (1. / len(Score)))
print('hit3:', hit3 * (1. / len(Score)))
return hit1 * (1. / len(Score))
return hit1 * (1. / len(Score)), [MRR, hit1 * (1. / len(Score)), hit2 * (1. / len(Score)), hit3 * (1. / len(Score))]
if __name__ == "__main__":
Score = np.load('scores/scalar_embedding_positive.npy')
Score_N = np.load('scores/scalar_embedding_negative.npy')
print(len(Score), len(Score_N))
print(Score[0:5], Score_N[0:5])
MRR = 0
hit1 = 0
hit2 = 0
hit3 = 0
for i in range(len(Score)):
mrr, hit = Mrr(Score_N[i], Score[i])
MRR = MRR + mrr
if hit == 1:
hit1 += 1
if hit < 3:
hit2 += 1
if hit < 4:
hit3 += 1
MRR = MRR * (1. / len(Score))
print('MRR:', MRR)
print('hit1:', hit1 * (1. / len(Score)))
print('hit2:', hit2 * (1. / len(Score)))
print('hit3:', hit3 * (1. / len(Score)))
| 1,846 | 25.014085 | 120 | py |
mkbe | mkbe-master/MKBE/Evaluation/__init__.py | from .Evaluation import hits, Mrr | 33 | 33 | 33 | py |
mkbe | mkbe-master/MKBE/tasks/train_yago_kb.py | from input_pipeline.yago_input_pipeline import train_dataflow, test_dataflow, profiling_dataflow, profiling_test_df
from models.yago_convE_kb_model import YAGOConveMultimodel
from train.yago_training import SingleGPUTrainer
from test.test_runner import TestRunner
from tensorpack import *
import numpy as np
import tensorflow as tf
files = {
"train_S": "../assets/yago-processed/train_s.mdb",
"train_N": "../assets/yago-processed/train_n.mdb",
"train_I": "../assets/yago-processed/train_i.mdb",
"train_D": "../assets/yago-processed/train_d.mdb",
"test": "../assets/yago-processed/test.mdb",
"meta": "../assets/yago-processed/meta.npy"
}
meta = np.load(files["meta"]).item(0)
hyperparams = {
"dtype": tf.float32,
"id_dtype": tf.int32,
"emb_dim": 200,
"MLPLayers": 2,
"GRULayers": 2,
"CNNTextLayers": 2,
"CNNTextDilation": 2,
"CNNTextKernel": 4,
"entity_size": meta["maxentityid"] + 1,
"relation_size": len(meta["rel2id"]) + 1,
"word_size": len(meta["word2id"]) + 1,
"normalize_e1": False,
"normalize_relation": False,
"normalize_e2": False,
"test_normalize_e1": False,
"test_normalize_relation": False,
"test_normalize_e2": False,
"regularization_coefficient": 0.0,
"learning_rate": 0.003,
"lr_decay": 0.995,
"label_smoothing": 0.1,
"batch_size": 256,
"bias": False,
"debug": False,
"emb_keepprob": 0.8,
"fm_keepprob": 0.8,
"mlp_keepprob": 0.7,
"enc_keepprob": 0.9
}
utils.logger.set_logger_dir("./logs", action="d")
cbs = [
PeriodicCallback(TensorPrinter(["loss", "lr"]), every_k_steps=1000),
TestRunner(
test_dataflow(files["test"], files["meta"], 32),
[ScalarStats("mrr"), ScalarStats("hits_1"), ScalarStats("hits_3"), ScalarStats("hits_10"),
ScalarStats("label_smoothing"), ScalarStats("inv_e2")])
]
monitors = [
callbacks.ScalarPrinter(),
callbacks.JSONWriter(),
TFEventWriter(logdir="/mnt/data/log", max_queue=5, flush_secs=2)
]
cfg = TrainConfig(
model=YAGOConveMultimodel(hyperparams),
data=train_dataflow(files["train_S"], files["meta"], hyperparams["batch_size"], 300),
max_epoch=200,
steps_per_epoch=meta["train_size"] // hyperparams["batch_size"],
monitors=monitors,
callbacks=cbs
)
trainer = SingleGPUTrainer(hyperparams)
launch_train_with_config(cfg, trainer)
| 2,389 | 28.506173 | 115 | py |
mkbe | mkbe-master/MKBE/tasks/train_ml.py | from models import ml_convE_kb as model
from input_pipeline import negative_sampling as ns
from input_pipeline import dataset_loader as dl
import Evaluation
import tensorflow as tf
import numpy as np
# subset can be ["movie_title_poster_user_rating", "movie_title_user_rating", "movie_title_rating", "movie_rating",
# "user_rating", "rating"]
fold = 1
subset = "movie_title_poster_user_rating"
experiment_name = "best_{:}".format(fold)
files_train = [
"../assets/ml100k-processed/u{:}-{:}-train.npy".format(fold, subset),
"../assets/ml100k-processed/u{:}-{:}-idencoder.npy".format(fold, subset),
"../assets/ml100k-processed/u{:}-{:}-titles.npy".format(fold, subset),
"../code/movielens/ml-100k/feature_maps.npy",
"../assets/ml100k-processed/u{:}-{:}-title-dict.npy".format(fold, subset)
]
files_test = [
"../assets/ml100k-processed/u{:}-{:}-test.npy".format(fold, subset),
"../assets/ml100k-processed/u{:}-{:}-idencoder.npy".format(fold, subset),
"../assets/ml100k-processed/u{:}-{:}-titles.npy".format(fold, subset),
"../code/movielens/ml-100k/feature_maps.npy",
"../assets/ml100k-processed/u{:}-{:}-title-dict.npy".format(fold, subset)
]
config = {
"logdir": "/mnt/data/log"
}
summary_writer = tf.summary.FileWriter(config["logdir"] + "/train", max_queue=2, flush_secs=5)
test_writer = tf.summary.FileWriter(config["logdir"] + "/test", max_queue=2, flush_secs=5)
train_set = dl.Dataset(files_train)
test_set = dl.Dataset(files_test)
hyperparams = {
"subset": subset,
"dtype": tf.float16,
"id_dtype": tf.int32,
"emb_dim": 128,
"MLPLayers": 3,
"GRULayers": 3,
"ksize": 5,
"depth": 136,
"drate": 2,
"user_size": train_set.idencoders["maxuserid"] + 1,
"movie_size": train_set.idencoders["maxmovieid"] + 1,
"relation_size": len(train_set.idencoders["rel2id"]) + 1,
"gender_size": len(train_set.idencoders["gender2id"]) + 1,
"job_size": len(train_set.idencoders["job2id"]) + 1,
"zip_size": len(train_set.idencoders["zip2id"]) + 1,
"char_size": len(train_set.idencoders["char2id"]) + 1,
"normalize_e1": False,
"normalize_relation": True,
"normalize_e2": True,
"test_normalize_e1": False,
"test_normalize_relation": True,
"test_normalize_e2": True,
"regularization_coefficient": 5e-6,
"learning_rate": 0.7,
"learning_rate_reduced": 0.0001,
"margin": 1.5,
"init_scale": 24,
"down_scale": 120,
"cnn_scale": 24,
"max_epoch": 1800,
"batch_size": 512,
"bias": True,
"debug": False,
"activation": None
}
# hyperparams.update(np.load("scores/{:}/best_{:}_hyperparams.npy".format(subset, fold)).item())
# hyperparams["max_epoch"] = 1800
tf.set_random_seed(2048)
model_nodes = model.define_graph(hyperparams, {})
test_nodes = model.test_graph(hyperparams, model_nodes, config=None)
debug_nodes = model.debug_graph(hyperparams, test_nodes, config=None)
saver = tf.train.Saver()
highest_hits1_r, pos_scores_highest, neg_scores_highest = 0, None, None
# Configure Training environment
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
sess_config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
sess_config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_2
with tf.Session(config=sess_config) as sess:
sess.run(tf.global_variables_initializer())
print("initialized, on subset {}!".format(subset))
for epoch in range(hyperparams["max_epoch"]):
batch = ns.aggregate_sampled_batch(ns.negative_sampling_aligned(
train_set.next_batch(hyperparams["batch_size"]), None, train_set.idencoders,
train_set.titles, train_set.poster_arr))
# Feed dict for training
feed = dl.build_feed(model_nodes, batch)
[loss, summary, _, _] = sess.run(
[model_nodes["loss_to_show"], model_nodes["training_summary"],
model_nodes["training_op"], model_nodes["rlr_train_op"]
],
feed_dict=feed)
# Write summaries
summary_writer.add_summary(summary, epoch)
if epoch % 480 == 1:
print("loss", loss)
# Due to the limited accuracy of float32, HITS@5_r isn't exactly 100%
metrics = ["MRR_movie", "HITS@10_movie", "HITS@3_movie", "HITS@1_movie", "MRR_r", "HITS@5_r", "HITS@3_r",
"HITS@2_r", "HITS@1_r"]
metrics = ["MRR_r", "HITS@5_r", "HITS@3_r", "HITS@2_r", "HITS@1_r"]
debug_names = ["pos_conv5_4_shape", "neg_conv5_4_shape", "pos_pool5_shape", "neg_pool5_shape",
"pos_poster_vec_shape", "neg_poster_vec_shape"] if hyperparams["debug"] else []
points = []
for offset in range(0, test_set.set_size, 256):
batch = test_set.next_batch_inorder(256, offset)
results = sess.run(
[test_nodes[m] for m in metrics] + [test_nodes["test_summary"]],
feed_dict=dl.build_feed_test(test_nodes, hyperparams, test_set.idencoders, batch)
)
debug_results = sess.run([model_nodes[n] for n in debug_names], feed_dict=feed)
summary_writer.add_summary(results[-1], epoch)
points.append(results[: -1])
mean_m = np.mean(points, axis=0)
print("Test metrics: ", ", ".join("{:}: {:.4f}".format(m, v) for m, v in zip(metrics, mean_m)))
if hyperparams["debug"]:
print("Debug nodes:", ", ".join("{:}: {:}".format(k, v) for k, v in zip(debug_names, debug_results)))
if epoch % (train_set.set_size * 1.8 // hyperparams["batch_size"]) == 2:
print("loss", loss)
metrics = ["MRR_movie", "HITS@10_movie", "HITS@3_movie", "HITS@1_movie", "MRR_r", "HITS@5_r", "HITS@3_r",
"HITS@2_r", "HITS@1_r"]
metrics = ["MRR_r", "HITS@5_r", "HITS@3_r", "HITS@2_r", "HITS@1_r"]
positive_scores = []
negative_scores = []
for offset in range(0, test_set.set_size, 256):
batch = test_set.next_batch_inorder(256, offset)
neg_score, pos_score = sess.run(
[test_nodes["neg_scoring_rating"], test_nodes["pos_scoring"]],
feed_dict=dl.build_feed_test(test_nodes, hyperparams, test_set.idencoders, batch))
positive_scores.append(pos_score)
negative_scores.append(neg_score)
sample_score_arr = np.concatenate(positive_scores)
negative_score_arr = np.concatenate(negative_scores)
hits1_r, eval_num = Evaluation.hits(negative_score_arr, sample_score_arr, False)
print(epoch, " ".join("{:}: {:.4f}".format(name, num)
for name, num in zip(["MRR", "hits1", "hits2", "hits3"], eval_num)))
if hits1_r > highest_hits1_r:
pos_scores_highest = sample_score_arr
neg_scores_highest = negative_score_arr
highest_hits1_r = hits1_r
save_path = saver.save(sess, "../assets/weights/u{:}-convE.ckpt".format(fold)) | 7,193 | 39.189944 | 117 | py |
mkbe | mkbe-master/MKBE/tasks/train_yago.py | import numpy as np
import tensorflow as tf
import input_pipeline.dataset_loader_yago as dl
import models.yago_convE_kb as model
import input_pipeline.negative_sampling_yago as ns
# subset can be ["id", "text_id", "num_id", "image_id", "image_num_id", "image_text_id", "text_num_id", "image_text_num_id"]
subset = "text_id"
experiment_name = "suboptimal"
#test_subset = "hasGender"
files_train = [
"../code/YAGO/Multi-Model/YAGO-processed/train.npy",
"../code/YAGO/Multi-Model/YAGO-processed/idencoder.npy",
"../code/YAGO/Multi-Model/YAGO-processed/texts.npy"
]
files_test = [
"../code/YAGO/Multi-Model/YAGO-processed/test.npy",
"../code/YAGO/Multi-Model/YAGO-processed/idencoder.npy",
"../code/YAGO/Multi-Model/YAGO-processed/texts.npy"
]
config = {
"logdir": "/mnt/data/log"
}
#summary_writer = tf.summary.FileWriter(config["logdir"] + "/train", max_queue=2, flush_secs=5)
#test_writer = tf.summary.FileWriter(config["logdir"] + "/test", max_queue=2, flush_secs=5)
train_set = dl.Dataset(files_train)
test_set = dl.Dataset(files_test)
hyperparams = {
"dtype": tf.float32,
"id_dtype": tf.int32,
"emb_dim": 256,
"MLPLayers": 2,
"GRULayers": 2,
"CNNTextLayers": 2,
"CNNTextDilation": 2,
"CNNTextKernel": 4,
"entity_size": train_set.idencoders["maxentityid"] + 1,
"relation_size": len(train_set.idencoders["rel2id"]) + 1,
"word_size": len(train_set.idencoders["word2id"]) + 1,
"normalize_e1": False,
"normalize_relation": True,
"normalize_e2": True,
"test_normalize_e1": False,
"test_normalize_relation": True,
"test_normalize_e2": True,
"regularization_coefficient": 0.000001,
"learning_rate": 0.03,
"learning_rate_reduced": 7e-5,
"margin": 1.5,
"label_smoothing": 0.1,
"init_scale": 36,
"cnn_scale": 6,
"max_epoch": 10000,
"batch_size": 1024,
"bias": False,
"debug": False,
"emb_keepprob": 0.77,
"fm_keepprob": 0.77,
"mlp_keepprob": 0.6,
"enc_keepprob": 0.9
}
model_nodes = model.define_graph(hyperparams)
test_nodes = model.test_graph(hyperparams, model_nodes, config=None)
debug_nodes = model.debug_graph(hyperparams, test_nodes, config=None)
highest_hits1_r, pos_scores_highest, neg_scores_highest = 0, None, None
# Configure Training environment
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:
sess.run(tf.global_variables_initializer())
print("initialized")
for epoch in range(hyperparams["max_epoch"]):
batch = ns.aggregate_sampled_batch(ns.negative_sampling_aligned(
train_set.next_batch(hyperparams["batch_size"]), None, train_set.idencoders, train_set.texts))
test_batch = ns.aggregate_sampled_batch(ns.negative_sampling_aligned(
test_set.next_batch(hyperparams["batch_size"] // 32), None, test_set.idencoders, test_set.texts))
# Feed dict for training
feed = dl.build_feed(model_nodes, batch)
test_feed = dl.build_feed(model_nodes, test_batch)
_, _, loss = sess.run(
[model_nodes["training_op"], model_nodes["rlr_train_op"], model_nodes["loss_to_show"]],
feed_dict=feed)
test_summary = sess.run(
model_nodes["training_summary"], feed_dict=test_feed)
# Write summaries
# summary_writer.add_summary(summary, epoch)
# test_writer.add_summary(test_summary, epoch)
if epoch % 20 == 1:
print("Training Loss:", loss) | 3,586 | 32.212963 | 124 | py |
mkbe | mkbe-master/MKBE/tasks/__init__.py | 1 | 0 | 0 | py |
|
mkbe | mkbe-master/MKBE/tasks/train_ml_gan_img.py | from input_pipeline.ml_img_loader import get_input_pipeline
from tensorpack import *
from tensorpack.dataflow import *
import os
df = get_input_pipeline(64)
test = dataflow.TestDataSpeed(df)
test.start() | 205 | 21.888889 | 59 | py |
mkbe | mkbe-master/MKBE/input_pipeline/yago_input_pipeline.py | from tensorpack import *
from tensorpack.dataflow import *
from input_pipeline.yago_lmdb_loader import LoaderS, TestLoaderDataflow
def sparse_to_dense(datapoint):
e1, r, e2_train, e2_test = datapoint
return e1, r, e2_train.toarray(), e2_test
def train_dataflow(s_file, idenc_file, batch_size, epoch, gpu_list=None):
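    # Training pipeline: LMDB-backed generator -> multiprocess prefetch ->
    # sparse-to-dense e2 targets -> queue input -> GPU staging input.
    # Used by tasks/train_yago_kb.py as train_dataflow(files["train_S"], files["meta"], batch_size, 300).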
s_loader = LoaderS(s_file, idenc_file)
df_in = DataFromGenerator(s_loader.gen_batch(batch_size, epoch))
df_multi = MultiProcessPrefetchData(df_in, 96, 10)
df_map = MapData(df_multi, sparse_to_dense)
ins = input_source.QueueInput(df_map)
gpu_ins = input_source.StagingInput(ins, [0] if gpu_list is None else gpu_list)
return gpu_ins
def test_dataflow(s_file, idenc_file, batch_size, gpu_list=None):
df_in = TestLoaderDataflow(s_file, idenc_file)
#df_in = dataflow.DataFromGenerator(s_loader.gen_sample_inorder())
df_batched = dataflow.BatchData(df_in, batch_size, remainder=True)
ins = FeedInput(df_batched, infinite=False)
return ins
def profiling_dataflow(s_file, idenc_file, batch_size, epoch, gpu_list=None):
s_loader = LoaderS(s_file, idenc_file)
df_in = DataFromGenerator(s_loader.gen_batch(batch_size, epoch))
df_multi = MultiProcessPrefetchData(df_in, 96, 10)
df_map = MapData(df_multi, sparse_to_dense)
df_test = TestDataSpeed(df_map)
return df_test
def profiling_test_df(s_file, idenc_file, batch_size, gpu_list=None):
df_in = TestLoaderDataflow(s_file, idenc_file)
#df_in = dataflow.DataFromGenerator(s_loader.gen_sample_inorder())
df_batched = dataflow.BatchData(df_in, batch_size, remainder=True)
df_test = TestDataSpeed(df_batched)
return df_test | 1,686 | 37.340909 | 83 | py |
mkbe | mkbe-master/MKBE/input_pipeline/yago_lmdb_loader.py | import numpy as np
import msgpack, msgpack_numpy, lmdb, os
from scipy import sparse
from tensorpack import *
msgpack_numpy.patch()
def decode_key(byte_k):
return int(str(byte_k, encoding="utf-8"))
def encode_key(int_k):
return u"{0:0>10}".format(int_k).encode("UTF-8")
class LoaderS:
def __init__(self, path, meta_path):
isdir = os.path.isdir(path)
self.lmdb_env = lmdb.open(
path, subdir=isdir, readonly=True, lock=False, readahead=True, map_size=1099511627776 * 2, max_readers=100)
self.txn = self.lmdb_env.begin()
self.meta = np.load(meta_path).item(0)
self.id2entity = dict((v, k) for k, v in self.meta["entity2id"].items())
self.id2rel = dict((v, k) for k, v in self.meta["rel2id"].items())
self.max_entity = max(self.id2entity.keys())
def index_str_by_str(self, k):
byte_k = encode_key(self.meta["entity2id"][k])
with self.txn.cursor() as cur:
record = msgpack.loads(cur.get(byte_k))
return dict((self.id2rel[r], self.id2entity[e2]) for r, e2 in record.items())
def index_int_by_int(self, k):
byte_k = encode_key(k)
with self.txn.cursor() as cur:
record_bytes = cur.get(byte_k)
record = msgpack.loads(record_bytes) if record_bytes is not None else None
return record
def gen_batch(self, batch_size, epoch=3):
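        # Each (e1, r) pair is paired with a sparse multi-hot row over all entities
        # (its full set of true e2's) for 1-N training; e1 ids are sampled uniformly
        # and ids missing from the store are skipped.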
for _ in range(self.meta["train_size"] * epoch // batch_size):
triplets = []
ks = np.random.randint(1, self.max_entity + 1, batch_size * 2)
for e1 in ks:
record = self.index_int_by_int(e1)
if record is not None:
for r, e2_l in record.items():
# e2_onehot = np.zeros((1, self.meta["maxentityid"] + 1), dtype=np.float16)
rows = np.array([0] * len(e2_l))
col = np.array(e2_l)
data = np.array([1] * len(e2_l), dtype=np.int8)
e2_onehot = sparse.csr_matrix((data, (rows, col)), shape=(1, self.meta["maxentityid"] + 1))
triplets.append((e1, r, e2_onehot))
batch_idx = np.random.choice(len(triplets), batch_size)
e1 = list(triplets[idx][0] for idx in batch_idx)
r = list(triplets[idx][1] for idx in batch_idx)
e2 = list(triplets[idx][2] for idx in batch_idx)
e2_test = np.zeros((batch_size), dtype=np.int32)
yield np.array(e1, dtype=np.int32), np.array(r, dtype=np.int32), sparse.vstack(e2), e2_test
def gen_sample_inorder(self):
with self.txn.cursor() as cur:
for k, record_byte in cur:
record = msgpack.loads(record_byte)
e1 = decode_key(k)
for r, e2_l in record.items():
for e2 in e2_l:
e2_onehot = np.zeros((self.meta["maxentityid"] + 1,), dtype=np.int8)
e2_onehot[e2] = 1
yield e1, r, e2_onehot, e2
class TestLoaderDataflow(DataFlow, LoaderS):
def __init__(self, path, meta_path):
LoaderS.__init__(self, path, meta_path)
DataFlow.__init__(self)
def reset_state(self):
self.gen = iter(self.gen_sample_inorder())
def __iter__(self):
return self
def __next__(self):
return next(self.gen)
def get_data(self):
self.reset_state()
return self
def size(self):
return self.meta["test_size"] | 3,556 | 35.295918 | 119 | py |
mkbe | mkbe-master/MKBE/input_pipeline/negative_sampling.py | import numpy as np
def negative_sampling_aligned(batch, hyperparams, idenc, titles, poster_arr):
# Negative sampling: randomly choose an entity in the dictionary for categorical data,
# or sample from a normal distribution for real numbers
af = "is of_"
e1, r, e2 = batch
rel2id = idenc["rel2id"]
# Extract age strips
idx_age = np.where(r == rel2id[af + "age"])[0]
pos_age = e1[idx_age].astype(np.int32), r[idx_age].astype(np.int32), e2[idx_age].astype(np.float32)
neg_age = e1[idx_age].astype(np.int32), r[idx_age].astype(np.int32), \
np.random.normal(size=len(idx_age)).astype(np.float32)
# Extract gender strips
idx_gender = np.where(r == rel2id[af + "gender"])[0]
pos_gender = e1[idx_gender].astype(np.int32), r[idx_gender].astype(np.int32), e2[idx_gender].astype(np.int32)
neg_gender = e1[idx_gender].astype(np.int32), r[idx_gender].astype(np.int32), 1 - e2[idx_gender].astype(np.int32)
# Extract occupation
idx_occupation = np.where(r == rel2id[af + "occupation"])[0]
corrupted_e2 = np.random.choice(len(idenc["job2id"]), size=len(idx_occupation))
pos_occupation = e1[idx_occupation].astype(np.int32), r[idx_occupation].astype(np.int32), \
e2[idx_occupation].astype(np.int32)
neg_occupation = e1[idx_occupation].astype(np.int32), r[idx_occupation].astype(np.int32), \
corrupted_e2.astype(np.int32)
# Extract zip
idx_zip = np.where(r == rel2id[af + "zip"])[0]
corrupted_e2 = np.random.choice(len(idenc["zip2id"]), size=len(idx_zip))
pos_zip = e1[idx_zip].astype(np.int32), r[idx_zip].astype(np.int32), e2[idx_zip].astype(np.int32)
neg_zip = e1[idx_zip].astype(np.int32), r[idx_zip].astype(np.int32), corrupted_e2.astype(np.int32)
# Extract title
idx_title = np.where(r == rel2id[af + "title"])[0]
if len(idx_title) > 0:
corrupted_e2 = np.random.choice(titles, size=len(idx_title))
pos_len = np.array([len(line) for line in e2[idx_title]], dtype=np.int32)
neg_len = np.array([len(line) for line in corrupted_e2], dtype=np.int32)
max_pos_len = max(pos_len)
max_neg_len = max(neg_len)
pos_e2 = np.array([line + [0] * (max_pos_len - len(line)) for line in e2[idx_title]], dtype=np.int32)
neg_e2 = np.array([line + [0] * (max_neg_len - len(line)) for line in corrupted_e2], dtype=np.int32)
pos_title = e1[idx_title], r[idx_title], pos_e2, pos_len
neg_title = e1[idx_title], r[idx_title], neg_e2, neg_len
else:
ept_a = np.zeros((0, 20), dtype=np.int32)
ept_b = np.array([], dtype=np.int32)
pos_title = ept_b, ept_b, ept_a, np.zeros((0,), dtype=np.int32)
neg_title = ept_b, ept_b, ept_a, np.zeros((0,), dtype=np.int32)
# Extract Poster
idx_poster = np.where(r == rel2id[af + "poster"])[0]
if len(idx_poster) > 0:
corrupted_e2_idx = np.random.choice(len(poster_arr), size=len(idx_poster))
corrupted_e2 = poster_arr[corrupted_e2_idx, :, :, :]
pos_poster = e1[idx_poster].astype(np.int32), r[idx_poster].astype(np.int32), np.concatenate(e2[idx_poster], axis=0)
neg_poster = e1[idx_poster].astype(np.int32), r[idx_poster].astype(np.int32), corrupted_e2
else:
ept_b = np.array([], dtype=np.int32)
pos_poster = ept_b, ept_b, np.array([[[[]]]], dtype=np.float32).reshape((-1, 16, 16, 512))
neg_poster = ept_b, ept_b, np.array([[[[]]]], dtype=np.float32).reshape((-1, 16, 16, 512))
# Extract release date
idx_date = np.where(r == rel2id[af + "release date"])[0]
pos_date = e1[idx_date].astype(np.int32), r[idx_date].astype(np.int32), e2[idx_date].astype(np.float32)
neg_date = e1[idx_date].astype(np.int32), r[idx_date].astype(np.int32), \
np.random.normal(size=len(idx_date)).astype(np.float32)
# Extract genre
idx_genre = np.where(r == rel2id[af + "genre"])[0]
if len(idx_genre) > 0:
pos_e2 = np.concatenate([np.expand_dims(e2[idx], axis=0) for idx in idx_genre], axis=0).astype(np.float32)
pos_genre = e1[idx_genre].astype(np.int32), r[idx_genre].astype(np.int32), pos_e2
neg_genre = e1[idx_genre].astype(np.int32), r[idx_genre].astype(np.int32), 1 - pos_e2
else:
ept_b = np.array([], dtype=np.int32)
pos_genre = ept_b, ept_b, np.array([[]], dtype=np.float32)
neg_genre = ept_b, ept_b, np.array([[]], dtype=np.float32)
# Extract ratings
pos_rating_list = []
neg_rating_list = []
# Negative sampling for ratings
for rating in range(1, 6):
idx_rating = np.where(r == rel2id["rate_{:}".format(rating)])[0]
corrupted_r = np.array([rel2id["rate_{:}".format(r)] for r in range(1, 6) if r != rating] * len(idx_rating),
dtype=np.int32)
pos_rating = np.tile(e1[idx_rating], 4), np.tile(r[idx_rating], 4), np.tile(e2[idx_rating], 4)
neg_rating = np.tile(e1[idx_rating], 4), corrupted_r, np.tile(e2[idx_rating], 4)
pos_rating_list.append(pos_rating)
neg_rating_list.append(neg_rating)
# Negative sampling for movies
idx_rating = np.where(np.logical_or.reduce([r == rel2id["rate_{:}".format(rating)] for rating in range(1, 6)]))[0]
corrupted_e2 = np.random.choice(idenc["maxmovieid"], size=len(idx_rating))
pos_rating = e1[idx_rating], r[idx_rating], e2[idx_rating]
neg_rating = e1[idx_rating], r[idx_rating], corrupted_e2
pos_rating_list.append(pos_rating)
neg_rating_list.append(neg_rating)
pos_rating = np.concatenate([line[0] for line in pos_rating_list], axis=0).astype(np.int32), \
np.concatenate([line[1] for line in pos_rating_list], axis=0).astype(np.int32), \
np.concatenate([line[2] for line in pos_rating_list], axis=0).astype(np.int32)
neg_rating = np.concatenate([line[0] for line in neg_rating_list], axis=0).astype(np.int32), \
np.concatenate([line[1] for line in neg_rating_list], axis=0).astype(np.int32), \
np.concatenate([line[2] for line in neg_rating_list], axis=0).astype(np.int32)
return pos_age, neg_age, pos_gender, neg_gender, pos_occupation, neg_occupation, pos_zip, neg_zip, pos_title, \
neg_title, pos_date, neg_date, pos_genre, neg_genre, pos_rating, neg_rating, pos_poster, neg_poster
def aggregate_sampled_batch(batch):
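    # Flattens the per-relation positive/negative strips produced by
    # negative_sampling_aligned into the single dict of feed keys expected by the model graph.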
pos_age, neg_age, pos_gender, neg_gender, pos_occupation, neg_occupation, pos_zip, neg_zip, pos_title, \
neg_title, pos_date, neg_date, pos_genre, neg_genre, pos_movierating, neg_movierating, pos_poster, \
neg_poster = batch
pos_user_e1 = np.concatenate([batch[idx][0] for idx in range(0, 7, 2)], axis=0).astype(np.int32)
pos_user_r = np.concatenate([batch[idx][1] for idx in range(0, 7, 2)], axis=0).astype(np.int32)
neg_user_e1 = np.concatenate([batch[idx][0] for idx in range(1, 8, 2)], axis=0).astype(np.int32)
neg_user_r = np.concatenate([batch[idx][1] for idx in range(1, 8, 2)], axis=0).astype(np.int32)
pos_movie_e1 = np.concatenate([batch[idx][0] for idx in range(8, 13, 2)], axis=0).astype(np.int32)
pos_movie_r = np.concatenate([batch[idx][1] for idx in range(8, 13, 2)], axis=0).astype(np.int32)
neg_movie_e1 = np.concatenate([batch[idx][0] for idx in range(9, 14, 2)], axis=0).astype(np.int32)
neg_movie_r = np.concatenate([batch[idx][1] for idx in range(9, 14, 2)], axis=0).astype(np.int32)
pos_userrating = pos_movierating[0]
pos_relrating = pos_movierating[1]
pos_ratedmovie = pos_movierating[2]
neg_userrating = neg_movierating[0]
neg_relrating = neg_movierating[1]
neg_ratedmovie = neg_movierating[2]
pos_poster_movie, pos_poster_rel, pos_poster_fm = pos_poster
neg_poster_movie, neg_poster_rel, neg_poster_fm = neg_poster
return {
"pos_user_e1": pos_user_e1,
"pos_user_r": pos_user_r,
"neg_user_e1": neg_user_e1,
"neg_user_r": neg_user_r,
"pos_movie_e1": pos_movie_e1,
"pos_movie_r": pos_movie_r,
"neg_movie_e1": neg_movie_e1,
"neg_movie_r": neg_movie_r,
"pos_age": pos_age[2],
"neg_age": neg_age[2],
"pos_gender": pos_gender[2],
"neg_gender": neg_gender[2],
"pos_occupation": pos_occupation[2],
"neg_occupation": neg_occupation[2],
"pos_zip": pos_zip[2],
"neg_zip": neg_zip[2],
"pos_title": pos_title[2],
"neg_title": neg_title[2],
"pos_title_len": pos_title[3],
"neg_title_len": neg_title[3],
"pos_date": pos_date[2],
"neg_date": neg_date[2],
"pos_genre": pos_genre[2],
"neg_genre": neg_genre[2],
"pos_userrating": pos_userrating,
"neg_userrating": neg_userrating,
"pos_relrating": pos_relrating,
"neg_relrating": neg_relrating,
"pos_movierating": pos_ratedmovie,
"neg_movierating": neg_ratedmovie,
"pos_poster_movie": pos_poster_movie,
"pos_poster_rel": pos_poster_rel,
"pos_poster_fm": pos_poster_fm,
"neg_poster_movie": neg_poster_movie,
"neg_poster_rel": neg_poster_rel,
"neg_poster_fm": neg_poster_fm
}
def build_gan_feed(batch, hyperparams, is_training=True):
e1, r, e2 = batch
pos_len = np.array([len(line) for line in e2], dtype=np.int32)
max_pos_len = max(pos_len)
pos_e2 = np.array([line + [0] * (max_pos_len - len(line)) for line in e2], dtype=np.int32)
pos_title = e1, r, pos_e2, pos_len
return {
"pos_movie_e1": e1,
"pos_movie_r": r,
"pos_title": pos_e2,
"pos_title_len": pos_len,
"emb_keepprob": hyperparams["emb_keepprob"],
"fm_keepprob": hyperparams["fm_keepprob"],
"mlp_keepprob": hyperparams["mlp_keepprob"],
"training_flag": is_training
}
| 9,866 | 46.210526 | 124 | py |
mkbe | mkbe-master/MKBE/input_pipeline/dataset_loader.py | # Relations used: age, gender, occupation, zip, title, release date, genre, rating(1-5)
import numpy as np
class Dataset:
def __init__(self, files, setname="train"):
setfile, encfile, titles, posters, title_dict = files
setarr = np.load(setfile)
self.idencoders = np.load(encfile).reshape((1))[0]
self.titles = np.load(titles)
self.title_dict = np.load(title_dict).item()
self.posters = np.load(posters).item()
self.poster_arr = np.concatenate(list(self.posters.values()), axis=0)
self.users = self.idencoders["maxuserid"]
self.movies = self.idencoders["maxmovieid"]
self.title_keys = list(self.title_dict.keys())
self.e1 = setarr[:, 0]
self.r = setarr[:, 1]
self.e2 = setarr[:, 2]
self.set_size = self.e1.shape[0]
def next_batch(self, batch_size):
idx = np.random.randint(self.set_size, size=batch_size)
return self.e1[idx], self.r[idx], self.e2[idx]
def next_batch_inorder(self, batch_size, offset):
end = offset + batch_size
return self.e1[offset:end], self.r[offset:end], self.e2[offset:end]
def title_triplets(self, batch_size):
e1 = np.random.choice(self.title_keys, batch_size)
r = np.array([self.idencoders["rel2id"]["is of_title"]] * batch_size, dtype=np.int)
        e2 = np.array([self.title_dict[n] for n in e1], dtype=list)
        return e1, r, e2
def build_feed(nodes, batch):
params = {
"emb_keepprob:0": 0.77,
"fm_keepprob:0": 0.77,
"mlp_keepprob:0": 0.6,
"enc_keepprob:0": 0.9,
"is_training:0": True
}
feeds = dict((nodes[k], batch[k]) for k in batch.keys())
feeds.update(params)
return feeds
def build_feed_test(nodes, hyperparams, idenc, batch):
return {
nodes["rating_relations"]:
np.array([v for k, v in idenc["rel2id"].items() if "rate" in k],
dtype=np.int32),
nodes["pos_user"]: batch[0].astype(np.int32),
nodes["pos_r"]: batch[1].astype(np.int32),
nodes["pos_movie"]: batch[2].astype(np.int32)
}
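# Usage sketch (illustrative only -- the file tuple, `nodes` and `hyperparams` below are
# assumptions that mirror the signatures above, not objects defined in this module):
# train_set = Dataset(("train.npy", "encoders.npy", "titles.npy", "posters.npy", "title_dict.npy"))
# e1, r, e2 = train_set.next_batch(128)
# feeds = build_feed_test(nodes, hyperparams, train_set.idencoders, (e1, r, e2))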
| 2,118 | 33.177419 | 91 | py |
mkbe | mkbe-master/MKBE/input_pipeline/__init__.py | from .dataset_loader import Dataset
from .negative_sampling import negative_sampling_aligned, aggregate_sampled_batch, build_gan_feed
from .yago_lmdb_loader import LoaderS | 171 | 56.333333 | 97 | py |
mkbe | mkbe-master/MKBE/input_pipeline/ml_img_loader.py | from tensorpack import *
from tensorpack.dataflow import *
import cv2, os
import numpy as np
class FileReader:
def __init__(self, imgdir, weightsdir):
self.imgrt = imgdir + "{:}.jpg"
self.weights = np.load(weightsdir)
def read_arr_byid(self, movieid):
filename = self.imgrt.format(movieid)
img = cv2.imread(filename).astype(np.float32) / 128.0 - 1.0
movie_weights = self.weights[movieid, :]
return img, movie_weights
def gen_imgid(self, source=None):
if source is None:
source = range(1, 1683)
for imgid in source:
filename = self.imgrt.format(imgid)
if os.path.exists(filename):
yield imgid
def get_input_pipeline(batch_size):
reader = FileReader("../assets/images/", "../assets/weights/movie_weights.npy")
df_in = dataflow.DataFromGenerator(reader.gen_imgid)
df = dataflow.RepeatedData(df_in, -1)
df = dataflow.MultiProcessMapDataZMQ(df, 4, reader.read_arr_byid, buffer_size=24, strict=False)
df = dataflow.BatchData(df, batch_size, remainder=True)
return df | 1,120 | 31.028571 | 99 | py |
mkbe | mkbe-master/MKBE/input_pipeline/dataset_loader_yago.py | # Relations used: 0-37, numerical, bio, image
import numpy as np
class Dataset:
def __init__(self, files, setname="train"):
setfile, encfile, texts = files
setarr = np.load(setfile, encoding="latin1")
self.idencoders = np.load(encfile, encoding="latin1").reshape((1))[0]
self.texts = np.load(texts, encoding="latin1")
self.entity = self.idencoders["maxentityid"]
self.e1 = setarr[:, 0]
self.r = setarr[:, 1]
self.e2 = setarr[:, 2]
self.set_size = self.e1.shape[0]
print(self.set_size)
def next_batch(self, batch_size, next_img_set=False):
idx = np.random.randint(self.set_size, size=batch_size)
return self.e1[idx], self.r[idx], self.e2[idx]
def next_batch_inorder(self, batch_size, offset):
end = offset + batch_size
return self.e1[offset:end], self.r[offset:end], self.e2[offset:end]
class AddImg:
def __init__(self, idenc, bsize):
self.fm_files = ["YAGO-processed/feature_maps_{}.npy".format(n) for n in range(4)]
self.fm_dict = {}
self.fm_counter = 0
self.load_new_fm()
self.bsize = bsize // 2
self.names = []
self.set_size = 0
def load_new_fm(self):
self.fm_counter = (self.fm_counter + 1) % len(self.fm_files)
self.fm_dict = np.load(self.fm_files[self.fm_counter]).item()
def build_feed(nodes, batch):
params = {
"emb_keepprob:0": 0.77,
"fm_keepprob:0": 0.77,
"mlp_keepprob:0": 0.6,
"enc_keepprob:0": 0.9
}
feeds = dict((nodes[k], batch[k]) for k in batch.keys())
feeds.update(params)
return feeds
def build_feed_test(nodes, hyperparams, idenc, batch):
return {
# nodes["rating_relations"]:
# np.array([v for k, v in idenc["rel2id"].items() if k <= 37],
# dtype=np.int32),
nodes["pos_e1"]: batch[0].astype(np.int32),
nodes["pos_r"]: batch[1].astype(np.int32),
nodes["pos_e2"]: batch[2].astype(np.int32)
}
| 2,056 | 30.646154 | 90 | py |
mkbe | mkbe-master/MKBE/input_pipeline/negative_sampling_yago.py | import numpy as np
def negative_sampling_aligned(batch, hyperparams, idenc, texts):
# Negative sampling: randomly choose an entity in the dictionary for categorical data,
# or sample from a normal distribution for real numbers
e1, r, e2 = batch
rel2id = idenc["rel2id"]
# Extract num strips
idx_num = np.where(
(r == rel2id["happenedOnDate"]) | (r == rel2id["wasBornOnDate"]) | (r == rel2id["diedOnDate"]) | (
r == rel2id["wasCreatedOnDate"]) | (r == rel2id["wasDestroyedOnDate"]))[0]
pos_num = e1[idx_num].astype(np.int32), r[idx_num].astype(np.int32), e2[idx_num].astype(np.float32)
neg_num = e1[idx_num].astype(np.int32), r[idx_num].astype(np.int32), \
np.random.normal(size=len(idx_num)).astype(np.float32)
# Extract relational triplet
idx_triplet = np.where(r <= 37)[0]
corrupted_e2 = np.random.choice(len(idenc["entity2id"]), size=len(idx_triplet))
pos_triplet = e1[idx_triplet].astype(np.int32), r[idx_triplet].astype(np.int32), \
e2[idx_triplet].astype(np.int32)
neg_triplet = e1[idx_triplet].astype(np.int32), r[idx_triplet].astype(np.int32), \
corrupted_e2.astype(np.int32)
# Extract text
idx_text = np.where(r == rel2id["bio"])[0]
if len(idx_text) > 0:
corrupted_e2 = np.random.choice(texts, size=len(idx_text))
pos_len = np.array([len(line) for line in e2[idx_text]], dtype=np.int32)
neg_len = np.array([len(line) for line in corrupted_e2], dtype=np.int32)
max_pos_len = max(pos_len)
max_neg_len = max(neg_len)
pos_e2 = np.array([line + [0] * (max_pos_len - len(line)) for line in e2[idx_text]], dtype=np.int32)
neg_e2 = np.array([line + [0] * (max_neg_len - len(line)) for line in corrupted_e2], dtype=np.int32)
pos_text = e1[idx_text], r[idx_text], pos_e2, pos_len
neg_text = e1[idx_text], r[idx_text], neg_e2, neg_len
else:
ept_a = np.zeros((0, 20), dtype=np.int32)
ept_b = np.array([], dtype=np.int32)
pos_text = ept_b, ept_b, ept_a, np.zeros((0,), dtype=np.int32)
neg_text = ept_b, ept_b, ept_a, np.zeros((0,), dtype=np.int32)
###### Extract image
return pos_num, neg_num, pos_triplet, neg_triplet, pos_text, neg_text
def aggregate_sampled_batch(batch):
pos_num, neg_num, pos_triplet, neg_triplet, pos_text, neg_text = batch
pos_e1 = np.concatenate([batch[idx][0] for idx in range(0, 5, 2)], axis=0).astype(np.int32)
pos_r = np.concatenate([batch[idx][1] for idx in range(0, 5, 2)], axis=0).astype(np.int32)
pos_e2 = pos_triplet[2]
neg_e1 = np.concatenate([batch[idx][0] for idx in range(1, 6, 2)], axis=0).astype(np.int32)
neg_r = np.concatenate([batch[idx][1] for idx in range(1, 6, 2)], axis=0).astype(np.int32)
neg_e2 = neg_triplet[2]
pos_num = pos_num[2]
neg_num = neg_num[2]
return {
"pos_e1": pos_e1,
"pos_r": pos_r,
"pos_e2": pos_e2,
"neg_e1": neg_e1,
"neg_r": neg_r,
"neg_e2": neg_e2,
"pos_num": pos_num,
"neg_num": neg_num,
"pos_text": pos_text[2],
"neg_text": neg_text[2],
"pos_text_len": pos_text[3],
"neg_text_len": neg_text[3]
}
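# Typical flow for one training step (sketch; `train_set` and `hyperparams` are assumed to come
# from dataset_loader_yago.Dataset and the experiment config, they are not defined in this file):
# raw = train_set.next_batch(batch_size)
# sampled = negative_sampling_aligned(raw, hyperparams, train_set.idencoders, train_set.texts)
# batch = aggregate_sampled_batch(sampled)   # dict of numpy arrays keyed as above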
| 3,263 | 39.296296 | 108 | py |
mkbe | mkbe-master/ImgGAN/inputpipe.py | # coding: utf-8
import tensorflow as tf
"""
def read_parse_preproc(filename_queue):
''' read, parse, and preproc single example. '''
with tf.variable_scope('read_parse_preproc'):
reader = tf.WholeFileReader()
_, image_file = reader.read(filename_queue)
image = tf.image.decode_jpeg(image_file)
image = tf.reshape(image, [64, 64, 3])
image = tf.cast(image, tf.float32)
image = image / 127.5 - 1.0 # preproc - normalize
return [image]
"""
def read_parse_preproc_s(filename_queue):
''' read, parse, and preproc single example. '''
with tf.variable_scope('read_parse_preproc'):
reader = tf.TFRecordReader()
key, records = reader.read(filename_queue)
# parse records
features = tf.parse_single_example(
records,
features={
"image": tf.FixedLenFeature([], tf.string),
"emb": tf.FixedLenFeature([200], tf.float32)
}
)
image = tf.decode_raw(features["image"], tf.uint8)
image = tf.reshape(image, [64, 64, 3]) # The image_shape must be explicitly specified
image = tf.cast(image, tf.float32)
image = image / 127.5 - 1.0 # preproc - normalize
emb = tf.cast(features["emb"], tf.float32)
return image, emb
def read_parse_preproc(filename_queue):
''' read, parse, and preproc single example. '''
with tf.variable_scope('read_parse_preproc'):
reader = tf.TFRecordReader()
key, records = reader.read(filename_queue)
# parse records
features = tf.parse_single_example(
records,
features={
"image": tf.FixedLenFeature([], tf.string),
"imgid": tf.FixedLenFeature([], tf.int64)
}
)
image = tf.decode_raw(features["image"], tf.uint8)
image = tf.reshape(image, [64, 64, 3]) # The image_shape must be explicitly specified
image = tf.cast(image, tf.float32)
image = image / 127.5 - 1.0 # preproc - normalize
return image, features["imgid"]
# https://www.tensorflow.org/programmers_guide/reading_data
def get_batch(tfrecords_list, batch_size, model, shuffle=False, num_threads=1, min_after_dequeue=None, num_epochs=None):
name = "batch" if not shuffle else "shuffle_batch"
with tf.variable_scope(name):
filename_queue = tf.train.string_input_producer(tfrecords_list, shuffle=shuffle, num_epochs=num_epochs)
data_point = read_parse_preproc(filename_queue) if model == "ECBEGAN" else read_parse_preproc_s(filename_queue)
if min_after_dequeue is None:
min_after_dequeue = batch_size * 10
capacity = min_after_dequeue + 3 * batch_size
if shuffle:
batch = tf.train.shuffle_batch(data_point, batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue, num_threads=num_threads,
allow_smaller_final_batch=True)
else:
batch = tf.train.batch(data_point, batch_size, capacity=capacity, num_threads=num_threads,
allow_smaller_final_batch=True)
return batch
def get_batch_join(tfrecords_list, batch_size, model, shuffle=False, num_threads=1, min_after_dequeue=None,
num_epochs=None):
name = "batch_join" if not shuffle else "shuffle_batch_join"
with tf.variable_scope(name):
filename_queue = tf.train.string_input_producer(tfrecords_list, shuffle=shuffle, num_epochs=num_epochs)
example_list = [
(read_parse_preproc(filename_queue) if model == "ECBEGAN" else read_parse_preproc_s(filename_queue)) for _
in range(num_threads)]
if min_after_dequeue is None:
min_after_dequeue = batch_size * 10
capacity = min_after_dequeue + 3 * batch_size
if shuffle:
batch = tf.train.shuffle_batch_join(tensors_list=example_list, batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue, allow_smaller_final_batch=True)
else:
batch = tf.train.batch_join(example_list, batch_size, capacity=capacity, allow_smaller_final_batch=True)
return batch
# interfaces
def shuffle_batch_join(tfrecords_list, batch_size, num_threads, num_epochs, min_after_dequeue=None, model="ECBEGAN"):
return get_batch_join(tfrecords_list, batch_size, model, shuffle=True, num_threads=num_threads,
num_epochs=num_epochs, min_after_dequeue=min_after_dequeue)
def batch_join(tfrecords_list, batch_size, num_threads, num_epochs, min_after_dequeue=None, model="ECBEGAN"):
return get_batch_join(tfrecords_list, batch_size, model, shuffle=False, num_threads=num_threads,
num_epochs=num_epochs, min_after_dequeue=min_after_dequeue)
def shuffle_batch(tfrecords_list, batch_size, num_threads, num_epochs, min_after_dequeue=None, model="ECBEGAN"):
return get_batch(tfrecords_list, batch_size, model, shuffle=True, num_threads=num_threads,
num_epochs=num_epochs, min_after_dequeue=min_after_dequeue)
def batch(tfrecords_list, batch_size, num_threads, num_epochs, min_after_dequeue=None, model="ECBEGAN"):
return get_batch(tfrecords_list, batch_size, model, shuffle=False, num_threads=num_threads,
num_epochs=num_epochs, min_after_dequeue=min_after_dequeue)
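# Consumption sketch (TF1 queue-runner style, matching how train.py drives these ops;
# the glob pattern and batch size below are placeholder assumptions):
# import glob
# X_op, c_op = shuffle_batch_join(glob.glob("data/*.tfrecord"), batch_size=16,
#                                 num_threads=4, num_epochs=1, model="CBEGAN")
# with tf.Session() as sess:
#     sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
#     coord = tf.train.Coordinator()
#     threads = tf.train.start_queue_runners(coord=coord)
#     images, embs = sess.run([X_op, c_op])   # images in [-1, 1], embs of shape [batch, 200]
#     coord.request_stop(); coord.join(threads)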
| 5,543 | 42.653543 | 120 | py |
mkbe | mkbe-master/ImgGAN/utils.py | # coding: utf-8
import tensorflow as tf
import tensorflow.contrib.slim as slim
'''https://stackoverflow.com/questions/37604289/tkinter-tclerror-no-display-name-and-no-display-environment-variable
Matplotlib chooses the Xwindows backend by default. You need to tell matplotlib not to use the Xwindows backend:
- `matplotlib.use('Agg')`
- Or add the line `backend : Agg` to .config/matplotlib/matplotlibrc.
- Or connect to the server with `ssh -X ...` to use Xwindows.
'''
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import scipy.misc
import numpy as np
def get_best_gpu():
    '''Dependency: pynvml (for gpu memory information)
return type is integer (gpu_id)
'''
try:
from pynvml import nvmlInit, nvmlDeviceGetCount, nvmlDeviceGetHandleByIndex, nvmlDeviceGetName, nvmlDeviceGetMemoryInfo
except Exception as e:
print('[!] {} => Use default GPU settings ...\n'.format(e))
return ''
print('\n===== Check GPU memory =====')
# byte to megabyte
def to_mb(x):
return int(x/1024./1024.)
best_idx = -1
best_free = 0.
nvmlInit()
n_gpu = nvmlDeviceGetCount()
for i in range(n_gpu):
handle = nvmlDeviceGetHandleByIndex(i)
name = nvmlDeviceGetName(handle)
mem = nvmlDeviceGetMemoryInfo(handle)
total = to_mb(mem.total)
free = to_mb(mem.free)
used = to_mb(mem.used)
free_ratio = mem.free / float(mem.total)
print("{} - {}/{} MB (free: {} MB - {:.2%})".format(name, used, total, free, free_ratio))
if free > best_free:
best_free = free
best_idx = i
print('\nSelected GPU is gpu:{}'.format(best_idx))
print('============================\n')
return best_idx
# Iterate the whole dataset and count the numbers
# CelebA contains about 200k examples with 128 tfrecord files and it takes about 1.5s to iterate
def num_examples_from_tfrecords(tfrecords_list):
num_examples = 0
for path in tfrecords_list:
num_examples += sum(1 for _ in tf.python_io.tf_record_iterator(path))
return num_examples
def expected_shape(tensor, expected):
"""batch size N shouldn't be set.
you can use shape of tensor instead of tensor itself.
Usage:
# batch size N is skipped.
expected_shape(tensor, [28, 28, 1])
expected_shape(tensor.shape, [28, 28, 1])
"""
if isinstance(tensor, tf.Tensor):
shape = tensor.shape[1:]
else:
shape = tensor[1:]
shape = list(map(lambda x: x.value, shape))
err_msg = 'wrong shape {} (expected shape is {})'.format(shape, expected)
assert shape == expected, err_msg
# if not shape == expected:
# warnings.warn('wrong shape {} (expected shape is {})'.format(shape, expected))
def plot(samples, shape=(4,4), figratio=0.75):
"""only for square-size samples
wh = sqrt(samples.size)
figratio: small-size = 0.75 (default) / big-size = 1.0
"""
if len(samples) != shape[0]*shape[1]:
print("Error: # of samples = {} but shape is {}".format(len(samples), shape))
return
h_figsize = shape[0] * figratio
w_figsize = shape[1] * figratio
fig = plt.figure(figsize=(w_figsize, h_figsize))
gs = gridspec.GridSpec(shape[0], shape[1])
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample) # checks cmap ...
return fig
def show_all_variables():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def merge(images, size):
"""merge images - burrowed from @carpedm20.
checklist before/after imsave:
* are images post-processed? for example - denormalization
* is np.squeeze required? maybe for grayscale...
"""
h, w = images.shape[1], images.shape[2]
if (images.shape[3] in (3,4)):
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
elif images.shape[3]==1:
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]
return img
else:
raise ValueError('in merge(images,size) images parameter must have dimensions: HxW or HxWx3 or HxWx4')
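# Example of the imsave checklist above (sketch; `fake_samples` in [-1, 1] with shape
# [16, 64, 64, 3] is an assumption -- this mirrors how eval.py post-processes samples):
# samples = (fake_samples + 1.) / 2.        # denormalize to [0, 1] before saving
# grid = merge(samples, size=[4, 4])        # len(samples) must equal 4 * 4
# scipy.misc.imsave('grid.png', grid)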
'''Sugar for gradients histograms
# D_train_op = tf.train.AdamOptimizer(learning_rate=self.D_lr, beta1=self.beta1, beta2=self.beta2).\
# minimize(D_loss, var_list=D_vars)
D_opt = tf.train.AdamOptimizer(learning_rate=self.D_lr, beta1=self.beta1, beta2=self.beta2)
D_grads = tf.gradients(D_loss, D_vars)
D_grads_and_vars = list(zip(D_grads, D_vars))
D_train_op = D_opt.apply_gradients(grads_and_vars=D_grads_and_vars)
# G_train_op = tf.train.AdamOptimizer(learning_rate=self.G_lr, beta1=self.beta1, beta2=self.beta2).\
# minimize(G_loss, var_list=G_vars, global_step=global_step)
G_opt = tf.train.AdamOptimizer(learning_rate=self.G_lr, beta1=self.beta1, beta2=self.beta2)
G_grads = tf.gradients(G_loss, G_vars)
G_grads_and_vars = list(zip(G_grads, G_vars))
G_train_op = G_opt.apply_gradients(grads_and_vars=G_grads_and_vars, global_step=global_step)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
for grad, var in D_grads_and_vars:
tf.summary.histogram('D/' + var.name + '/gradient', grad)
for grad, var in G_grads_and_vars:
tf.summary.histogram('G/' + var.name + '/gradient', grad)
'''
| 5,866 | 33.309942 | 127 | py |
mkbe | mkbe-master/ImgGAN/config.py | from models import *
model_zoo = ['EBGAN', 'BEGAN', 'CBEGAN', 'CBEGANHG', 'ECBEGAN']
def get_model(mtype, name, training):
model = None
if mtype == 'EBGAN':
model = ebgan.EBGAN
elif mtype == 'BEGAN':
model = began.BEGAN
elif mtype == 'CBEGAN':
model = cbegan.BEGAN
elif mtype == 'CBEGANHG':
model = cbeganhg.BEGAN
elif mtype == 'ECBEGAN':
model = ecbegan.BEGAN
else:
assert False, mtype + ' is not in the model zoo'
assert model, mtype + ' is work in progress'
return model(name=name, training=training)
def get_dataset(dataset_name):
yago_64 = "../assets/YAGO_imgs/*.jpg"
yago_tfrecord = "../assets/YAGO_imgs_tfrecord/*.tfrecord"
yago_facecrop = "../assets/yago-facecrop-tfrecord/*.tfrecord"
if dataset_name == 'yago':
path = yago_64
n_examples = 28950
elif dataset_name == 'yago_tfrecord':
path = yago_tfrecord
n_examples = 28950
elif dataset_name == "yago_facecrop":
path = yago_facecrop
n_examples = 21789
else:
        raise ValueError('{} is not supported. dataset must be yago, yago_tfrecord, or yago_facecrop.'.format(dataset_name))
return path, n_examples
def pprint_args(FLAGS):
print("\nParameters:")
for attr, value in sorted(vars(FLAGS).items()):
print("{}={}".format(attr.upper(), value))
print("")
| 1,368 | 25.326923 | 106 | py |
mkbe | mkbe-master/ImgGAN/eval.py | #coding: utf-8
import tensorflow as tf
import numpy as np
import utils, cv2
import config, pickle
import os, glob
import scipy.misc
import random
from argparse import ArgumentParser
slim = tf.contrib.slim
def build_parser():
parser = ArgumentParser()
models_str = ' / '.join(config.model_zoo)
parser.add_argument('--model', help=models_str, required=True)
parser.add_argument('--name', help='default: name=model')
parser.add_argument('--dataset', '-D', required=True)
parser.add_argument('--sample', '-N', help='# of samples. It should be a square number. (default: 16)',
default=16, type=int)
parser.add_argument('--rep', default=16)
return parser
def sample_z(shape):
return np.random.normal(size=shape)
def get_all_checkpoints(ckpt_dir, step=None, force=False):
'''
    When learning is interrupted and resumed, not all checkpoints can be fetched with get_checkpoint_state
    (the checkpoint state is rewritten from the point of resume).
    This function forcibly fetches all checkpoints when force=True.
'''
if force:
ckpts = os.listdir(ckpt_dir) # get all fns
ckpts = map(lambda p: os.path.splitext(p)[0], ckpts) # del ext
ckpts = set(ckpts) # unique
ckpts = filter(lambda x: x.split('-')[-1].isdigit(), ckpts) # filter non-ckpt
ckpts = sorted(ckpts, key=lambda x: int(x.split('-')[-1])) # sort
if step is not None:
ckpts = filter(lambda x: x.split('-')[-1] == str(step), ckpts)
ckpts = list(map(lambda x: os.path.join(ckpt_dir, x), ckpts)) # fn => path
else:
ckpts = tf.train.get_checkpoint_state(ckpt_dir).all_model_checkpoint_paths
return ckpts
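# Example (paths are illustrative): train.py saves checkpoints as "<ckpt_path>/<model.name>-<step>",
# so get_all_checkpoints('checkpoints/yago_facecrop/CBEGAN', step=30000, force=True) would return
# something like ['checkpoints/yago_facecrop/CBEGAN-30000'].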
def load_meta():
token2id = np.load("../assets/yago-weights/token2id.npy").item(0)
entity_emb = np.load("../assets/yago-weights/entity_embedding.npy")
entity_emb /= np.std(entity_emb)
return token2id, entity_emb
class CSampler:
def __init__(self):
self.token2id = np.load("../assets/yago-weights/token2id.npy").item(0)
self.entity_emb = np.load("../assets/yago-weights/entity_embedding.npy")
self.entity_emb /= np.std(self.entity_emb)
self.tokens = list(self.token2id.keys())
with open("../assets/yago-test-imgid.pkl", "rb") as f:
self.testids = pickle.load(f)
def sample_c(self, rep):
c_l = []
ind_l = []
for ind in self.testids:
for rep_ind in range(rep):
c_l.append(self.entity_emb[ind, :])
ind_l.append((ind, rep_ind))
return np.array(c_l), ind_l
class Estimator:
def __init__(self):
self.token2id = np.load("../assets/yago-weights/token2id.npy").item(0)
self.id2token = np.load("../assets/yago-weights/id2token.npy").item(0)
self.entity_emb = np.load("../assets/yago-weights/entity_embedding.npy")
self.entity_emb /= np.std(self.entity_emb)
with open("../assets/yago-test-imgid.pkl", "rb") as f:
self.testids = pickle.load(f)
def sample_c(self, rep):
for ind in self.testids:
yield np.array([self.entity_emb[ind, :]] * rep, dtype=np.float32), ind
def sample_c_ind(self, rep):
for ind in self.testids:
yield np.array([ind] * rep, dtype=np.int64), ind
def dump_step(self, sess, model, step, dir, rep=4, merge_lim=20, modelname="ECBEGAN"):
tf.gfile.MakeDirs(os.path.join(dir, str(step)))
print(modelname)
merge_list = []
generator = self.sample_c_ind(rep) if modelname == "ECBEGAN" else self.sample_c(rep)
for c, ind in generator:
z = sample_z([rep, model.z_dim])
if modelname == "ECBEGAN":
gen_batch = ((sess.run(
model.fake_sample, {model.z: z, model.imgid: c}) + 1.0) / 2.0 * 255.0).astype(np.int32)
else:
gen_batch = ((sess.run(
model.fake_sample, {model.z: z, model.c: c}) + 1.0) / 2.0 * 255.0).astype(np.int32)
if len(merge_list) < merge_lim:
merge_list.append(gen_batch)
for rep_ind in range(rep):
img = gen_batch[rep_ind, ...]
cv2.imwrite(os.path.join(dir, str(step), "{}.{}.png".format(ind, rep_ind)), img[..., ::-1])
merge_blob = np.concatenate(merge_list, 0)
merged_img = utils.merge(merge_blob, size=[merge_lim, rep])
cv2.imwrite(os.path.join(dir, "{}.png".format(step)), merged_img[..., ::-1])
def eval(model, name, dataset, sample_shape=[4,4], load_all_ckpt=True):
if name == None:
name = model.name
dir_name = os.path.join('eval', dataset, name)
if tf.gfile.Exists(dir_name):
tf.gfile.DeleteRecursively(dir_name)
tf.gfile.MakeDirs(dir_name)
sampler = CSampler()
restorer = tf.train.Saver(slim.get_model_variables())
config = tf.ConfigProto()
best_gpu = utils.get_best_gpu()
config.gpu_options.visible_device_list = str(best_gpu)
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_2
with tf.Session(config=config) as sess:
ckpt_path = os.path.join('checkpoints', dataset, name)
ckpts = get_all_checkpoints(ckpt_path, step=None, force=load_all_ckpt)
size = sample_shape[0] * sample_shape[1]
z_ = sample_z([size, model.z_dim])
        # CSampler.sample_c takes a single `rep` argument; keep only the first `size` conditions
        c, toks = sampler.sample_c(1)
        c, toks = c[:size], toks[:size]
for v in ckpts:
print("Evaluating {} ...".format(v))
restorer.restore(sess, v)
global_step = int(v.split('/')[-1].split('-')[-1])
fake_samples = sess.run(model.fake_sample, {model.z: z_, model.c: c})
# inverse transform: [-1, 1] => [0, 1]
fake_samples = (fake_samples + 1.) / 2.
merged_samples = utils.merge(fake_samples, size=sample_shape)
fn = "{:0>6d}.png".format(global_step)
scipy.misc.imsave(os.path.join(dir_name, fn), merged_samples)
def eval_individual(model, name, dataset, num=100, rep=4, step=35000):
if name == None:
name = model.name
dir_name = os.path.join('eval', dataset, name)
if tf.gfile.Exists(dir_name):
tf.gfile.DeleteRecursively(dir_name)
tf.gfile.MakeDirs(dir_name)
sampler = CSampler()
restorer = tf.train.Saver(slim.get_model_variables())
config = tf.ConfigProto()
best_gpu = utils.get_best_gpu()
config.gpu_options.visible_device_list = str(best_gpu)
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_2
with tf.Session(config=config) as sess:
ckpt_path = os.path.join('checkpoints', dataset, name)
        ckpt = get_all_checkpoints(ckpt_path, step=step, force=True)[0]
print("Evaluating {} ...".format(ckpt))
restorer.restore(sess, ckpt)
global_step = int(ckpt.split('/')[-1].split('-')[-1])
size = num * rep
z_ = sample_z([size, model.z_dim])
c, inds = sampler.sample_c(rep)
for ind in range(size):
fake_sample = sess.run(model.fake_sample, {model.z: z_[np.newaxis, ind, :], model.c: c[np.newaxis, ind, :]})
img = (fake_sample + 1.0) / 2.0
tokind, rep_ind = inds[ind]
scipy.misc.imsave(os.path.join(dir_name, "{}.{}.png".format(tokind, rep_ind)), img[0, ...])
def eval_dump(model, name, dataset, rep=4, step=97200):
dir_name = os.path.join("eval", dataset, name)
if tf.gfile.Exists(dir_name):
tf.gfile.DeleteRecursively(dir_name)
tf.gfile.MakeDirs(dir_name)
est = Estimator()
restorer = tf.train.Saver(slim.get_model_variables())
config = tf.ConfigProto()
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_2
with tf.Session(config=config) as sess:
ckpt_path = os.path.join('checkpoints', dataset, name)
ckpt = get_all_checkpoints(ckpt_path, step=step, force=True)[0]
print("Evaluating {} ...".format(ckpt))
restorer.restore(sess, ckpt)
est.dump_step(sess, model, step, dir_name, rep=rep, merge_lim=20)
'''
You can create a gif movie through imagemagick on the commandline:
$ convert -delay 20 eval/* movie.gif
'''
# def to_gif(dir_name='eval'):
# images = []
# for path in glob.glob(os.path.join(dir_name, '*.png')):
# im = scipy.misc.imread(path)
# images.append(im)
# # make_gif(images, dir_name + '/movie.gif', duration=10, true_image=True)
# imageio.mimsave('movie.gif', images, duration=0.2)
if __name__ == "__main__":
parser = build_parser()
FLAGS = parser.parse_args()
FLAGS.model = FLAGS.model.upper()
FLAGS.dataset = FLAGS.dataset.lower()
if FLAGS.name is None:
FLAGS.name = FLAGS.model.lower()
config.pprint_args(FLAGS)
N = int(FLAGS.sample)
rep = int(FLAGS.rep)
# training=False => build generator only
model = config.get_model(FLAGS.model, FLAGS.name.upper(), training=False)
eval_dump(model, name=FLAGS.name.upper(), dataset=FLAGS.dataset, rep=5)
| 9,114 | 36.204082 | 120 | py |
mkbe | mkbe-master/ImgGAN/convert.py | # coding: utf-8
import tensorflow as tf
import numpy as np
import scipy.misc
import os, cv2
import glob
def _bytes_features(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def _int64_features(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _floats_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
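# `center_crop` is used by convert() below but is neither defined nor imported in this file.
# The following is a minimal assumed implementation (crop the central crop_size[0] x crop_size[1]
# window); swap in the project's own helper if one exists elsewhere.
def center_crop(im, crop_size):
    h, w = im.shape[:2]
    ch, cw = crop_size
    if h < ch or w < cw:
        raise ValueError('image {}x{} is smaller than crop size {}x{}'.format(h, w, ch, cw))
    top = (h - ch) // 2
    left = (w - cw) // 2
    return im[top:top + ch, left:left + cw]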
def convert_yago(source_dir, target_dir, crop_size, out_size, exts=[''], num_shards=128, tfrecords_prefix=''):
if not tf.gfile.Exists(source_dir):
        print('source_dir does not exist')
return
if tfrecords_prefix and not tfrecords_prefix.endswith('-'):
tfrecords_prefix += '-'
if tf.gfile.Exists(target_dir):
print("{} is Already exists".format(target_dir))
return
else:
tf.gfile.MakeDirs(target_dir)
# get meta-data
path_list = []
for ext in exts:
pattern = '*.' + ext if ext != '' else '*'
path = os.path.join(source_dir, pattern)
path_list.extend(glob.glob(path))
# read embeddings
token2id = np.load("../assets/yago-weights/token2id.npy").item(0)
entity_emb = np.load("../assets/yago-weights/entity_embedding.npy")
entity_emb /= np.std(entity_emb)
# shuffle path_list
np.random.shuffle(path_list)
num_files = len(path_list)
num_per_shard = num_files // num_shards # Last shard will have more files
print('# of files: {}'.format(num_files))
print('# of shards: {}'.format(num_shards))
print('# files per shards: {}'.format(num_per_shard))
# convert to tfrecords
shard_idx = 0
writer = None
for i, path in enumerate(path_list):
if i % num_per_shard == 0 and shard_idx < num_shards:
shard_idx += 1
tfrecord_fn = '{}{:0>4d}-of-{:0>4d}.tfrecord'.format(tfrecords_prefix, shard_idx, num_shards)
tfrecord_path = os.path.join(target_dir, tfrecord_fn)
print("Writing {} ...".format(tfrecord_path))
if shard_idx > 1:
writer.close()
writer = tf.python_io.TFRecordWriter(tfrecord_path)
        # cv2.imread returns BGR; reverse the channel order to get RGB
im = cv2.imread(path)[..., ::-1]
if im.shape != (64, 64, 3):
raise ValueError("Incompatible shape {}".format(str(im.shape)))
# get embedding
name = os.path.splitext(os.path.basename(path))[0].lower()
emb = entity_emb[token2id[name], :]
example = tf.train.Example(features=tf.train.Features(feature={
# "shape": _int64_features(im.shape),
"image": _bytes_features([im.tostring()]),
"emb": _floats_feature(emb),
"imgid": _int64_features([token2id[name]])
}))
writer.write(example.SerializeToString())
writer.close()
def convert(source_dir, target_dir, crop_size, out_size, exts=[''], num_shards=128, tfrecords_prefix=''):
if not tf.gfile.Exists(source_dir):
        print('source_dir does not exist')
return
if tfrecords_prefix and not tfrecords_prefix.endswith('-'):
tfrecords_prefix += '-'
if tf.gfile.Exists(target_dir):
print("{} is Already exists".format(target_dir))
return
else:
tf.gfile.MakeDirs(target_dir)
# get meta-data
path_list = []
for ext in exts:
pattern = '*.' + ext if ext != '' else '*'
path = os.path.join(source_dir, pattern)
path_list.extend(glob.glob(path))
# shuffle path_list
np.random.shuffle(path_list)
num_files = len(path_list)
num_per_shard = num_files // num_shards # Last shard will have more files
print('# of files: {}'.format(num_files))
print('# of shards: {}'.format(num_shards))
print('# files per shards: {}'.format(num_per_shard))
# convert to tfrecords
shard_idx = 0
writer = None
for i, path in enumerate(path_list):
if i % num_per_shard == 0 and shard_idx < num_shards:
shard_idx += 1
tfrecord_fn = '{}{:0>4d}-of-{:0>4d}.tfrecord'.format(tfrecords_prefix, shard_idx, num_shards)
tfrecord_path = os.path.join(target_dir, tfrecord_fn)
print("Writing {} ...".format(tfrecord_path))
if shard_idx > 1:
writer.close()
writer = tf.python_io.TFRecordWriter(tfrecord_path)
# mode='RGB' read even grayscale image as RGB shape
im = scipy.misc.imread(path, mode='RGB')
# preproc
try:
im = center_crop(im, crop_size)
except Exception as e:
# print("im_path: {}".format(path))
# print("im_shape: {}".format(im.shape))
print("[Exception] {}".format(e))
continue
im = scipy.misc.imresize(im, out_size)
example = tf.train.Example(features=tf.train.Features(feature={
# "shape": _int64_features(im.shape),
"image": _bytes_features([im.tostring()])
}))
writer.write(example.SerializeToString())
writer.close()
if __name__ == "__main__":
# YAGO
convert_yago('../assets/yago-facecrop', '../assets/yago-facecrop-tfrecord', crop_size=[128, 128], out_size=[128, 128],
exts=['jpg', 'png'], num_shards=128, tfrecords_prefix='yago')
| 5,321 | 32.055901 | 122 | py |
mkbe | mkbe-master/ImgGAN/ops.py | # coding: utf-8
import tensorflow as tf
slim = tf.contrib.slim
def lrelu(inputs, leak=0.2, scope="lrelu"):
"""
https://github.com/tensorflow/tensorflow/issues/4079
"""
with tf.variable_scope(scope):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * inputs + f2 * abs(inputs)
| 324 | 20.666667 | 56 | py |
mkbe | mkbe-master/ImgGAN/train.py | # coding: utf-8
import tensorflow as tf
from tqdm import tqdm
import numpy as np
import inputpipe as ip
import glob, os, sys, random
from argparse import ArgumentParser
import utils, config, pickle, cv2
def build_parser():
parser = ArgumentParser()
    parser.add_argument('--num_epochs', default=75, help='default: 75', type=int)
parser.add_argument('--batch_size', default=16, help='default: 16', type=int)
parser.add_argument('--num_threads', default=8, help='# of data read threads (default: 8)', type=int)
models_str = ' / '.join(config.model_zoo)
parser.add_argument('--model', help=models_str, required=True) # DRAGAN, CramerGAN
parser.add_argument('--name', help='default: name=model')
parser.add_argument('--dataset', '-D', help='CelebA / LSUN', required=True)
    parser.add_argument('--ckpt_step', default=1000, help='# of steps for saving checkpoint (default: 1000)', type=int)
parser.add_argument('--renew', action='store_true', help='train model from scratch - \
clean saved checkpoints and summaries', default=False)
return parser
def input_pipeline(glob_pattern, batch_size, num_threads, num_epochs, model):
tfrecords_list = glob.glob(glob_pattern)
# num_examples = utils.num_examples_from_tfrecords(tfrecords_list) # takes too long time for lsun
X = ip.shuffle_batch_join(
tfrecords_list, batch_size=batch_size, num_threads=num_threads, num_epochs=num_epochs, model=model)
return X
def sample_z(shape):
return np.random.normal(size=shape)
class Estimator:
def __init__(self):
self.token2id = np.load("../assets/yago-weights/token2id.npy").item(0)
self.id2token = np.load("../assets/yago-weights/id2token.npy").item(0)
self.entity_emb = np.load("../assets/yago-weights/entity_embedding.npy")
self.entity_emb /= np.std(self.entity_emb)
with open("../assets/yago-test-imgid.pkl", "rb") as f:
self.testids = pickle.load(f)
def sample_c(self, rep):
for ind in self.testids:
yield np.array([self.entity_emb[ind, :]] * rep, dtype=np.float32), ind
def sample_c_ind(self, rep):
for ind in self.testids:
yield np.array([ind] * rep, dtype=np.int64), ind
def dump_step(self, sess, model, step, dir, rep=4, merge_lim=20, modelname="ECBEGAN"):
tf.gfile.MakeDirs(os.path.join(dir, str(step)))
merge_list = []
generator = self.sample_c_ind(rep) if modelname == "ECBEGAN" else self.sample_c(rep)
for c, ind in generator:
z = sample_z([rep, model.z_dim])
if modelname == "ECBEGAN":
gen_batch = ((sess.run(
model.fake_sample, {model.z: z, model.imgid: c}) + 1.0) / 2.0 * 255.0).astype(np.int32)
else:
gen_batch = ((sess.run(
model.fake_sample, {model.z: z, model.c: c}) + 1.0) / 2.0 * 255.0).astype(np.int32)
if len(merge_list) < merge_lim:
merge_list.append(gen_batch)
for rep_ind in range(rep):
img = gen_batch[rep_ind, ...]
cv2.imwrite(os.path.join(dir, str(step), "{}.{}.png".format(ind, rep_ind)), img[..., ::-1])
merge_blob = np.concatenate(merge_list, 0)
merged_img = utils.merge(merge_blob, size=[merge_lim, rep])
cv2.imwrite(os.path.join(dir, "{}.png".format(step)), merged_img[..., ::-1])
def train(model, dataset, input_op, num_epochs, batch_size, n_examples, ckpt_step, renew=False):
# n_examples = 202599 # same as util.num_examples_from_tfrecords(glob.glob('./data/celebA_tfrecords/*.tfrecord'))
# 1 epoch = 1583 steps
print("\n# of examples: {}".format(n_examples))
print("steps per epoch: {}\n".format(n_examples//batch_size))
summary_path = os.path.join('./summary/', dataset, model.name)
ckpt_path = os.path.join('./checkpoints', dataset, model.name)
weight_path = os.path.join('./weights', dataset, model.name)
eval_path = os.path.join('./eval_train', dataset, model.name)
est = Estimator()
if renew:
if os.path.exists(summary_path):
tf.gfile.DeleteRecursively(summary_path)
if os.path.exists(ckpt_path):
tf.gfile.DeleteRecursively(ckpt_path)
if not os.path.exists(ckpt_path):
tf.gfile.MakeDirs(ckpt_path)
if not os.path.exists(weight_path):
tf.gfile.MakeDirs(weight_path)
config = tf.ConfigProto()
best_gpu = utils.get_best_gpu()
config.gpu_options.visible_device_list = str(best_gpu) # Works same as CUDA_VISIBLE_DEVICES!
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_2
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer()) # for epochs
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
# https://github.com/tensorflow/tensorflow/issues/10972
        # TensorFlow 1.2 has many bugs with text summaries
        # create config_summary before defining summary_writer - this bypasses a TensorBoard bug
# It seems that batch_size should have been contained in the model config ...
total_steps = int(np.ceil(n_examples * num_epochs / float(batch_size))) # total global step
config_list = [
('num_epochs', num_epochs),
('total_iteration', total_steps),
('batch_size', batch_size),
('dataset', dataset)
]
model_config_list = [[k, str(w)] for k, w in sorted(model.args.items()) + config_list]
model_config_summary_op = tf.summary.text(model.name + '/config', tf.convert_to_tensor(model_config_list),
collections=[])
model_config_summary = sess.run(model_config_summary_op)
# print to console
print("\n====== Process info =======")
print("argv: {}".format(' '.join(sys.argv)))
print("PID: {}".format(os.getpid()))
print("====== Model configs ======")
for k, v in model_config_list:
print("{}: {}".format(k, v))
print("===========================\n")
summary_writer = tf.summary.FileWriter(summary_path, flush_secs=30, graph=sess.graph)
summary_writer.add_summary(model_config_summary)
pbar = tqdm(total=total_steps, desc='global_step')
saver = tf.train.Saver(max_to_keep=9999) # save all checkpoints
global_step = 0
ckpt = tf.train.get_checkpoint_state(ckpt_path)
if ckpt:
saver.restore(sess, ckpt.model_checkpoint_path)
global_step = sess.run(model.global_step)
print('\n[!] Restore from {} ... starting global step is {}\n'.format(ckpt.model_checkpoint_path, global_step))
pbar.update(global_step)
try:
# If training process was resumed from checkpoints, input pipeline cannot detect
# when training should stop. So we need `global_step < total_step` condition.
while not coord.should_stop() and global_step < total_steps:
                # model.all_summary_op contains histogram and image summaries, which are heavy ops,
                # so run it only every 100 steps and the lightweight summary_op otherwise
                summary_op = model.all_summary_op if global_step % 100 == 0 else model.summary_op
batch_X, batch_c = sess.run(input_op)
batch_z = sample_z([batch_size, model.z_dim])
if model.name == "ECBEGAN":
_, summary = sess.run(
[model.D_train_op, summary_op], {model.X: batch_X, model.z: batch_z, model.imgid: batch_c})
_, global_step = sess.run(
[model.G_train_op, model.global_step],
{model.X: batch_X, model.z: batch_z, model.imgid: batch_c})
else:
_, summary = sess.run(
[model.D_train_op, summary_op], {model.X: batch_X, model.z: batch_z, model.c: batch_c})
_, global_step = sess.run(
[model.G_train_op, model.global_step],
{model.X: batch_X, model.z: batch_z, model.c: batch_c})
summary_writer.add_summary(summary, global_step=global_step)
if global_step % 10 == 0:
pbar.update(10)
if global_step % ckpt_step == 0:
saver.save(sess, ckpt_path+'/'+model.name, global_step=global_step)
if model.name == "ECBEGAN" and global_step % 600 == 0:
emb_weight = sess.run(model.emb_mat)
np.save(weight_path + "/entity_emb_{}.npy".format(global_step), emb_weight)
if global_step >= 18000:
est.dump_step(sess, model, global_step, eval_path, modelname=model.name)
except tf.errors.OutOfRangeError:
print('\nDone -- epoch limit reached\n')
finally:
coord.request_stop()
coord.join(threads)
summary_writer.close()
pbar.close()
if __name__ == "__main__":
parser = build_parser()
FLAGS = parser.parse_args()
FLAGS.model = FLAGS.model.upper()
FLAGS.dataset = FLAGS.dataset.lower()
if FLAGS.name is None:
FLAGS.name = FLAGS.model.lower()
config.pprint_args(FLAGS)
# get information for dataset
dataset_pattern, n_examples = config.get_dataset(FLAGS.dataset)
# input pipeline
X = input_pipeline(dataset_pattern, batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_threads, num_epochs=FLAGS.num_epochs, model=FLAGS.model)
model = config.get_model(FLAGS.model, FLAGS.name.upper(), training=True)
train(model=model, dataset=FLAGS.dataset, input_op=X, num_epochs=FLAGS.num_epochs, batch_size=FLAGS.batch_size,
n_examples=n_examples, ckpt_step=FLAGS.ckpt_step, renew=FLAGS.renew)
| 9,960 | 44.072398 | 123 | py |
mkbe | mkbe-master/ImgGAN/models/cbeganhg.py | # coding: utf-8
import tensorflow as tf
import numpy as np
slim = tf.contrib.slim
from utils import expected_shape
import ops
from .basemodel import BaseModel
class BEGAN(BaseModel):
def __init__(self, name, training, D_lr=1e-4, G_lr=1e-4, image_shape=[64, 64, 3], z_dim=64, gamma=0.5, c_dim=200):
self.gamma = gamma
self.c_dim = c_dim
self.decay_step = 600
self.decay_rate = 0.99
self.l1_decay_rate = 0.993
self.beta1 = 0.5
self.lambd_k = 0.001
self.lambd_l1 = 0.1
self.nf = 96
self.lr_lower_bound = 2e-5
super(BEGAN, self).__init__(name=name, training=training, D_lr=D_lr, G_lr=G_lr,
image_shape=image_shape, z_dim=z_dim)
def _build_train_graph(self):
with tf.variable_scope(self.name):
X = tf.placeholder(tf.float32, [None] + self.shape)
z = tf.placeholder(tf.float32, [None, self.z_dim])
c = tf.placeholder(tf.float32, [None, self.c_dim])
global_step = tf.Variable(0, name='global_step', trainable=False)
G = self._generator(z, c)
# Discriminator is not called an energy function in BEGAN. The naming is from EBGAN.
D_real_energy = self.hourglass_discriminator(X, c)
D_fake_energy = self.hourglass_discriminator(G, c, reuse=True)
pixel_energy = tf.reduce_mean(tf.abs(X - G))
L1_c = tf.train.exponential_decay(
self.lambd_l1, global_step, self.decay_step, self.l1_decay_rate, staircase=False)
k = tf.Variable(0., name='k', trainable=False)
with tf.variable_scope('D_loss'):
D_loss = D_real_energy - k * D_fake_energy
with tf.variable_scope('G_loss'):
G_loss = D_fake_energy * (1 - L1_c) + L1_c * pixel_energy
with tf.variable_scope('balance'):
balance = self.gamma*D_real_energy - D_fake_energy
with tf.variable_scope('M'):
M = D_real_energy + tf.abs(balance)
D_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name+'/D/')
G_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name+'/G/')
D_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=self.name+'/D/')
G_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=self.name+'/G/')
            # The authors suggest decaying the learning rate by 0.5 when the convergence measure stalls
# carpedm20 decays by 0.5 per 100000 steps
# Heumi decays by 0.95 per 2000 steps (https://github.com/Heumi/BEGAN-tensorflow/)
D_lr = tf.train.exponential_decay(self.D_lr, global_step, self.decay_step, self.decay_rate, staircase=True)
D_lr = tf.maximum(D_lr, self.lr_lower_bound)
G_lr = tf.train.exponential_decay(self.G_lr, global_step, self.decay_step, self.decay_rate, staircase=True)
G_lr = tf.maximum(G_lr, self.lr_lower_bound)
with tf.variable_scope('D_train_op'):
with tf.control_dependencies(D_update_ops):
D_train_op = tf.train.AdamOptimizer(learning_rate=D_lr, beta1=self.beta1).\
minimize(D_loss, var_list=D_vars)
with tf.variable_scope('G_train_op'):
with tf.control_dependencies(G_update_ops):
G_train_op = tf.train.AdamOptimizer(learning_rate=G_lr, beta1=self.beta1).\
minimize(G_loss, var_list=G_vars, global_step=global_step)
            # the op must be *defined* under control_dependencies (not merely run there)
with tf.control_dependencies([D_train_op]): # should be iterable
with tf.variable_scope('update_k'):
update_k = tf.assign(k, tf.clip_by_value(k + self.lambd_k * balance, 0., 1.)) # define
D_train_op = update_k # run op
# summaries
# per-step summary
self.summary_op = tf.summary.merge([
tf.summary.scalar('G_loss', G_loss),
tf.summary.scalar('D_loss', D_loss),
tf.summary.scalar('D_energy/real', D_real_energy),
tf.summary.scalar('D_energy/fake', D_fake_energy * (1 - L1_c)),
tf.summary.scalar("G_pix_l1_loss", pixel_energy * L1_c),
tf.summary.scalar('convergence_measure', M),
tf.summary.scalar('balance', balance),
tf.summary.scalar('k', k),
tf.summary.scalar('D_lr', D_lr),
tf.summary.scalar('G_lr', G_lr)
])
# sparse-step summary
# Generator of BEGAN does not use tanh activation func.
# So the generated sample (fake sample) can exceed the image bound [-1, 1].
fake_sample = tf.clip_by_value(G, -1., 1.)
tf.summary.image('generated', fake_sample, max_outputs=self.FAKE_MAX_OUTPUT)
# tf.summary.histogram('G_hist', G) # for checking out of bound
            # histogram all variables
# for var in tf.trainable_variables():
# tf.summary.histogram(var.op.name, var)
self.all_summary_op = tf.summary.merge_all()
            # accessible points
self.X = X
self.z = z
self.c = c
self.D_train_op = D_train_op
self.G_train_op = G_train_op
self.fake_sample = fake_sample
self.global_step = global_step
def _build_gen_graph(self):
'''build computational graph for generation (evaluation)'''
with tf.variable_scope(self.name):
self.z = tf.placeholder(tf.float32, [None, self.z_dim])
self.c = tf.placeholder(tf.float32, [None, self.c_dim])
self.fake_sample = tf.clip_by_value(self._generator(self.z, self.c), -1., 1.)
def _encoder(self, X, reuse=False):
with tf.variable_scope('encoder', reuse=reuse):
nf = self.nf
nh = self.z_dim
with slim.arg_scope([slim.conv2d], kernel_size=[3,3], padding='SAME', activation_fn=tf.nn.elu):
net = slim.conv2d(X, nf)
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf*2, stride=2) # 32x32
net = slim.conv2d(net, nf*2)
net = slim.conv2d(net, nf*2)
net = slim.conv2d(net, nf*3, stride=2) # 16x16
net = slim.conv2d(net, nf*3)
net = slim.conv2d(net, nf*3)
net = slim.conv2d(net, nf*4, stride=2) # 8x8
net = slim.conv2d(net, nf*4)
net = slim.conv2d(net, nf*4)
net = slim.conv2d(net, nf*4)
net = slim.flatten(net)
h = slim.fully_connected(net, nh, activation_fn=None)
return h
def _decoder(self, h, c, reuse=False):
with tf.variable_scope('decoder', reuse=reuse):
nf = self.nf
nh = self.z_dim
zc = tf.concat([h, c], 1)
h0 = slim.fully_connected(zc, 8*8*nf, activation_fn=None) # h0
net = tf.reshape(h0, [-1, 8, 8, nf])
with slim.arg_scope(
[slim.conv2d], kernel_size=[3,3], padding='SAME', activation_fn=tf.nn.elu):
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = tf.image.resize_bilinear(net, [16, 16]) # upsampling
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = tf.image.resize_bilinear(net, [32, 32])
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = tf.image.resize_bilinear(net, [64, 64])
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = slim.conv2d(net, 3, activation_fn=None)
return net
def hourglass_discriminator(self, X, c, reuse=False):
with tf.variable_scope('D', reuse=reuse):
nf = self.nf
nh = self.z_dim
# Encoder
with slim.arg_scope([slim.conv2d], kernel_size=[3,3], padding='SAME', activation_fn=tf.nn.elu):
conv1 = slim.conv2d(X, nf) # 64x64
conv1 = slim.conv2d(conv1, nf) + conv1
conv1 = slim.conv2d(conv1, nf) + conv1
conv2 = slim.conv2d(conv1, nf*2, stride=2) # 32x32
conv2 = slim.conv2d(conv2, nf*2) + conv2
conv2 = slim.conv2d(conv2, nf*2) + conv2
conv3 = slim.conv2d(conv2, nf*3, stride=2) # 16x16
conv3 = slim.conv2d(conv3, nf*3) + conv3
conv3 = slim.conv2d(conv3, nf*3) + conv3
conv4 = slim.conv2d(conv3, nf*4, stride=2) # 8x8
conv4 = slim.conv2d(conv4, nf*4) + conv4
conv4 = slim.conv2d(conv4, nf*4) + conv4
conv4 = slim.conv2d(conv4, nf*4) + conv4
net = slim.flatten(conv4)
h = slim.fully_connected(net, nh, activation_fn=None)
# Skip connections
"""
with slim.arg_scope([slim.conv2d], kernel_size=[3, 3], padding="SAME", activation_fn=tf.nn.elu):
skip1 = conv1
skip2 = slim.conv2d(conv2, nf)
skip3 = slim.conv2d(conv3, nf)
skip4 = slim.conv2d(conv4, nf)
"""
# Decoder
nf = self.nf
nh = self.z_dim
zc = tf.concat([h, c], 1)
h0 = slim.fully_connected(zc, 8*8*nf, activation_fn=None) # h0
fc = tf.reshape(h0, [-1, 8, 8, nf]) #+ skip4
with slim.arg_scope([slim.conv2d], kernel_size=[3,3], padding='SAME', activation_fn=tf.nn.elu):
conv5 = slim.conv2d(fc, nf) + fc
conv5 = slim.conv2d(conv5, nf) + conv5
conv6 = tf.image.resize_bilinear(conv5, [16, 16]) #+ skip3 # upsampling
conv6 = slim.conv2d(conv6, nf) + conv6
conv6 = slim.conv2d(conv6, nf) + conv6
conv7 = tf.image.resize_bilinear(conv6, [32, 32]) #+ skip2
conv7 = slim.conv2d(conv7, nf) + conv7
conv7 = slim.conv2d(conv7, nf) + conv7
conv8 = tf.image.resize_bilinear(conv7, [64, 64]) #+ skip1
conv8 = slim.conv2d(conv8, nf) + conv8
conv8 = slim.conv2d(conv8, nf) + conv8
x_recon = slim.conv2d(conv8, 3, activation_fn=None)
energy = tf.abs(X-x_recon) # L1 loss
energy = tf.reduce_mean(energy)
tf.summary.image('AE_decode', x_recon, max_outputs=self.FAKE_MAX_OUTPUT)
tf.summary.image('AE_input', X, max_outputs=self.FAKE_MAX_OUTPUT)
return energy
def _discriminator(self, X, c, reuse=False):
with tf.variable_scope('D', reuse=reuse):
h = self._encoder(X, reuse=reuse)
x_recon = self._decoder(h, c, reuse=reuse)
energy = tf.abs(X-x_recon) # L1 loss
energy = tf.reduce_mean(energy)
return energy
def _generator(self, z, c, reuse=False):
with tf.variable_scope('G', reuse=reuse):
x_fake = self._decoder(z, c, reuse=reuse)
return x_fake
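# For reference, the objective implemented above is the standard BEGAN balance scheme plus the
# decayed L1 pixel term of this conditional variant (gamma, lambd_k, lambd_l1 set in __init__):
#   L_D     = L(x) - k_t * L(G(z, c))
#   L_G     = (1 - c_l1) * L(G(z, c)) + c_l1 * |x - G(z, c)|_1
#   k_{t+1} = clip(k_t + lambd_k * (gamma * L(x) - L(G(z, c))), 0, 1)
#   M       = L(x) + |gamma * L(x) - L(G(z, c))|
# where L(.) is the autoencoder reconstruction error returned by the discriminator.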
| 11,498 | 41.120879 | 124 | py |
mkbe | mkbe-master/ImgGAN/models/cbegan.py | # coding: utf-8
import tensorflow as tf
import numpy as np
slim = tf.contrib.slim
from utils import expected_shape
import ops
from .basemodel import BaseModel
class BEGAN(BaseModel):
def __init__(self, name, training, D_lr=1e-4, G_lr=1e-4, image_shape=[64, 64, 3], z_dim=64, gamma=0.5, c_dim=200):
self.gamma = gamma
self.c_dim = c_dim
self.decay_step = 600
self.decay_rate = 0.99
self.l1_decay_rate = 0.993
self.beta1 = 0.5
self.lambd_k = 0.001
self.lambd_l1 = 0.1
self.nf = 128
self.lr_lower_bound = 2e-5
super(BEGAN, self).__init__(name=name, training=training, D_lr=D_lr, G_lr=G_lr,
image_shape=image_shape, z_dim=z_dim)
def _build_train_graph(self):
with tf.variable_scope(self.name):
X = tf.placeholder(tf.float32, [None] + self.shape)
z = tf.placeholder(tf.float32, [None, self.z_dim])
c = tf.placeholder(tf.float32, [None, self.c_dim])
global_step = tf.Variable(0, name='global_step', trainable=False)
G = self._generator(z, c)
# Discriminator is not called an energy function in BEGAN. The naming is from EBGAN.
D_real_energy = self._discriminator(X, c)
D_fake_energy = self._discriminator(G, c, reuse=True)
pixel_energy = tf.reduce_mean(tf.abs(X - G))
L1_c = tf.train.exponential_decay(
self.lambd_l1, global_step, self.decay_step, self.l1_decay_rate, staircase=False)
k = tf.Variable(0., name='k', trainable=False)
with tf.variable_scope('D_loss'):
D_loss = D_real_energy - k * D_fake_energy
with tf.variable_scope('G_loss'):
G_loss = D_fake_energy * (1 - L1_c) + L1_c * pixel_energy
with tf.variable_scope('balance'):
balance = self.gamma*D_real_energy - D_fake_energy
with tf.variable_scope('M'):
M = D_real_energy + tf.abs(balance)
D_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name+'/D/')
G_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name+'/G/')
D_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=self.name+'/D/')
G_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=self.name+'/G/')
            # The authors suggest decaying the learning rate by 0.5 when the convergence measure stalls
# carpedm20 decays by 0.5 per 100000 steps
# Heumi decays by 0.95 per 2000 steps (https://github.com/Heumi/BEGAN-tensorflow/)
D_lr = tf.train.exponential_decay(self.D_lr, global_step, self.decay_step, self.decay_rate, staircase=True)
D_lr = tf.maximum(D_lr, self.lr_lower_bound)
G_lr = tf.train.exponential_decay(self.G_lr, global_step, self.decay_step, self.decay_rate, staircase=True)
G_lr = tf.maximum(G_lr, self.lr_lower_bound)
with tf.variable_scope('D_train_op'):
with tf.control_dependencies(D_update_ops):
D_train_op = tf.train.AdamOptimizer(learning_rate=D_lr, beta1=self.beta1).\
minimize(D_loss, var_list=D_vars)
with tf.variable_scope('G_train_op'):
with tf.control_dependencies(G_update_ops):
G_train_op = tf.train.AdamOptimizer(learning_rate=G_lr, beta1=self.beta1).\
minimize(G_loss, var_list=G_vars, global_step=global_step)
            # the op must be *defined* under control_dependencies (not merely run there)
with tf.control_dependencies([D_train_op]): # should be iterable
with tf.variable_scope('update_k'):
update_k = tf.assign(k, tf.clip_by_value(k + self.lambd_k * balance, 0., 1.)) # define
D_train_op = update_k # run op
# summaries
# per-step summary
self.summary_op = tf.summary.merge([
tf.summary.scalar('G_loss', G_loss),
tf.summary.scalar('D_loss', D_loss),
tf.summary.scalar('D_energy/real', D_real_energy),
tf.summary.scalar('D_energy/fake', D_fake_energy * (1 - L1_c)),
tf.summary.scalar("G_pix_l1_loss", pixel_energy * L1_c),
tf.summary.scalar('convergence_measure', M),
tf.summary.scalar('balance', balance),
tf.summary.scalar('k', k),
tf.summary.scalar('D_lr', D_lr),
tf.summary.scalar('G_lr', G_lr)
])
# sparse-step summary
# Generator of BEGAN does not use tanh activation func.
# So the generated sample (fake sample) can exceed the image bound [-1, 1].
fake_sample = tf.clip_by_value(G, -1., 1.)
tf.summary.image('fake_sample', fake_sample, max_outputs=self.FAKE_MAX_OUTPUT)
# tf.summary.histogram('G_hist', G) # for checking out of bound
            # histogram all variables
# for var in tf.trainable_variables():
# tf.summary.histogram(var.op.name, var)
self.all_summary_op = tf.summary.merge_all()
            # accessible points
self.X = X
self.z = z
self.c = c
self.D_train_op = D_train_op
self.G_train_op = G_train_op
self.fake_sample = fake_sample
self.global_step = global_step
def _build_gen_graph(self):
'''build computational graph for generation (evaluation)'''
with tf.variable_scope(self.name):
self.z = tf.placeholder(tf.float32, [None, self.z_dim])
self.c = tf.placeholder(tf.float32, [None, self.c_dim])
self.fake_sample = tf.clip_by_value(self._generator(self.z, self.c), -1., 1.)
def _encoder(self, X, reuse=False):
with tf.variable_scope('encoder', reuse=reuse):
nf = self.nf
nh = self.z_dim
with slim.arg_scope([slim.conv2d], kernel_size=[3,3], padding='SAME', activation_fn=tf.nn.elu):
net = slim.conv2d(X, nf)
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf*2, stride=2) # 32x32
net = slim.conv2d(net, nf*2)
net = slim.conv2d(net, nf*2)
net = slim.conv2d(net, nf*3, stride=2) # 16x16
net = slim.conv2d(net, nf*3)
net = slim.conv2d(net, nf*3)
net = slim.conv2d(net, nf*4, stride=2) # 8x8
net = slim.conv2d(net, nf*4)
net = slim.conv2d(net, nf*4)
net = slim.conv2d(net, nf*4)
net = slim.flatten(net)
h = slim.fully_connected(net, nh, activation_fn=None)
return h
def _decoder(self, h, c, reuse=False):
with tf.variable_scope('decoder', reuse=reuse):
nf = self.nf
nh = self.z_dim
zc = tf.concat([h, c], 1)
h0 = slim.fully_connected(zc, 8*8*nf, activation_fn=None) # h0
net = tf.reshape(h0, [-1, 8, 8, nf])
with slim.arg_scope(
[slim.conv2d], kernel_size=[3,3], padding='SAME', activation_fn=tf.nn.elu):
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = tf.image.resize_bilinear(net, [16, 16]) # upsampling
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = tf.image.resize_bilinear(net, [32, 32])
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = tf.image.resize_bilinear(net, [64, 64])
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = slim.conv2d(net, 3, activation_fn=None)
return net
def _discriminator(self, X, c, reuse=False):
with tf.variable_scope('D', reuse=reuse):
h = self._encoder(X, reuse=reuse)
x_recon = self._decoder(h, c, reuse=reuse)
energy = tf.abs(X-x_recon) # L1 loss
energy = tf.reduce_mean(energy)
return energy
def _generator(self, z, c, reuse=False):
with tf.variable_scope('G', reuse=reuse):
x_fake = self._decoder(z, c, reuse=reuse)
return x_fake
| 8,591 | 41.96 | 124 | py |
mkbe | mkbe-master/ImgGAN/models/ecbegan.py | # coding: utf-8
import tensorflow as tf
import numpy as np
slim = tf.contrib.slim
from utils import expected_shape
import ops
from .basemodel import BaseModel
class BEGAN(BaseModel):
def __init__(self, name, training, D_lr=1e-4, G_lr=1e-4, image_shape=[64, 64, 3], z_dim=64, gamma=0.5, c_dim=200):
self.gamma = gamma
self.c_dim = c_dim
self.decay_step = 600
self.decay_rate = 0.99
self.l1_decay_rate = 0.993
self.beta1 = 0.5
self.lambd_k = 0.001
self.lambd_l1 = 0.1
self.nf = 96
self.lr_lower_bound = 2e-5
super(BEGAN, self).__init__(name=name, training=training, D_lr=D_lr, G_lr=G_lr,
image_shape=image_shape, z_dim=z_dim)
def _build_gen_graph(self):
'''build computational graph for generation (evaluation)'''
with tf.variable_scope(self.name):
self.z = tf.placeholder(tf.float32, [None, self.z_dim])
self.imgid = tf.placeholder(tf.int32, [None, ])
c = self.emb_lookup(self.imgid)
self.fake_sample = tf.clip_by_value(self._generator(self.z, c), -1., 1.)
def _build_train_graph(self):
with tf.variable_scope(self.name):
X = tf.placeholder(tf.float32, [None] + self.shape)
z = tf.placeholder(tf.float32, [None, self.z_dim])
imgid = tf.placeholder(tf.int64, [None,])
global_step = tf.Variable(0, name='global_step', trainable=False)
c = self.emb_lookup(imgid)
G = self._generator(z, c)
            # The discriminator output is not called an energy in the BEGAN paper; the naming here follows EBGAN.
D_real_energy = self._discriminator(X, c)
D_fake_energy = self._discriminator(G, c, reuse=True)
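            # Anneal the weight of the pixel-wise L1 term: L1_c decays from lambd_l1
            # towards a small floor (3e-5), so the generator is guided mostly by pixel
            # reconstruction early in training and by the adversarial energy later on
            # (G_loss below mixes them as (1 - L1_c) * D_fake_energy + L1_c * pixel_energy).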
L1_c = tf.maximum(
tf.train.exponential_decay(self.lambd_l1, global_step, self.decay_step,
self.l1_decay_rate, staircase=False), 3e-5)
pixel_energy = tf.reduce_mean(tf.abs(X - G))
k = tf.Variable(0., name='k', trainable=False)
with tf.variable_scope('D_loss'):
D_loss = D_real_energy - k * D_fake_energy
with tf.variable_scope('G_loss'):
G_loss = D_fake_energy * (1 - L1_c) + L1_c * pixel_energy
with tf.variable_scope('balance'):
balance = self.gamma*D_real_energy - D_fake_energy
with tf.variable_scope('M'):
M = D_real_energy + tf.abs(balance)
D_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name+'/D/')
G_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name+'/G/')
E_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name+'/E/')
D_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=self.name+'/D/')
G_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=self.name+'/G/')
E_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=self.name+'/E/')
            # The authors suggest decaying the learning rate by 0.5 when the convergence measure stalls
# carpedm20 decays by 0.5 per 100000 steps
# Heumi decays by 0.95 per 2000 steps (https://github.com/Heumi/BEGAN-tensorflow/)
D_lr = tf.train.exponential_decay(self.D_lr, global_step, self.decay_step, self.decay_rate, staircase=True)
D_lr = tf.maximum(D_lr, self.lr_lower_bound)
G_lr = tf.train.exponential_decay(self.G_lr, global_step, self.decay_step, self.decay_rate, staircase=True)
G_lr = tf.maximum(G_lr, self.lr_lower_bound)
with tf.variable_scope('D_train_op'):
with tf.control_dependencies(D_update_ops + E_update_ops):
D_train_op = tf.train.AdamOptimizer(learning_rate=D_lr, beta1=self.beta1).\
minimize(D_loss, var_list=D_vars + E_vars)
with tf.variable_scope('G_train_op'):
with tf.control_dependencies(G_update_ops):
G_train_op = tf.train.AdamOptimizer(learning_rate=G_lr, beta1=self.beta1).\
minimize(G_loss, var_list=G_vars, global_step=global_step)
            # The k-update op must be *defined* inside the control_dependencies context below, not merely run there
with tf.control_dependencies([D_train_op]): # should be iterable
with tf.variable_scope('update_k'):
update_k = tf.assign(k, tf.clip_by_value(k + self.lambd_k * balance, 0., 1.)) # define
D_train_op = update_k # run op
# summaries
# per-step summary
self.summary_op = tf.summary.merge([
tf.summary.scalar('G_loss', G_loss),
tf.summary.scalar('D_loss', D_loss),
tf.summary.scalar('D_energy/real', D_real_energy),
tf.summary.scalar('D_energy/fake', D_fake_energy * (1 - L1_c)),
tf.summary.scalar("G_pix_l1_loss", pixel_energy * L1_c),
tf.summary.scalar('convergence_measure', M),
tf.summary.scalar('balance', balance),
tf.summary.scalar('k', k),
tf.summary.scalar('D_lr', D_lr),
tf.summary.scalar('G_lr', G_lr)
])
# sparse-step summary
# Generator of BEGAN does not use tanh activation func.
# So the generated sample (fake sample) can exceed the image bound [-1, 1].
fake_sample = tf.clip_by_value(G, -1., 1.)
tf.summary.image('fake_sample', fake_sample, max_outputs=self.FAKE_MAX_OUTPUT)
# tf.summary.histogram('G_hist', G) # for checking out of bound
# tf.summary.histogram('E_hist', self.emb_mat)
            # histogram of all variables
# for var in tf.trainable_variables():
# tf.summary.histogram(var.op.name, var)
self.all_summary_op = tf.summary.merge_all()
            # accessible points
self.X = X
self.z = z
self.imgid = imgid
self.D_train_op = D_train_op
self.G_train_op = G_train_op
self.fake_sample = fake_sample
self.global_step = global_step
def emb_lookup(self, imgid):
c = tf.nn.embedding_lookup(self.emb(), imgid)
return c
def emb(self, reuse=tf.AUTO_REUSE):
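        # Load pretrained entity embeddings, keep them fixed (tf.constant), and project
        # them with a trainable fully-connected layer (scope 'E') down to the conditioning
        # dimension c_dim that is fed to the generator and discriminator decoders.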
raw_emb = np.load("../assets/yago-weights/ext_emb.npy")
with tf.variable_scope("E", reuse=reuse):
w = tf.constant(raw_emb, dtype=tf.float32)
self.emb_mat = slim.fully_connected(w, self.c_dim, activation_fn=tf.nn.elu)
return self.emb_mat
def _encoder(self, X, reuse=False):
with tf.variable_scope('encoder', reuse=reuse):
nf = self.nf
nh = self.z_dim
with slim.arg_scope([slim.conv2d], kernel_size=[3,3], padding='SAME', activation_fn=tf.nn.elu):
conv1 = slim.conv2d(X, nf)
conv1 = slim.conv2d(conv1, nf) + conv1
conv1 = slim.conv2d(conv1, nf) + conv1
conv2 = slim.conv2d(conv1, nf*2, stride=2) # 32x32
conv2 = slim.conv2d(conv2, nf*2) + conv2
conv2 = slim.conv2d(conv2, nf*2) + conv2
conv3 = slim.conv2d(conv2, nf*3, stride=2) # 16x16
conv3 = slim.conv2d(conv3, nf*3) + conv3
conv3 = slim.conv2d(conv3, nf*3) + conv3
conv4 = slim.conv2d(conv3, nf*4, stride=2) # 8x8
conv4 = slim.conv2d(conv4, nf*4) + conv4
conv4 = slim.conv2d(conv4, nf*4) + conv4
conv4 = slim.conv2d(conv4, nf*4) + conv4
conv4 = slim.flatten(conv4)
h = slim.fully_connected(conv4, nh, activation_fn=None)
return h
def _decoder(self, h, c, reuse=False):
with tf.variable_scope('decoder', reuse=reuse):
nf = self.nf
nh = self.z_dim
zc = tf.concat([h, c], 1)
h0 = slim.fully_connected(zc, 8*8*nf, activation_fn=None) # h0
fc = tf.reshape(h0, [-1, 8, 8, nf])
with slim.arg_scope([slim.conv2d], kernel_size=[3,3], padding='SAME', activation_fn=tf.nn.elu):
conv1 = slim.conv2d(fc, nf) + fc
conv1 = slim.conv2d(conv1, nf) + conv1
conv2 = tf.image.resize_nearest_neighbor(conv1, [16, 16]) # upsampling
conv2 = slim.conv2d(conv2, nf) + conv2
conv2 = slim.conv2d(conv2, nf) + conv2
conv3 = tf.image.resize_nearest_neighbor(conv2, [32, 32])
conv3 = slim.conv2d(conv3, nf) + conv3
conv3 = slim.conv2d(conv3, nf) + conv3
conv4 = tf.image.resize_nearest_neighbor(conv3, [64, 64])
conv4 = slim.conv2d(conv4, nf) + conv4
conv4 = slim.conv2d(conv4, nf) + conv4
dec = slim.conv2d(conv4, 3, activation_fn=None)
return dec
def _discriminator(self, X, c, reuse=False):
with tf.variable_scope('D', reuse=reuse):
h = self._encoder(X, reuse=reuse)
x_recon = self._decoder(h, c, reuse=reuse)
energy = tf.abs(X-x_recon) # L1 loss
energy = tf.reduce_mean(energy)
tf.summary.image('AE_dec', x_recon, max_outputs=self.FAKE_MAX_OUTPUT)
tf.summary.image('AE_inp', X, max_outputs=self.FAKE_MAX_OUTPUT)
return energy
def _generator(self, z, c, reuse=False):
with tf.variable_scope('G', reuse=reuse):
x_fake = self._decoder(z, c, reuse=reuse)
return x_fake
| 9,694 | 42.868778 | 119 | py |
mkbe | mkbe-master/ImgGAN/models/ebgan.py | # coding: utf-8
import tensorflow as tf
slim = tf.contrib.slim
from utils import expected_shape
import ops
from .basemodel import BaseModel
class EBGAN(BaseModel):
def __init__(self, name, training, D_lr=1e-3, G_lr=1e-3, image_shape=[64, 64, 3], z_dim=100,
pt_weight=0.1, margin=20.):
        ''' The default values of pt_weight and margin are taken from the paper (celebA setting). '''
self.pt_weight = pt_weight
self.m = margin
self.beta1 = 0.5
super(EBGAN, self).__init__(name=name, training=training, D_lr=D_lr, G_lr=G_lr,
image_shape=image_shape, z_dim=z_dim)
def _build_train_graph(self):
with tf.variable_scope(self.name):
X = tf.placeholder(tf.float32, [None] + self.shape)
z = tf.placeholder(tf.float32, [None, self.z_dim])
global_step = tf.Variable(0, name='global_step', trainable=False)
G = self._generator(z)
D_real_latent, D_real_energy = self._discriminator(X)
D_fake_latent, D_fake_energy = self._discriminator(G, reuse=True)
D_fake_hinge = tf.maximum(0., self.m - D_fake_energy) # hinge_loss
D_loss = D_real_energy + D_fake_hinge
G_loss = D_fake_energy
PT = self.pt_regularizer(D_fake_latent)
pt_loss = self.pt_weight * PT
G_loss += pt_loss
D_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name+'/D/')
G_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name+'/G/')
D_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=self.name+'/D/')
G_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=self.name+'/G/')
with tf.control_dependencies(D_update_ops):
D_train_op = tf.train.AdamOptimizer(learning_rate=self.D_lr, beta1=self.beta1).\
minimize(D_loss, var_list=D_vars)
with tf.control_dependencies(G_update_ops):
G_train_op = tf.train.AdamOptimizer(learning_rate=self.G_lr, beta1=self.beta1).\
minimize(G_loss, var_list=G_vars, global_step=global_step)
# summaries
# per-step summary
self.summary_op = tf.summary.merge([
tf.summary.scalar('G_loss', G_loss),
tf.summary.scalar('D_loss', D_loss),
tf.summary.scalar('PT', PT),
tf.summary.scalar('pt_loss', pt_loss),
tf.summary.scalar('D_energy/real', D_real_energy),
tf.summary.scalar('D_energy/fake', D_fake_energy),
tf.summary.scalar('D_fake_hinge', D_fake_hinge)
])
# sparse-step summary
tf.summary.image('fake_sample', G, max_outputs=self.FAKE_MAX_OUTPUT)
self.all_summary_op = tf.summary.merge_all()
            # accessible points
self.X = X
self.z = z
self.D_train_op = D_train_op
self.G_train_op = G_train_op
self.fake_sample = G
self.global_step = global_step
def _discriminator(self, X, reuse=False):
with tf.variable_scope('D', reuse=reuse):
net = X
with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], kernel_size=[4,4], stride=2, padding='SAME',
activation_fn=ops.lrelu, normalizer_fn=slim.batch_norm, normalizer_params=self.bn_params):
# encoder
net = slim.conv2d(net, 64, normalizer_fn=None) # 32x32
net = slim.conv2d(net, 128) # 16x16
net = slim.conv2d(net, 256) # 8x8
latent = net
expected_shape(latent, [8, 8, 256])
# decoder
net = slim.conv2d_transpose(net, 128) # 16x16
net = slim.conv2d_transpose(net, 64) # 32x32
x_recon = slim.conv2d_transpose(net, 3, activation_fn=None, normalizer_fn=None)
expected_shape(x_recon, [64, 64, 3])
energy = tf.sqrt(tf.reduce_sum(tf.square(X-x_recon), axis=[1,2,3])) # l2-norm error
energy = tf.reduce_mean(energy)
return latent, energy
def _generator(self, z, reuse=False):
with tf.variable_scope('G', reuse=reuse):
net = z
net = slim.fully_connected(net, 4*4*1024, activation_fn=tf.nn.relu)
net = tf.reshape(net, [-1, 4, 4, 1024])
with slim.arg_scope([slim.conv2d_transpose], kernel_size=[4,4], stride=2, padding='SAME',
activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm, normalizer_params=self.bn_params):
net = slim.conv2d_transpose(net, 512)
expected_shape(net, [8, 8, 512])
net = slim.conv2d_transpose(net, 256)
expected_shape(net, [16, 16, 256])
net = slim.conv2d_transpose(net, 128)
expected_shape(net, [32, 32, 128])
net = slim.conv2d_transpose(net, 3, activation_fn=tf.nn.tanh, normalizer_fn=None)
expected_shape(net, [64, 64, 3])
return net
# lf: latent features
def pt_regularizer(self, lf):
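        '''Pulling-away term (PT) from the EBGAN paper: the mean squared cosine
        similarity over all pairs of latent codes in the batch. Adding it to the
        generator loss pushes the latent representations of generated samples
        apart from each other, which helps discourage mode collapse.'''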
eps = 1e-8 # epsilon for numerical stability
lf = slim.flatten(lf)
# l2_norm = tf.sqrt(tf.reduce_sum(tf.square(lf), axis=1, keep_dims=True))
l2_norm = tf.norm(lf, axis=1, keep_dims=True)
expected_shape(l2_norm, [1])
unit_lf = lf / (l2_norm + eps)
cos_sim = tf.square(tf.matmul(unit_lf, unit_lf, transpose_b=True)) # [N, h_dim] x [h_dim, N] = [N, N]
N = tf.cast(tf.shape(lf)[0], tf.float32) # batch_size
pt_loss = (tf.reduce_sum(cos_sim)-N) / (N*(N-1))
return pt_loss
| 5,804 | 44.351563 | 115 | py |
mkbe | mkbe-master/ImgGAN/models/basemodel.py | # coding: utf-8
'''BaseModel for Generative Adversarial Netowrks.
'''
import tensorflow as tf
slim = tf.contrib.slim
class BaseModel(object):
FAKE_MAX_OUTPUT = 12
def __init__(self, name, training, D_lr, G_lr, image_shape=[64, 64, 3], z_dim=100):
self.name = name
self.shape = image_shape
self.bn_params = {
"decay": 0.99,
"epsilon": 1e-5,
"scale": True,
"is_training": training
}
self.z_dim = z_dim
self.D_lr = D_lr
self.G_lr = G_lr
self.args = vars(self).copy() # dict
        if training:
self._build_train_graph()
else:
self._build_gen_graph()
def _build_gen_graph(self):
'''build computational graph for generation (evaluation)'''
with tf.variable_scope(self.name):
self.z = tf.placeholder(tf.float32, [None, self.z_dim])
self.fake_sample = tf.clip_by_value(self._generator(self.z), -1., 1.)
def _build_train_graph(self, X):
'''build computational graph for training'''
pass | 1,113 | 25.52381 | 87 | py |
mkbe | mkbe-master/ImgGAN/models/__init__.py | from os.path import dirname, basename, isfile
import glob
def get_all_modules_cwd():
modules = glob.glob(dirname(__file__)+"/*.py")
return [basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]
__all__ = get_all_modules_cwd() | 265 | 25.6 | 93 | py |
mkbe | mkbe-master/ImgGAN/models/began.py | # coding: utf-8
import tensorflow as tf
slim = tf.contrib.slim
from utils import expected_shape
import ops
from .basemodel import BaseModel
class BEGAN(BaseModel):
def __init__(self, name, training, D_lr=1e-4, G_lr=1e-4, image_shape=[64, 64, 3], z_dim=64, gamma=0.5):
self.gamma = gamma
self.decay_step = 600
self.decay_rate = 0.985
self.beta1 = 0.5
self.lambd_k = 0.005
self.nf = 128
self.lr_lower_bound = 2e-5
super(BEGAN, self).__init__(name=name, training=training, D_lr=D_lr, G_lr=G_lr,
image_shape=image_shape, z_dim=z_dim)
def _build_train_graph(self):
with tf.variable_scope(self.name):
X = tf.placeholder(tf.float32, [None] + self.shape)
z = tf.placeholder(tf.float32, [None, self.z_dim])
global_step = tf.Variable(0, name='global_step', trainable=False)
G = self._generator(z)
            # The discriminator output is not called an energy in the BEGAN paper; the naming here follows EBGAN.
D_real_energy = self._discriminator(X)
D_fake_energy = self._discriminator(G, reuse=True)
k = tf.Variable(0., name='k', trainable=False)
with tf.variable_scope('D_loss'):
D_loss = D_real_energy - k * D_fake_energy
with tf.variable_scope('G_loss'):
G_loss = D_fake_energy
with tf.variable_scope('balance'):
balance = self.gamma*D_real_energy - D_fake_energy
with tf.variable_scope('M'):
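                # BEGAN global convergence measure: M = L_real + |gamma * L_real - L_fake|.
                # It is only logged (see the 'convergence_measure' summary), not optimized.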
M = D_real_energy + tf.abs(balance)
D_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name+'/D/')
G_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name+'/G/')
D_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=self.name+'/D/')
G_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=self.name+'/G/')
            # The authors suggest decaying the learning rate by 0.5 when the convergence measure stalls
# carpedm20 decays by 0.5 per 100000 steps
# Heumi decays by 0.95 per 2000 steps (https://github.com/Heumi/BEGAN-tensorflow/)
D_lr = tf.train.exponential_decay(self.D_lr, global_step, self.decay_step, self.decay_rate, staircase=True)
D_lr = tf.maximum(D_lr, self.lr_lower_bound)
G_lr = tf.train.exponential_decay(self.G_lr, global_step, self.decay_step, self.decay_rate, staircase=True)
G_lr = tf.maximum(G_lr, self.lr_lower_bound)
with tf.variable_scope('D_train_op'):
with tf.control_dependencies(D_update_ops):
D_train_op = tf.train.AdamOptimizer(learning_rate=D_lr, beta1=self.beta1).\
minimize(D_loss, var_list=D_vars)
with tf.variable_scope('G_train_op'):
with tf.control_dependencies(G_update_ops):
G_train_op = tf.train.AdamOptimizer(learning_rate=G_lr, beta1=self.beta1).\
minimize(G_loss, var_list=G_vars, global_step=global_step)
            # The k-update op must be *defined* inside the control_dependencies context below, not merely run there
with tf.control_dependencies([D_train_op]): # should be iterable
with tf.variable_scope('update_k'):
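                    # Proportional control of the equilibrium term from the BEGAN paper:
                    # k <- clip(k + lambda_k * (gamma * L_real - L_fake), 0, 1),
                    # where `balance` = gamma * D_real_energy - D_fake_energy (computed above).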
update_k = tf.assign(k, tf.clip_by_value(k + self.lambd_k * balance, 0., 1.)) # define
D_train_op = update_k # run op
# summaries
# per-step summary
self.summary_op = tf.summary.merge([
tf.summary.scalar('G_loss', G_loss),
tf.summary.scalar('D_loss', D_loss),
tf.summary.scalar('D_energy/real', D_real_energy),
tf.summary.scalar('D_energy/fake', D_fake_energy),
tf.summary.scalar('convergence_measure', M),
tf.summary.scalar('balance', balance),
tf.summary.scalar('k', k),
tf.summary.scalar('D_lr', D_lr),
tf.summary.scalar('G_lr', G_lr)
])
# sparse-step summary
# Generator of BEGAN does not use tanh activation func.
# So the generated sample (fake sample) can exceed the image bound [-1, 1].
fake_sample = tf.clip_by_value(G, -1., 1.)
tf.summary.image('fake_sample', fake_sample, max_outputs=self.FAKE_MAX_OUTPUT)
tf.summary.histogram('G_hist', G) # for checking out of bound
            # histogram of all variables
# for var in tf.trainable_variables():
# tf.summary.histogram(var.op.name, var)
self.all_summary_op = tf.summary.merge_all()
            # accessible points
self.X = X
self.z = z
self.D_train_op = D_train_op
self.G_train_op = G_train_op
self.fake_sample = fake_sample
self.global_step = global_step
def _encoder(self, X, reuse=False):
with tf.variable_scope('encoder', reuse=reuse):
nf = self.nf
nh = self.z_dim
with slim.arg_scope([slim.conv2d], kernel_size=[3,3], padding='SAME', activation_fn=tf.nn.elu):
net = slim.conv2d(X, nf)
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf*2, stride=2) # 32x32
net = slim.conv2d(net, nf*2)
net = slim.conv2d(net, nf*2)
net = slim.conv2d(net, nf*3, stride=2) # 16x16
net = slim.conv2d(net, nf*3)
net = slim.conv2d(net, nf*3)
net = slim.conv2d(net, nf*4, stride=2) # 8x8
net = slim.conv2d(net, nf*4)
net = slim.conv2d(net, nf*4)
net = slim.conv2d(net, nf*4)
net = slim.flatten(net)
h = slim.fully_connected(net, nh, activation_fn=None)
return h
def _decoder(self, h, reuse=False):
with tf.variable_scope('decoder', reuse=reuse):
nf = self.nf
nh = self.z_dim
h0 = slim.fully_connected(h, 8*8*nf, activation_fn=None) # h0
net = tf.reshape(h0, [-1, 8, 8, nf])
with slim.arg_scope([slim.conv2d], kernel_size=[3,3], padding='SAME', activation_fn=tf.nn.elu):
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = tf.image.resize_nearest_neighbor(net, [16, 16]) # upsampling
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = tf.image.resize_nearest_neighbor(net, [32, 32])
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = tf.image.resize_nearest_neighbor(net, [64, 64])
net = slim.conv2d(net, nf)
net = slim.conv2d(net, nf)
net = slim.conv2d(net, 3, activation_fn=None)
return net
def _discriminator(self, X, reuse=False):
with tf.variable_scope('D', reuse=reuse):
h = self._encoder(X, reuse=reuse)
x_recon = self._decoder(h, reuse=reuse)
energy = tf.abs(X-x_recon) # L1 loss
energy = tf.reduce_mean(energy)
return energy
def _generator(self, z, reuse=False):
with tf.variable_scope('G', reuse=reuse):
x_fake = self._decoder(z, reuse=reuse)
return x_fake
| 7,504 | 41.40113 | 119 | py |
UString | UString-master/main.py | #!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import os, time
import argparse
import shutil
from torch.utils.data import DataLoader
from src.Models import UString
from src.eval_tools import evaluation, print_results, vis_results
import ipdb
import matplotlib.pyplot as plt
from tensorboardX import SummaryWriter
from tqdm import tqdm
from sklearn.metrics import average_precision_score
seed = 123
np.random.seed(seed)
torch.manual_seed(seed)
ROOT_PATH = os.path.dirname(__file__)
def average_losses(losses_all):
total_loss, cross_entropy, log_posterior, log_prior, aux_loss, rank_loss = 0, 0, 0, 0, 0, 0
losses_mean = {}
for losses in losses_all:
total_loss += losses['total_loss']
cross_entropy += losses['cross_entropy']
log_posterior += losses['log_posterior']
log_prior += losses['log_prior']
aux_loss += losses['auxloss']
rank_loss += losses['ranking']
losses_mean['total_loss'] = total_loss / len(losses_all)
losses_mean['cross_entropy'] = cross_entropy / len(losses_all)
losses_mean['log_posterior'] = log_posterior / len(losses_all)
losses_mean['log_prior'] = log_prior / len(losses_all)
losses_mean['auxloss'] = aux_loss / len(losses_all)
losses_mean['ranking'] = rank_loss / len(losses_all)
return losses_mean
def test_all(testdata_loader, model):
all_pred = []
all_labels = []
all_toas = []
losses_all = []
with torch.no_grad():
for i, (batch_xs, batch_ys, graph_edges, edge_weights, batch_toas) in enumerate(testdata_loader):
# run forward inference
losses, all_outputs, hiddens = model(batch_xs, batch_ys, batch_toas, graph_edges,
hidden_in=None, edge_weights=edge_weights, npass=10, nbatch=len(testdata_loader), testing=False)
# make total loss
losses['total_loss'] = p.loss_alpha * (losses['log_posterior'] - losses['log_prior']) + losses['cross_entropy']
losses['total_loss'] += p.loss_beta * losses['auxloss']
losses['total_loss'] += p.loss_yita * losses['ranking']
losses_all.append(losses)
num_frames = batch_xs.size()[1]
batch_size = batch_xs.size()[0]
pred_frames = np.zeros((batch_size, num_frames), dtype=np.float32)
# run inference
for t in range(num_frames):
pred = all_outputs[t]['pred_mean']
pred = pred.cpu().numpy() if pred.is_cuda else pred.detach().numpy()
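                # softmax over the two logits; keep only the probability of the
                # positive (accident) class for frame t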
pred_frames[:, t] = np.exp(pred[:, 1]) / np.sum(np.exp(pred), axis=1)
# gather results and ground truth
all_pred.append(pred_frames)
label_onehot = batch_ys.cpu().numpy()
label = np.reshape(label_onehot[:, 1], [batch_size,])
all_labels.append(label)
toas = np.squeeze(batch_toas.cpu().numpy()).astype(np.int)
all_toas.append(toas)
all_pred = np.vstack((np.vstack(all_pred[:-1]), all_pred[-1]))
all_labels = np.hstack((np.hstack(all_labels[:-1]), all_labels[-1]))
all_toas = np.hstack((np.hstack(all_toas[:-1]), all_toas[-1]))
return all_pred, all_labels, all_toas, losses_all
def test_all_vis(testdata_loader, model, vis=True, multiGPU=False, device=torch.device('cuda')):
if multiGPU:
model = torch.nn.DataParallel(model)
model = model.to(device=device)
model.eval()
all_pred = []
all_labels = []
all_toas = []
vis_data = []
all_uncertains = []
with torch.no_grad():
for i, (batch_xs, batch_ys, graph_edges, edge_weights, batch_toas, detections, video_ids) in tqdm(enumerate(testdata_loader), desc="batch progress", total=len(testdata_loader)):
# run forward inference
losses, all_outputs, hiddens = model(batch_xs, batch_ys, batch_toas, graph_edges,
hidden_in=None, edge_weights=edge_weights, npass=10, nbatch=len(testdata_loader), testing=False, eval_uncertain=True)
num_frames = batch_xs.size()[1]
batch_size = batch_xs.size()[0]
pred_frames = np.zeros((batch_size, num_frames), dtype=np.float32)
pred_uncertains = np.zeros((batch_size, num_frames, 2), dtype=np.float32)
# run inference
for t in range(num_frames):
# prediction
pred = all_outputs[t]['pred_mean'] # B x 2
pred = pred.cpu().numpy() if pred.is_cuda else pred.detach().numpy()
pred_frames[:, t] = np.exp(pred[:, 1]) / np.sum(np.exp(pred), axis=1)
# uncertainties
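                # the scalar aleatoric/epistemic uncertainty per frame is taken as the
                # trace of the corresponding 2x2 predictive covariance matrix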
aleatoric = all_outputs[t]['aleatoric'] # B x 2 x 2
aleatoric = aleatoric.cpu().numpy() if aleatoric.is_cuda else aleatoric.detach().numpy()
epistemic = all_outputs[t]['epistemic'] # B x 2 x 2
epistemic = epistemic.cpu().numpy() if epistemic.is_cuda else epistemic.detach().numpy()
pred_uncertains[:, t, 0] = aleatoric[:, 0, 0] + aleatoric[:, 1, 1]
pred_uncertains[:, t, 1] = epistemic[:, 0, 0] + epistemic[:, 1, 1]
# gather results and ground truth
all_pred.append(pred_frames)
label_onehot = batch_ys.cpu().numpy()
label = np.reshape(label_onehot[:, 1], [batch_size,])
all_labels.append(label)
toas = np.squeeze(batch_toas.cpu().numpy()).astype(np.int)
all_toas.append(toas)
all_uncertains.append(pred_uncertains)
if vis:
# gather data for visualization
vis_data.append({'pred_frames': pred_frames, 'label': label, 'pred_uncertain': pred_uncertains,
'toa': toas, 'detections': detections, 'video_ids': video_ids})
all_pred = np.vstack((np.vstack(all_pred[:-1]), all_pred[-1]))
all_labels = np.hstack((np.hstack(all_labels[:-1]), all_labels[-1]))
all_toas = np.hstack((np.hstack(all_toas[:-1]), all_toas[-1]))
all_uncertains = np.vstack((np.vstack(all_uncertains[:-1]), all_uncertains[-1]))
return all_pred, all_labels, all_toas, all_uncertains, vis_data
def write_scalars(logger, cur_epoch, cur_iter, losses, lr):
# fetch results
total_loss = losses['total_loss'].mean().item()
cross_entropy = losses['cross_entropy'].mean()
log_prior = losses['log_prior'].mean().item()
log_posterior = losses['log_posterior'].mean().item()
aux_loss = losses['auxloss'].mean().item()
rank_loss = losses['ranking'].mean().item()
# print info
print('----------------------------------')
print('epoch: %d, iter: %d' % (cur_epoch, cur_iter))
print('total loss = %.6f' % (total_loss))
print('cross_entropy = %.6f' % (cross_entropy))
print('log_posterior = %.6f' % (log_posterior))
print('log_prior = %.6f' % (log_prior))
print('aux_loss = %.6f' % (aux_loss))
print('rank_loss = %.6f' % (rank_loss))
# write to tensorboard
logger.add_scalars("train/losses/total_loss", {'total_loss': total_loss}, cur_iter)
logger.add_scalars("train/losses/cross_entropy", {'cross_entropy': cross_entropy}, cur_iter)
logger.add_scalars("train/losses/log_posterior", {'log_posterior': log_posterior}, cur_iter)
logger.add_scalars("train/losses/log_prior", {'log_prior': log_prior}, cur_iter)
logger.add_scalars("train/losses/complexity_cost", {'complexity_cost': log_posterior-log_prior}, cur_iter)
logger.add_scalars("train/losses/aux_loss", {'aux_loss': aux_loss}, cur_iter)
logger.add_scalars("train/losses/rank_loss", {'rank_loss': rank_loss}, cur_iter)
# write learning rate
logger.add_scalars("train/learning_rate/lr", {'lr': lr}, cur_iter)
def write_test_scalars(logger, cur_epoch, cur_iter, losses, metrics):
# fetch results
total_loss = losses['total_loss'].mean().item()
cross_entropy = losses['cross_entropy'].mean()
# write to tensorboard
loss_info = {'total_loss': total_loss, 'cross_entropy': cross_entropy}
aux_loss = losses['auxloss'].mean().item()
loss_info.update({'aux_loss': aux_loss})
logger.add_scalars("test/losses/total_loss", loss_info, cur_iter)
logger.add_scalars("test/accuracy/AP", {'AP': metrics['AP']}, cur_iter)
logger.add_scalars("test/accuracy/time-to-accident", {'mTTA': metrics['mTTA'],
'TTA_R80': metrics['TTA_R80']}, cur_iter)
def write_weight_histograms(writer, net, epoch):
writer.add_histogram('histogram/w1_mu', net.predictor.l1.weight_mu, epoch)
writer.add_histogram('histogram/w1_rho', net.predictor.l1.weight_rho, epoch)
writer.add_histogram('histogram/w2_mu', net.predictor.l2.weight_mu, epoch)
writer.add_histogram('histogram/w2_rho', net.predictor.l2.weight_rho, epoch)
writer.add_histogram('histogram/b1_mu', net.predictor.l1.bias_mu, epoch)
writer.add_histogram('histogram/b1_rho', net.predictor.l1.bias_rho, epoch)
writer.add_histogram('histogram/b2_mu', net.predictor.l2.bias_mu, epoch)
writer.add_histogram('histogram/b2_rho', net.predictor.l2.bias_rho, epoch)
def load_checkpoint(model, optimizer=None, filename='checkpoint.pth.tar', isTraining=True):
# Note: Input model & optimizer should be pre-defined. This routine only updates their states.
start_epoch = 0
if os.path.isfile(filename):
checkpoint = torch.load(filename)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['model'])
if isTraining:
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".format(filename, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(filename))
return model, optimizer, start_epoch
def train_eval():
### --- CONFIG PATH ---
data_path = os.path.join(ROOT_PATH, p.data_path, p.dataset)
# model snapshots
model_dir = os.path.join(p.output_dir, p.dataset, 'snapshot')
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# tensorboard logging
logs_dir = os.path.join(p.output_dir, p.dataset, 'logs')
if not os.path.exists(logs_dir):
os.makedirs(logs_dir)
logger = SummaryWriter(logs_dir)
# gpu options
gpu_ids = [int(id) for id in p.gpus.split(',')]
print("Using GPU devices: ", gpu_ids)
os.environ['CUDA_VISIBLE_DEVICES'] = p.gpus
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# create data loader
if p.dataset == 'dad':
from src.DataLoader import DADDataset
train_data = DADDataset(data_path, p.feature_name, 'training', toTensor=True, device=device)
test_data = DADDataset(data_path, p.feature_name, 'testing', toTensor=True, device=device)
elif p.dataset == 'a3d':
from src.DataLoader import A3DDataset
train_data = A3DDataset(data_path, p.feature_name, 'train', toTensor=True, device=device)
test_data = A3DDataset(data_path, p.feature_name, 'test', toTensor=True, device=device)
elif p.dataset == 'crash':
from src.DataLoader import CrashDataset
train_data = CrashDataset(data_path, p.feature_name, 'train', toTensor=True, device=device)
test_data = CrashDataset(data_path, p.feature_name, 'test', toTensor=True, device=device)
else:
raise NotImplementedError
traindata_loader = DataLoader(dataset=train_data, batch_size=p.batch_size, shuffle=True, drop_last=True)
testdata_loader = DataLoader(dataset=test_data, batch_size=p.batch_size, shuffle=False, drop_last=True)
# building model
model = UString(train_data.dim_feature, p.hidden_dim, p.latent_dim,
n_layers=p.num_rnn, n_obj=train_data.n_obj, n_frames=train_data.n_frames, fps=train_data.fps,
with_saa=True, uncertain_ranking=True)
# optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=p.base_lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5)
if len(gpu_ids) > 1:
model = torch.nn.DataParallel(model)
model = model.to(device=device)
model.train() # set the model into training status
# resume training
start_epoch = -1
if p.resume:
model, optimizer, start_epoch = load_checkpoint(model, optimizer=optimizer, filename=p.model_file)
# write histograms
write_weight_histograms(logger, model, 0)
iter_cur = 0
best_metric = 0
for k in range(p.epoch):
if k <= start_epoch:
iter_cur += len(traindata_loader)
continue
for i, (batch_xs, batch_ys, graph_edges, edge_weights, batch_toas) in enumerate(traindata_loader):
# ipdb.set_trace()
optimizer.zero_grad()
losses, all_outputs, hidden_st = model(batch_xs, batch_ys, batch_toas, graph_edges, edge_weights=edge_weights, npass=2, nbatch=len(traindata_loader), eval_uncertain=True)
complexity_loss = losses['log_posterior'] - losses['log_prior']
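            # Variational (Bayes-by-Backprop style) objective: the complexity cost
            # log q(w) - log p(w) approximates the KL between the weight posterior and the
            # prior; it is weighted by loss_alpha and added to the cross-entropy term,
            # the auxiliary loss from the self-aggregation branch (loss_beta), and the
            # uncertainty-ranking loss (loss_yita).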
losses['total_loss'] = p.loss_alpha * complexity_loss + losses['cross_entropy']
losses['total_loss'] += p.loss_beta * losses['auxloss']
losses['total_loss'] += p.loss_yita * losses['ranking']
# backward
losses['total_loss'].mean().backward()
# clip gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), 10)
optimizer.step()
# write the losses info
lr = optimizer.param_groups[0]['lr']
write_scalars(logger, k, iter_cur, losses, lr)
iter_cur += 1
# test and evaluate the model
if iter_cur % p.test_iter == 0:
model.eval()
all_pred, all_labels, all_toas, losses_all = test_all(testdata_loader, model)
model.train()
loss_val = average_losses(losses_all)
print('----------------------------------')
print("Starting evaluation...")
metrics = {}
metrics['AP'], metrics['mTTA'], metrics['TTA_R80'] = evaluation(all_pred, all_labels, all_toas, fps=test_data.fps)
print('----------------------------------')
# keep track of validation losses
write_test_scalars(logger, k, iter_cur, loss_val, metrics)
# save model
model_file = os.path.join(model_dir, 'bayesian_gcrnn_model_%02d.pth'%(k))
torch.save({'epoch': k,
'model': model.module.state_dict() if len(gpu_ids)>1 else model.state_dict(),
'optimizer': optimizer.state_dict()}, model_file)
if metrics['AP'] > best_metric:
best_metric = metrics['AP']
# update best model file
update_final_model(model_file, os.path.join(model_dir, 'final_model.pth'))
print('Model has been saved as: %s'%(model_file))
scheduler.step(losses['log_posterior'])
# write histograms
write_weight_histograms(logger, model, k+1)
logger.close()
def update_final_model(src_file, dest_file):
# source file must exist
assert os.path.exists(src_file), "src file does not exist!"
    # destination file should be removed first if it exists
if os.path.exists(dest_file):
if not os.path.samefile(src_file, dest_file):
os.remove(dest_file)
# copy file
shutil.copyfile(src_file, dest_file)
def test_eval():
### --- CONFIG PATH ---
data_path = os.path.join(ROOT_PATH, p.data_path, p.dataset)
# result path
result_dir = os.path.join(p.output_dir, p.dataset, 'test')
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# visualization results
p.visualize = False if p.evaluate_all else p.visualize
vis_dir = None
if p.visualize:
vis_dir = os.path.join(result_dir, 'vis')
if not os.path.exists(vis_dir):
os.makedirs(vis_dir)
# gpu options
gpu_ids = [int(id) for id in p.gpus.split(',')]
os.environ['CUDA_VISIBLE_DEVICES'] = p.gpus
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# create data loader
if p.dataset == 'dad':
from src.DataLoader import DADDataset
test_data = DADDataset(data_path, p.feature_name, 'testing', toTensor=True, device=device, vis=True)
elif p.dataset == 'a3d':
from src.DataLoader import A3DDataset
test_data = A3DDataset(data_path, p.feature_name, 'test', toTensor=True, device=device, vis=True)
elif p.dataset == 'crash':
from src.DataLoader import CrashDataset
test_data = CrashDataset(data_path, p.feature_name, 'test', toTensor=True, device=device, vis=True)
else:
raise NotImplementedError
testdata_loader = DataLoader(dataset=test_data, batch_size=p.batch_size, shuffle=False, drop_last=True)
num_samples = len(test_data)
print("Number of testing samples: %d"%(num_samples))
# building model
model = UString(test_data.dim_feature, p.hidden_dim, p.latent_dim,
n_layers=p.num_rnn, n_obj=test_data.n_obj, n_frames=test_data.n_frames, fps=test_data.fps,
with_saa=True, uncertain_ranking=True)
# start to evaluate
if p.evaluate_all:
model_dir = os.path.join(p.output_dir, p.dataset, 'snapshot')
assert os.path.exists(model_dir)
Epochs, APvid_all, AP_all, mTTA_all, TTA_R80_all, Unc_all = [], [], [], [], [], []
modelfiles = sorted(os.listdir(model_dir))
for filename in modelfiles:
epoch_str = filename.split("_")[-1].split(".pth")[0]
print("Evaluation for epoch: " + epoch_str)
model_file = os.path.join(model_dir, filename)
model, _, _ = load_checkpoint(model, filename=model_file, isTraining=False)
# run model inference
all_pred, all_labels, all_toas, all_uncertains, _ = test_all_vis(testdata_loader, model, vis=False, device=device)
# evaluate results
AP, mTTA, TTA_R80 = evaluation(all_pred, all_labels, all_toas, fps=test_data.fps)
mUncertains = np.mean(all_uncertains, axis=(0, 1))
all_vid_scores = [max(pred[:int(toa)]) for toa, pred in zip(all_toas, all_pred)]
AP_video = average_precision_score(all_labels, all_vid_scores)
APvid_all.append(AP_video)
# save
Epochs.append(epoch_str)
AP_all.append(AP)
mTTA_all.append(mTTA)
TTA_R80_all.append(TTA_R80)
Unc_all.append(mUncertains)
# print results to file
print_results(Epochs, APvid_all, AP_all, mTTA_all, TTA_R80_all, Unc_all, result_dir)
else:
result_file = os.path.join(vis_dir, "..", "pred_res.npz")
if not os.path.exists(result_file):
model, _, _ = load_checkpoint(model, filename=p.model_file, isTraining=False)
# run model inference
all_pred, all_labels, all_toas, all_uncertains, vis_data = test_all_vis(testdata_loader, model, vis=True, device=device)
# save predictions
np.savez(result_file[:-4], pred=all_pred, label=all_labels, toas=all_toas, uncertainties=all_uncertains, vis_data=vis_data)
else:
print("Result file exists. Loaded from cache.")
all_results = np.load(result_file, allow_pickle=True)
all_pred, all_labels, all_toas, all_uncertains, vis_data = \
all_results['pred'], all_results['label'], all_results['toas'], all_results['uncertainties'], all_results['vis_data']
# evaluate results
all_vid_scores = [max(pred[:int(toa)]) for toa, pred in zip(all_toas, all_pred)]
AP_video = average_precision_score(all_labels, all_vid_scores)
print("video-level AP=%.5f"%(AP_video))
AP, mTTA, TTA_R80 = evaluation(all_pred, all_labels, all_toas, fps=test_data.fps)
# evaluate uncertainties
mUncertains = np.mean(all_uncertains, axis=(0, 1))
print("Mean aleatoric uncertainty: %.6f"%(mUncertains[0]))
print("Mean epistemic uncertainty: %.6f"%(mUncertains[1]))
# visualize
vis_results(vis_data, p.batch_size, vis_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, default='./data',
help='The relative path of dataset.')
parser.add_argument('--dataset', type=str, default='dad', choices=['a3d', 'dad', 'crash'],
help='The name of dataset. Default: dad')
parser.add_argument('--base_lr', type=float, default=1e-3,
help='The base learning rate. Default: 1e-3')
parser.add_argument('--epoch', type=int, default=30,
                        help='The number of training epochs. Default: 30')
parser.add_argument('--batch_size', type=int, default=10,
help='The batch size in training process. Default: 10')
parser.add_argument('--num_rnn', type=int, default=1,
help='The number of RNN cells for each timestamp. Default: 1')
parser.add_argument('--feature_name', type=str, default='vgg16', choices=['vgg16', 'res101'],
help='The name of feature embedding methods. Default: vgg16')
parser.add_argument('--test_iter', type=int, default=64,
help='The number of iteration to perform a evaluation process. Default: 64')
parser.add_argument('--hidden_dim', type=int, default=256,
help='The dimension of hidden states in RNN. Default: 256')
parser.add_argument('--latent_dim', type=int, default=256,
help='The dimension of latent space. Default: 256')
parser.add_argument('--loss_alpha', type=float, default=0.001,
help='The weighting factor of posterior and prior losses. Default: 1e-3')
parser.add_argument('--loss_beta', type=float, default=10,
help='The weighting factor of auxiliary loss. Default: 10')
parser.add_argument('--loss_yita', type=float, default=10,
help='The weighting factor of uncertainty ranking loss. Default: 10')
parser.add_argument('--gpus', type=str, default="0",
help="The delimited list of GPU IDs separated with comma. Default: '0'.")
parser.add_argument('--phase', type=str, choices=['train', 'test'],
help='The state of running the model. Default: train')
parser.add_argument('--evaluate_all', action='store_true',
                        help='Whether to evaluate models of all epochs. Default: False')
parser.add_argument('--visualize', action='store_true',
help='The visualization flag. Default: False')
parser.add_argument('--resume', action='store_true',
help='If to resume the training. Default: False')
parser.add_argument('--model_file', type=str, default='./output_debug/bayes_gcrnn/vgg16/dad/snapshot/gcrnn_model_90.pth',
help='The trained GCRNN model file for demo test only.')
parser.add_argument('--output_dir', type=str, default='./output_debug/bayes_gcrnn/vgg16',
                        help='The directory where training outputs are saved.')
p = parser.parse_args()
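    # Example usage (paths and GPU ids are placeholders; adjust to your setup):
    #   python main.py --dataset dad --feature_name vgg16 --phase train --gpus 0
    #   python main.py --dataset dad --feature_name vgg16 --phase test --model_file <trained .pth> --visualize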
if p.phase == 'test':
test_eval()
else:
train_eval()
| 23,703 | 48.280665 | 185 | py |
UString | UString-master/demo.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
import os, sys
import os.path as osp
import argparse
import torch
import torch.nn as nn
from torchvision import models, transforms
from PIL import Image
import matplotlib.pyplot as plt
class VGG16(nn.Module):
def __init__(self):
super(VGG16, self).__init__()
VGG = models.vgg16(pretrained=True)
self.feature = VGG.features
self.classifier = nn.Sequential(*list(VGG.classifier.children())[:-3])
pretrained_dict = VGG.state_dict()
model_dict = self.classifier.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
self.classifier.load_state_dict(model_dict)
self.dim_feat = 4096
def forward(self, x):
output = self.feature(x)
output = output.view(output.size(0), -1)
output = self.classifier(output)
return output
def init_feature_extractor(backbone='vgg16', device=torch.device('cuda')):
feat_extractor = None
if backbone == 'vgg16':
feat_extractor = VGG16()
feat_extractor = feat_extractor.to(device=device)
feat_extractor.eval()
else:
raise NotImplementedError
return feat_extractor
def bbox_sampling(bbox_result, nbox=19, imsize=None, topN=5):
"""
imsize[0]: height
imsize[1]: width
"""
assert not isinstance(bbox_result, tuple)
bboxes = np.vstack(bbox_result) # n x 5
labels = [np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result)]
labels = np.concatenate(labels) # n
ndet = bboxes.shape[0]
    # clip boxes to the image boundary and drop degenerate (too small) ones
new_boxes = []
for box, label in zip(bboxes, labels):
x1 = min(max(0, int(box[0])), imsize[1])
y1 = min(max(0, int(box[1])), imsize[0])
x2 = min(max(x1 + 1, int(box[2])), imsize[1])
y2 = min(max(y1 + 1, int(box[3])), imsize[0])
if (y2 - y1 + 1 > 2) and (x2 - x1 + 1 > 2):
new_boxes.append([x1, y1, x2, y2, box[4], label])
if len(new_boxes) == 0: # no bboxes
new_boxes.append([0, 0, imsize[1]-1, imsize[0]-1, 1.0, 0])
new_boxes = np.array(new_boxes, dtype=int)
    # pad (by resampling from the first topN candidates) or truncate so that exactly nbox boxes are returned
n_candidate = min(topN, len(new_boxes))
if len(new_boxes) <= nbox - n_candidate:
indices = np.random.choice(n_candidate, nbox - len(new_boxes), replace=True)
sampled_boxes = np.vstack((new_boxes, new_boxes[indices]))
elif len(new_boxes) > nbox - n_candidate and len(new_boxes) <= nbox:
indices = np.random.choice(n_candidate, nbox - len(new_boxes), replace=False)
sampled_boxes = np.vstack((new_boxes, new_boxes[indices]))
else:
sampled_boxes = new_boxes[:nbox]
return sampled_boxes
def bbox_to_imroi(transform, bboxes, image):
imroi_data = []
for bbox in bboxes:
imroi = image[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
imroi = transform(Image.fromarray(imroi)) # (3, 224, 224), torch.Tensor
imroi_data.append(imroi)
imroi_data = torch.stack(imroi_data)
return imroi_data
def extract_features(detector, feat_extractor, video_file, n_frames=100, n_boxes=19):
    assert os.path.exists(video_file), video_file
# prepare video reader and data transformer
videoReader = mmcv.VideoReader(video_file)
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor()]
)
features = np.zeros((n_frames, n_boxes + 1, feat_extractor.dim_feat), dtype=np.float32)
detections = np.zeros((n_frames, n_boxes, 6)) # (50 x 19 x 6)
frame_prev = None
for idx in range(n_frames):
if idx >= len(videoReader):
print("Copy frame from previous time step.")
frame = frame_prev.copy()
else:
frame = videoReader.get_frame(idx)
# run object detection inference
bbox_result = inference_detector(detector, frame)
# sampling a fixed number of bboxes
bboxes = bbox_sampling(bbox_result, nbox=n_boxes, imsize=frame.shape[:2])
detections[idx, :, :] = bboxes
# prepare frame data
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
with torch.no_grad():
# bboxes to roi feature
ims_roi = bbox_to_imroi(transform, bboxes, frame)
ims_roi = ims_roi.float().to(device=device)
feature_roi = feat_extractor(ims_roi)
# extract image feature
ims_frame = transform(Image.fromarray(frame))
ims_frame = torch.unsqueeze(ims_frame, dim=0).float().to(device=device)
feature_frame = feat_extractor(ims_frame)
# obtain feature matrix
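            # row 0 of the per-frame feature matrix holds the full-frame feature;
            # rows 1..n_boxes hold the ROI features of the sampled object boxes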
features[idx, 0, :] = np.squeeze(feature_frame.cpu().numpy()) if feature_frame.is_cuda else np.squeeze(feature_frame.detach().numpy())
features[idx, 1:, :] = np.squeeze(feature_roi.cpu().numpy()) if feature_roi.is_cuda else np.squeeze(feature_roi.detach().numpy())
frame_prev = frame
return detections, features
def init_accident_model(model_file, dim_feature=4096, hidden_dim=256, latent_dim=256, n_obj=19, n_frames=50, fps=10.0):
# building model
model = UString(dim_feature, hidden_dim, latent_dim,
n_layers=1, n_obj=n_obj, n_frames=n_frames, fps=fps, with_saa=False, uncertain_ranking=True)
model = model.to(device=device)
model.eval()
# load check point
model, _, _ = load_checkpoint(model, filename=model_file, isTraining=False)
return model
def load_input_data(feature_file, device=torch.device('cuda')):
# load feature file and return the transformed data
data = np.load(feature_file)
features = data['data'] # 50 x 20 x 4096
labels = [0, 1]
detections = data['det'] # 50 x 19 x 6
toa = [45] # [useless]
def generate_st_graph(detections):
# create graph edges
num_frames, num_boxes = detections.shape[:2]
num_edges = int(num_boxes * (num_boxes - 1) / 2)
graph_edges = []
edge_weights = np.zeros((num_frames, num_edges), dtype=np.float32)
for i in range(num_frames):
# generate graph edges (fully-connected)
edge = generate_graph_from_list(range(num_boxes))
graph_edges.append(np.transpose(np.stack(edge).astype(np.int32))) # 2 x 171
# compute the edge weights by distance
edge_weights[i] = compute_graph_edge_weights(detections[i, :, :4], edge) # 171,
return graph_edges, edge_weights
def generate_graph_from_list(L, create_using=None):
import networkx, itertools
G = networkx.empty_graph(len(L),create_using)
if len(L)>1:
if G.is_directed():
edges = itertools.permutations(L,2)
else:
edges = itertools.combinations(L,2)
G.add_edges_from(edges)
graph_edges = list(G.edges())
return graph_edges
def compute_graph_edge_weights(boxes, edges):
"""
:param: boxes: (19, 4)
:param: edges: (171, 2)
:return: weights: (171,)
"""
N = boxes.shape[0]
assert len(edges) == N * (N-1) / 2
weights = np.ones((len(edges),), dtype=np.float32)
for i, edge in enumerate(edges):
c1 = [0.5 * (boxes[edge[0], 0] + boxes[edge[0], 2]),
0.5 * (boxes[edge[0], 1] + boxes[edge[0], 3])]
c2 = [0.5 * (boxes[edge[1], 0] + boxes[edge[1], 2]),
0.5 * (boxes[edge[1], 1] + boxes[edge[1], 3])]
d = (c1[0] - c2[0])**2 + (c1[1] - c2[1])**2
weights[i] = np.exp(-d)
# normalize weights
if np.sum(weights) > 0:
weights = weights / np.sum(weights) # N*(N-1)/2,
else:
weights = np.ones((len(edges),), dtype=np.float32)
return weights
graph_edges, edge_weights = generate_st_graph(detections)
# transform to torch.Tensor
features = torch.Tensor(np.expand_dims(features, axis=0)).to(device) # 50 x 20 x 4096
labels = torch.Tensor(np.expand_dims(labels, axis=0)).to(device)
graph_edges = torch.Tensor(np.expand_dims(graph_edges, axis=0)).long().to(device)
edge_weights = torch.Tensor(np.expand_dims(edge_weights, axis=0)).to(device)
toa = torch.Tensor(np.expand_dims(toa, axis=0)).to(device)
detections = np.expand_dims(detections, axis=0)
vid = feature_file.split('/')[-1].split('.')[0]
return features, labels, graph_edges, edge_weights, toa, detections, vid
def load_checkpoint(model, optimizer=None, filename='checkpoint.pth.tar', isTraining=True):
# Note: Input model & optimizer should be pre-defined. This routine only updates their states.
start_epoch = 0
if os.path.isfile(filename):
checkpoint = torch.load(filename)
start_epoch = checkpoint['epoch']
# filter out modules only used in training
pretrained_dict = {k: v for k, v in checkpoint['model'].items() if not any(filtered in k for filtered in ['self_aggregation', 'predictor_aux'])}
model.load_state_dict(pretrained_dict)
# model.load_state_dict(checkpoint['model'])
if isTraining:
optimizer.load_state_dict(checkpoint['optimizer'])
# print("=> loaded checkpoint '{}' (epoch {})".format(filename, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(filename))
return model, optimizer, start_epoch
def parse_results(all_outputs, batch_size=1, n_frames=50):
# parse inference results
pred_score = np.zeros((batch_size, n_frames), dtype=np.float32)
pred_au = np.zeros((batch_size, n_frames), dtype=np.float32)
pred_eu = np.zeros((batch_size, n_frames), dtype=np.float32)
# run inference
for t in range(n_frames):
# prediction
pred = all_outputs[t]['pred_mean'] # B x 2
pred = pred.cpu().numpy() if pred.is_cuda else pred.detach().numpy()
pred_score[:, t] = np.exp(pred[:, 1]) / np.sum(np.exp(pred), axis=1)
# uncertainties
aleatoric = all_outputs[t]['aleatoric'] # B x 2 x 2
aleatoric = aleatoric.cpu().numpy() if aleatoric.is_cuda else aleatoric.detach().numpy()
epistemic = all_outputs[t]['epistemic'] # B x 2 x 2
epistemic = epistemic.cpu().numpy() if epistemic.is_cuda else epistemic.detach().numpy()
pred_au[:, t] = aleatoric[:, 0, 0] + aleatoric[:, 1, 1]
pred_eu[:, t] = epistemic[:, 0, 0] + epistemic[:, 1, 1]
return pred_score, pred_au, pred_eu
def get_video_frames(video_file, n_frames=50):
# get the video data
cap = cv2.VideoCapture(video_file)
ret, frame = cap.read()
video_data = []
counter = 0
while (ret):
video_data.append(frame)
ret, frame = cap.read()
counter += 1
assert len(video_data) >= n_frames, video_file
video_data = video_data[:n_frames]
return video_data
def preprocess_results(pred_score, aleatoric, epistemic, cumsum=False):
from scipy.interpolate import make_interp_spline
std_alea = 1.0 * np.sqrt(aleatoric)
std_epis = 1.0 * np.sqrt(epistemic)
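    # For a smoother display curve, downsample the per-frame scores to 10 control
    # points and re-interpolate them (and the uncertainty bands) back to n_frames
    # values with a cubic B-spline (make_interp_spline).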
# sampling
xvals = np.linspace(0,len(pred_score)-1,10)
pred_mean_reduce = pred_score[xvals.astype(np.int)]
pred_std_alea_reduce = std_alea[xvals.astype(np.int)]
pred_std_epis_reduce = std_epis[xvals.astype(np.int)]
# smoothing
xvals_new = np.linspace(1,len(pred_score)+1, p.n_frames)
pred_score = make_interp_spline(xvals, pred_mean_reduce)(xvals_new)
std_alea = make_interp_spline(xvals, pred_std_alea_reduce)(xvals_new)
std_epis = make_interp_spline(xvals, pred_std_epis_reduce)(xvals_new)
pred_score[pred_score >= 1.0] = 1.0-1e-3
xvals = np.copy(xvals_new)
# copy the first value into x=0
xvals = np.insert(xvals_new, 0, 0)
pred_score = np.insert(pred_score, 0, pred_score[0])
std_alea = np.insert(std_alea, 0, std_alea[0])
std_epis = np.insert(std_epis, 0, std_epis[0])
    # take cumulative sum of results
if cumsum:
pred_score = np.cumsum(pred_score)
pred_score = pred_score / np.max(pred_score)
return xvals, pred_score, std_alea, std_epis
def draw_curve(xvals, pred_score, std_alea, std_epis):
ax.fill_between(xvals, pred_score - std_alea, pred_score + std_alea, facecolor='wheat', alpha=0.5)
ax.fill_between(xvals, pred_score - std_epis, pred_score + std_epis, facecolor='yellow', alpha=0.5)
plt.plot(xvals, pred_score, linewidth=3.0)
plt.axhline(y=0.5, xmin=0, xmax=max(xvals)/(p.n_frames + 2), linewidth=3.0, color='g', linestyle='--')
# plt.grid(True)
plt.tight_layout()
def set_random_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
np.random.seed(seed) # Numpy module.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, default='visualize', choices=['extract_feature', 'inference', 'visualize'])
parser.add_argument('--gpu_id', help='GPU ID', type=int, default=0)
parser.add_argument('--n_frames', type=int, help='The number of input video frames.', default=50)
parser.add_argument('--seed', type=int, help='The random seed.', default=123)
parser.add_argument('--fps', type=float, help='The fps of input video.', default=10.0)
parser.add_argument('--fps_display', type=float, help='The fps of output video.', default=2.0)
# feature extraction
parser.add_argument('--video_file', type=str, default='demo/000821.mp4')
parser.add_argument('--mmdetection', type=str, help="the path to the mmdetection.", default="lib/mmdetection")
# inference
parser.add_argument('--feature_file', type=str, help="the path to the feature file.", default="demo/000821_feature.npz")
parser.add_argument('--ckpt_file', type=str, help="the path to the model file.", default="demo/final_model_ccd.pth")
# visualize
parser.add_argument('--result_file', type=str, help="the path to the result file.", default="demo/000821_result.npz")
parser.add_argument('--vis_file', type=str, help="the path to the visualization file.", default="demo/000821_vis.avi")
p = parser.parse_args()
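    # Example usage (hypothetical paths; adjust to your setup):
    #   python demo.py --task extract_feature --video_file demo/000821.mp4
    #   python demo.py --task inference --feature_file demo/000821_feature.npz --ckpt_file demo/final_model_ccd.pth
    #   python demo.py --task visualize --video_file demo/000821.mp4 --result_file demo/000821_result.npz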
set_random_seed(p.seed)
device = torch.device('cuda:'+str(p.gpu_id)) if torch.cuda.is_available() else torch.device('cpu')
if p.task == 'extract_feature':
from mmdet.apis import init_detector, inference_detector, show_result
import mmcv
# init object detector
cfg_file = osp.join(p.mmdetection, "configs/cascade_rcnn_x101_64x4d_fpn_1x_kitti2d.py")
model_file = osp.join(p.mmdetection, "work_dirs/cascade_rcnn_x101_64x4d_fpn_1x_kitti2d/latest.pth")
detector = init_detector(cfg_file, model_file, device=device)
# init feature extractor
feat_extractor = init_feature_extractor(backbone='vgg16', device=device)
# object detection & feature extraction
detections, features = extract_features(detector, feat_extractor, p.video_file, n_frames=p.n_frames)
feat_file = p.video_file[:-4] + '_feature.npz'
np.savez_compressed(feat_file, data=features, det=detections)
elif p.task == 'inference':
from src.Models import UString
# load feature file
features, labels, graph_edges, edge_weights, toa, detections, vid = load_input_data(p.feature_file, device=device)
# prepare model
model = init_accident_model(p.ckpt_file, dim_feature=features.shape[-1], n_frames=p.n_frames, fps=p.fps)
with torch.no_grad():
# run inference
_, all_outputs, _ = model(features, labels, toa, graph_edges, hidden_in=None, edge_weights=edge_weights, npass=10, eval_uncertain=True)
# parse and save results
pred_score, pred_au, pred_eu = parse_results(all_outputs, n_frames=p.n_frames)
result_file = osp.join(osp.dirname(p.feature_file), p.feature_file.split('/')[-1].split('_')[0] + '_result.npz')
np.savez_compressed(result_file, score=pred_score[0], aleatoric=pred_au[0], epistemic=pred_eu[0], det=detections[0])
elif p.task == 'visualize':
video_data = get_video_frames(p.video_file, n_frames=p.n_frames)
all_results = np.load(p.result_file, allow_pickle=True)
pred_score, aleatoric, epistemic, detections = all_results['score'], all_results['aleatoric'], all_results['epistemic'], all_results['det']
xvals, pred_score, std_alea, std_epis = preprocess_results(pred_score, aleatoric, epistemic, cumsum=False)
fig, ax = plt.subplots(1, figsize=(24, 3.5))
fontsize = 25
plt.ylim(0, 1.1)
plt.xlim(0, len(xvals)+1)
plt.ylabel('Probability', fontsize=fontsize)
plt.xlabel('Frame (FPS=%d)'%(p.fps), fontsize=fontsize)
plt.xticks(range(0, len(xvals)+1, int(p.n_frames / p.fps_display)), fontsize=fontsize)
plt.yticks(fontsize=fontsize)
from matplotlib.animation import FFMpegWriter
curve_writer = FFMpegWriter(fps=p.fps_display, metadata=dict(title='Movie Test', artist='Matplotlib',comment='Movie support!'))
with curve_writer.saving(fig, "demo/curve_video.mp4", 100):
for t in range(len(xvals)):
draw_curve(xvals[:(t+1)], pred_score[:(t+1)], std_alea[:(t+1)], std_epis[:(t+1)])
curve_writer.grab_frame()
curve_frames = get_video_frames("demo/curve_video.mp4", n_frames=p.n_frames)
# create video writer
video_writer = cv2.VideoWriter(p.vis_file, cv2.VideoWriter_fourcc(*'DIVX'), p.fps_display, (video_data[0].shape[1], video_data[0].shape[0]))
for t, frame in enumerate(video_data):
det_boxes = detections[t] # 19 x 6
for box in det_boxes:
if box[4] > 0:
print(box[4])
cv2.rectangle(frame, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0), 3)
img = curve_frames[t]
width = frame.shape[1]
height = int(img.shape[0] * (width / img.shape[1]))
img = cv2.resize(img, (width, height), interpolation = cv2.INTER_AREA)
frame[frame.shape[0]-height:frame.shape[0]] = cv2.addWeighted(frame[frame.shape[0]-height:frame.shape[0]], 0.3, img, 0.7, 0)
video_writer.write(frame)
else:
print("invalid task.")
| 18,597 | 44.920988 | 152 | py |
UString | UString-master/src/DataLoader.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import pickle
import torch
from torch.utils.data import Dataset
import networkx
import itertools
class DADDataset(Dataset):
def __init__(self, data_path, feature, phase='training', toTensor=False, device=torch.device('cuda'), vis=False):
self.data_path = os.path.join(data_path, feature + '_features')
self.feature = feature
self.phase = phase
self.toTensor = toTensor
self.device = device
self.vis = vis
self.n_frames = 100
self.n_obj = 19
self.fps = 20.0
self.dim_feature = self.get_feature_dim(feature)
filepath = os.path.join(self.data_path, phase)
self.files_list = self.get_filelist(filepath)
def __len__(self):
data_len = len(self.files_list)
return data_len
def get_feature_dim(self, feature_name):
if feature_name == 'vgg16':
return 4096
elif feature_name == 'res101':
return 2048
else:
raise ValueError
def get_filelist(self, filepath):
assert os.path.exists(filepath), "Directory does not exist: %s"%(filepath)
file_list = []
for filename in sorted(os.listdir(filepath)):
file_list.append(filename)
return file_list
def __getitem__(self, index):
data_file = os.path.join(self.data_path, self.phase, self.files_list[index])
assert os.path.exists(data_file)
try:
data = np.load(data_file)
features = data['data'] # 100 x 20 x 4096
labels = data['labels'] # 2
detections = data['det'] # 100 x 19 x 6
except:
raise IOError('Load data error! File: %s'%(data_file))
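        # time of accident (toa): positive DAD clips are assumed to have the accident
        # starting at frame 90 of the 100-frame clip; for negative clips toa is set
        # past the end of the clip (no accident occurs).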
if labels[1] > 0:
toa = [90.0]
else:
toa = [self.n_frames + 1]
graph_edges, edge_weights = generate_st_graph(detections)
if self.toTensor:
features = torch.Tensor(features).to(self.device) # 100 x 20 x 4096
labels = torch.Tensor(labels).to(self.device)
graph_edges = torch.Tensor(graph_edges).long().to(self.device)
edge_weights = torch.Tensor(edge_weights).to(self.device)
toa = torch.Tensor(toa).to(self.device)
if self.vis:
video_id = str(data['ID'])[5:11] # e.g.: b001_000490_*
return features, labels, graph_edges, edge_weights, toa, detections, video_id
else:
return features, labels, graph_edges, edge_weights, toa
class A3DDataset(Dataset):
def __init__(self, data_path, feature, phase='train', toTensor=False, device=torch.device('cuda'), vis=False):
self.data_path = data_path
self.feature = feature
self.phase = phase
self.toTensor = toTensor
self.device = device
self.vis = vis
self.n_frames = 100
self.n_obj = 19
self.fps = 20.0
self.dim_feature = self.get_feature_dim(feature)
self.files_list, self.labels_list = self.read_datalist(data_path, phase)
def __len__(self):
data_len = len(self.files_list)
return data_len
def get_feature_dim(self, feature_name):
if feature_name == 'vgg16':
return 4096
elif feature_name == 'res101':
return 2048
else:
raise ValueError
def read_datalist(self, data_path, phase):
# load training set
list_file = os.path.join(data_path, self.feature + '_features', '%s.txt' % (phase))
assert os.path.exists(list_file), "file not exists: %s"%(list_file)
fid = open(list_file, 'r')
data_files, data_labels = [], []
for line in fid.readlines():
filename, label = line.rstrip().split(' ')
data_files.append(filename)
data_labels.append(int(label))
fid.close()
return data_files, data_labels
def get_toa(self, clip_id):
# handle clip id like "uXXC8uQHCoc_000011_0" which should be "uXXC8uQHCoc_000011"
clip_id = clip_id if len(clip_id.split('_')[-1]) > 1 else clip_id[:-2]
label_file = os.path.join(self.data_path, 'frame_labels', clip_id + '.txt')
assert os.path.exists(label_file)
f = open(label_file, 'r')
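        # Descriptive note (added): the second whitespace-separated field of each line is the per-frame
        # accident label; toa is the index of the first frame labeled 1.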
label_all = []
for line in f.readlines():
label = int(line.rstrip().split(' ')[1])
label_all.append(label)
f.close()
label_all = np.array(label_all, dtype=np.int32)
toa = np.where(label_all == 1)[0][0]
toa = max(1, toa) # time-of-accident should not be equal to zero
return toa
def __getitem__(self, index):
data_file = os.path.join(self.data_path, self.feature + '_features', self.files_list[index])
assert os.path.exists(data_file), "file not exists: %s"%(data_file)
data = np.load(data_file)
features = data['features']
label = self.labels_list[index]
label_onehot = np.array([0, 1]) if label > 0 else np.array([1, 0])
# get time of accident
file_id = self.files_list[index].split('/')[1].split('.npz')[0]
if label > 0:
toa = [self.get_toa(file_id)]
else:
toa = [self.n_frames + 1]
# construct graph
attr = 'positive' if label > 0 else 'negative'
dets_file = os.path.join(self.data_path, 'detections', attr, file_id + '.pkl')
assert os.path.exists(dets_file), "file not exists: %s"%(dets_file)
with open(dets_file, 'rb') as f:
detections = pickle.load(f)
detections = np.array(detections) # 100 x 19 x 6
graph_edges, edge_weights = generate_st_graph(detections)
f.close()
if self.toTensor:
features = torch.Tensor(features).to(self.device) # 100 x 20 x 4096
label_onehot = torch.Tensor(label_onehot).to(self.device) # 2
graph_edges = torch.Tensor(graph_edges).long().to(self.device)
edge_weights = torch.Tensor(edge_weights).to(self.device)
toa = torch.Tensor(toa).to(self.device)
if self.vis:
# file_id = file_id if len(file_id.split('_')[-1]) > 1 else file_id[:-2]
# video_path = os.path.join(self.data_path, 'video_frames', file_id, 'images')
# assert os.path.exists(video_path), video_path
return features, label_onehot, graph_edges, edge_weights, toa, detections, file_id
else:
return features, label_onehot, graph_edges, edge_weights, toa
class CrashDataset(Dataset):
def __init__(self, data_path, feature, phase='train', toTensor=False, device=torch.device('cuda'), vis=False):
self.data_path = data_path
self.feature = feature
self.phase = phase
self.toTensor = toTensor
self.device = device
self.vis = vis
self.n_frames = 50
self.n_obj = 19
self.fps = 10.0
self.dim_feature = self.get_feature_dim(feature)
self.files_list, self.labels_list = self.read_datalist(data_path, phase)
self.toa_dict = self.get_toa_all(data_path)
def __len__(self):
data_len = len(self.files_list)
return data_len
def get_feature_dim(self, feature_name):
if feature_name == 'vgg16':
return 4096
elif feature_name == 'res101':
return 2048
else:
raise ValueError
def read_datalist(self, data_path, phase):
# load training set
list_file = os.path.join(data_path, self.feature + '_features', '%s.txt' % (phase))
assert os.path.exists(list_file), "file not exists: %s"%(list_file)
fid = open(list_file, 'r')
data_files, data_labels = [], []
for line in fid.readlines():
filename, label = line.rstrip().split(' ')
data_files.append(filename)
data_labels.append(int(label))
fid.close()
return data_files, data_labels
def get_toa_all(self, data_path):
toa_dict = {}
annofile = os.path.join(data_path, 'videos', 'Crash-1500.txt')
annoData = self.read_anno_file(annofile)
for anno in annoData:
            labels = np.array(anno['label'], dtype=np.int32)
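            # Descriptive note (added): the accident time is the first frame labeled 1,
            # clamped to the valid range [1, n_frames - 1].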
toa = np.where(labels == 1)[0][0]
toa = min(max(1, toa), self.n_frames-1)
toa_dict[anno['vid']] = toa
return toa_dict
def read_anno_file(self, anno_file):
assert os.path.exists(anno_file), "Annotation file does not exist! %s"%(anno_file)
result = []
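        # Descriptive note (added): each line of Crash-1500.txt has the form
        # "<vid>,[<comma-separated frame labels>],<startframe>,<vid_ytb>,<lighting>,<weather>,<ego_involve>"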
with open(anno_file, 'r') as f:
for line in f.readlines():
items = {}
items['vid'] = line.strip().split(',[')[0]
labels = line.strip().split(',[')[1].split('],')[0]
items['label'] = [int(val) for val in labels.split(',')]
assert sum(items['label']) > 0, 'invalid accident annotation!'
others = line.strip().split(',[')[1].split('],')[1].split(',')
items['startframe'], items['vid_ytb'], items['lighting'], items['weather'], items['ego_involve'] = others
result.append(items)
f.close()
return result
def __getitem__(self, index):
data_file = os.path.join(self.data_path, self.feature + '_features', self.files_list[index])
assert os.path.exists(data_file), "file not exists: %s"%(data_file)
try:
data = np.load(data_file)
features = data['data'] # 50 x 20 x 4096
labels = data['labels'] # 2
detections = data['det'] # 50 x 19 x 6
vid = str(data['ID'])
        except Exception:
raise IOError('Load data error! File: %s'%(data_file))
if labels[1] > 0:
toa = [self.toa_dict[vid]]
else:
toa = [self.n_frames + 1]
graph_edges, edge_weights = generate_st_graph(detections)
if self.toTensor:
features = torch.Tensor(features).to(self.device) # 50 x 20 x 4096
labels = torch.Tensor(labels).to(self.device)
graph_edges = torch.Tensor(graph_edges).long().to(self.device)
edge_weights = torch.Tensor(edge_weights).to(self.device)
toa = torch.Tensor(toa).to(self.device)
if self.vis:
return features, labels, graph_edges, edge_weights, toa, detections, vid
else:
return features, labels, graph_edges, edge_weights, toa
def generate_st_graph(detections):
# create graph edges
num_frames, num_boxes = detections.shape[:2]
num_edges = int(num_boxes * (num_boxes - 1) / 2)
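    # Descriptive note (added): fully-connected graph over the candidate boxes of each frame,
    # e.g. 19 boxes -> 19*18/2 = 171 undirected edges.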
graph_edges = []
edge_weights = np.zeros((num_frames, num_edges), dtype=np.float32)
for i in range(num_frames):
# generate graph edges (fully-connected)
edge = generate_graph_from_list(range(num_boxes))
graph_edges.append(np.transpose(np.stack(edge).astype(np.int32))) # 2 x 171
# compute the edge weights by distance
edge_weights[i] = compute_graph_edge_weights(detections[i, :, :4], edge) # 171,
return graph_edges, edge_weights
def generate_graph_from_list(L, create_using=None):
    G = networkx.empty_graph(len(L), create_using)
    if len(L) > 1:
        if G.is_directed():
            edges = itertools.permutations(L, 2)
        else:
            edges = itertools.combinations(L, 2)
        G.add_edges_from(edges)
    graph_edges = list(G.edges())
    return graph_edges
def compute_graph_edge_weights(boxes, edges):
"""
:param: boxes: (19, 4)
:param: edges: (171, 2)
:return: weights: (171,)
"""
N = boxes.shape[0]
assert len(edges) == N * (N-1) / 2
weights = np.ones((len(edges),), dtype=np.float32)
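    # Descriptive note (added): each edge weight is exp(-d), with d the squared distance between the
    # two box centers, so spatially close objects are linked more strongly; weights are normalized below.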
for i, edge in enumerate(edges):
c1 = [0.5 * (boxes[edge[0], 0] + boxes[edge[0], 2]),
0.5 * (boxes[edge[0], 1] + boxes[edge[0], 3])]
c2 = [0.5 * (boxes[edge[1], 0] + boxes[edge[1], 2]),
0.5 * (boxes[edge[1], 1] + boxes[edge[1], 3])]
d = (c1[0] - c2[0])**2 + (c1[1] - c2[1])**2
weights[i] = np.exp(-d)
# normalize weights
if np.sum(weights) > 0:
weights = weights / np.sum(weights) # N*(N-1)/2,
else:
weights = np.ones((len(edges),), dtype=np.float32)
return weights
if __name__ == '__main__':
from torch.utils.data import DataLoader
import argparse
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, default='./data',
help='The relative path of dataset.')
parser.add_argument('--dataset', type=str, default='dad', choices=['a3d', 'dad', 'crash'],
help='The name of dataset. Default: dad')
parser.add_argument('--batch_size', type=int, default=10,
help='The batch size in training process. Default: 10')
parser.add_argument('--feature_name', type=str, default='vgg16', choices=['vgg16', 'res101'],
help='The name of feature embedding methods. Default: vgg16')
p = parser.parse_args()
seed = 123
np.random.seed(seed)
torch.manual_seed(seed)
ROOT_PATH = os.path.dirname(os.path.dirname(__file__))
data_path = os.path.join(ROOT_PATH, p.data_path, p.dataset)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# create data loader
if p.dataset == 'dad':
train_data = DADDataset(data_path, p.feature_name, 'training', toTensor=True, device=device)
test_data = DADDataset(data_path, p.feature_name, 'testing', toTensor=True, device=device, vis=True)
elif p.dataset == 'a3d':
train_data = A3DDataset(data_path, p.feature_name, 'train', toTensor=True, device=device)
test_data = A3DDataset(data_path, p.feature_name, 'test', toTensor=True, device=device, vis=True)
elif p.dataset == 'crash':
train_data = CrashDataset(data_path, p.feature_name, 'train', toTensor=True, device=device)
test_data = CrashDataset(data_path, p.feature_name, 'test', toTensor=True, device=device, vis=True)
else:
raise NotImplementedError
traindata_loader = DataLoader(dataset=train_data, batch_size=p.batch_size, shuffle=True, drop_last=True)
testdata_loader = DataLoader(dataset=test_data, batch_size=p.batch_size, shuffle=False, drop_last=True)
for e in range(2):
print('Epoch: %d'%(e))
for i, (batch_xs, batch_ys, graph_edges, edge_weights, batch_toas) in tqdm(enumerate(traindata_loader), total=len(traindata_loader)):
if i == 0:
print('feature dim:', batch_xs.size())
print('label dim:', batch_ys.size())
print('graph edges dim:', graph_edges.size())
print('edge weights dim:', edge_weights.size())
print('time of accidents dim:', batch_toas.size())
for e in range(2):
print('Epoch: %d'%(e))
for i, (batch_xs, batch_ys, graph_edges, edge_weights, batch_toas, detections, video_ids) in \
tqdm(enumerate(testdata_loader), desc="batch progress", total=len(testdata_loader)):
if i == 0:
print('feature dim:', batch_xs.size())
print('label dim:', batch_ys.size())
print('graph edges dim:', graph_edges.size())
print('edge weights dim:', edge_weights.size())
print('time of accidents dim:', batch_toas.size())
| 15,669 | 39.386598 | 141 | py |
UString | UString-master/src/utils.py | import math
import numpy as np
# utility functions
def uniform(size, tensor):
stdv = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-stdv, stdv)
def glorot(tensor):
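    # Descriptive note (added): Xavier/Glorot uniform initialization, U(-a, a) with a = sqrt(6 / (fan_in + fan_out)).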
stdv = math.sqrt(6.0 / (tensor.size(0) + tensor.size(1)))
if tensor is not None:
tensor.data.uniform_(-stdv, stdv)
def zeros(tensor):
if tensor is not None:
tensor.data.fill_(0)
def ones(tensor):
if tensor is not None:
tensor.data.fill_(1)
def reset(nn):
def _reset(item):
if hasattr(item, 'reset_parameters'):
item.reset_parameters()
if nn is not None:
if hasattr(nn, 'children') and len(list(nn.children())) > 0:
for item in nn.children():
_reset(item)
else:
_reset(nn)
def tuple_to_array(lot):
out = np.array(list(lot[0]))
for i in range(1, len(lot)):
out = np.vstack((out, np.array(list(lot[i]))))
return out
| 970 | 20.108696 | 68 | py |
UString | UString-master/src/BayesModels.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class Gaussian(object):
def __init__(self, mu, rho):
super().__init__()
self.mu = mu
self.rho = rho
self.normal = torch.distributions.Normal(0,1)
@property
def sigma(self):
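        # Descriptive note (added): softplus of rho keeps the standard deviation strictly positive.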
return torch.log1p(torch.exp(self.rho))
def sample(self):
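        # Descriptive note (added): reparameterization trick — sample eps ~ N(0, 1), then scale and
        # shift it so that gradients can flow back to mu and rho.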
epsilon = self.normal.sample(self.rho.size()).to(self.mu.device)
return self.mu + self.sigma * epsilon
def log_prob(self, input):
return (-math.log(math.sqrt(2 * math.pi))
- torch.log(self.sigma)
- ((input - self.mu) ** 2) / (2 * self.sigma ** 2)).sum()
class ScaleMixtureGaussian(object):
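    # Descriptive note (added): prior over weights and biases — a mixture of two zero-mean Gaussians
    # with mixing coefficient pi; with the defaults set in BayesianLinear the second component is far
    # narrower (exp(-6) vs. 1), which favours near-zero parameters.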
def __init__(self, pi, sigma1, sigma2):
super().__init__()
self.pi = pi
self.sigma1 = sigma1
self.sigma2 = sigma2
def log_prob(self, input):
gaussian1 = torch.distributions.Normal(0, self.sigma1.to(input.device))
gaussian2 = torch.distributions.Normal(0, self.sigma2.to(input.device))
prob1 = torch.exp(gaussian1.log_prob(input))
prob2 = torch.exp(gaussian2.log_prob(input))
return (torch.log(self.pi * prob1 + (1-self.pi) * prob2)).sum()
class BayesianLinear(nn.Module):
def __init__(self, in_features, out_features, pi=0.5, sigma_1=None, sigma_2=None):
super().__init__()
self.in_features = in_features
self.out_features = out_features
if sigma_1 is None or sigma_2 is None:
sigma_1 = torch.FloatTensor([math.exp(-0)])
sigma_2 = torch.FloatTensor([math.exp(-6)])
# Weight parameters
self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-0.2, 0.2))
self.weight_rho = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-5,-4))
self.weight = Gaussian(self.weight_mu, self.weight_rho)
# Bias parameters
self.bias_mu = nn.Parameter(torch.Tensor(out_features).uniform_(-0.2, 0.2))
self.bias_rho = nn.Parameter(torch.Tensor(out_features).uniform_(-5,-4))
self.bias = Gaussian(self.bias_mu, self.bias_rho)
# Prior distributions
self.weight_prior = ScaleMixtureGaussian(pi, sigma_1, sigma_2)
self.bias_prior = ScaleMixtureGaussian(pi, sigma_1, sigma_2)
self.log_prior = 0
self.log_variational_posterior = 0
def forward(self, input, sample=False, calculate_log_probs=False):
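        # Descriptive note (added): in training mode (or when sample=True) weights and biases are drawn
        # from the variational posterior; at evaluation time the posterior means are used, giving a
        # deterministic forward pass.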
if self.training or sample:
weight = self.weight.sample()
bias = self.bias.sample()
else:
weight = self.weight.mu
bias = self.bias.mu
if self.training or calculate_log_probs:
self.log_prior = self.weight_prior.log_prob(weight) + self.bias_prior.log_prob(bias)
self.log_variational_posterior = self.weight.log_prob(weight) + self.bias.log_prob(bias)
else:
self.log_prior, self.log_variational_posterior = 0, 0
return F.linear(input, weight, bias)
| 3,130 | 38.632911 | 100 | py |
UString | UString-master/src/__init__.py | 0 | 0 | 0 | py |
|
UString | UString-master/src/eval_tools.py | import numpy as np
import matplotlib.pyplot as plt
import os
from scipy.interpolate import make_interp_spline
def evaluation(all_pred, all_labels, time_of_accidents, fps=20.0):
"""
:param: all_pred (N x T), where N is number of videos, T is the number of frames for each video
:param: all_labels (N,)
:param: time_of_accidents (N,) int element
:output: AP (average precision, AUC), mTTA (mean Time-to-Accident), TTA@R80 (TTA at Recall=80%)
"""
preds_eval = []
min_pred = np.inf
n_frames = 0
for idx, toa in enumerate(time_of_accidents):
if all_labels[idx] > 0:
pred = all_pred[idx, :int(toa)] # positive video
else:
pred = all_pred[idx, :] # negative video
# find the minimum prediction
min_pred = np.min(pred) if min_pred > np.min(pred) else min_pred
preds_eval.append(pred)
n_frames += len(pred)
total_seconds = all_pred.shape[1] / fps
# iterate a set of thresholds from the minimum predictions
# temp_shape = int((1.0 - max(min_pred, 0)) / 0.001 + 0.5)
Precision = np.zeros((n_frames))
Recall = np.zeros((n_frames))
Time = np.zeros((n_frames))
cnt = 0
for Th in np.arange(max(min_pred, 0), 1.0, 0.001):
Tp = 0.0
Tp_Fp = 0.0
Tp_Tn = 0.0
time = 0.0
counter = 0.0 # number of TP videos
# iterate each video sample
for i in range(len(preds_eval)):
# true positive frames: (pred->1) * (gt->1)
tp = np.where(preds_eval[i]*all_labels[i]>=Th)
Tp += float(len(tp[0])>0)
if float(len(tp[0])>0) > 0:
# if at least one TP, compute the relative (1 - rTTA)
time += tp[0][0] / float(time_of_accidents[i])
counter = counter+1
# all positive frames
Tp_Fp += float(len(np.where(preds_eval[i]>=Th)[0])>0)
if Tp_Fp == 0: # predictions of all videos are negative
continue
else:
Precision[cnt] = Tp/Tp_Fp
if np.sum(all_labels) ==0: # gt of all videos are negative
continue
else:
Recall[cnt] = Tp/np.sum(all_labels)
if counter == 0:
continue
else:
Time[cnt] = (1-time/counter)
cnt += 1
# sort the metrics with recall (ascending)
new_index = np.argsort(Recall)
Precision = Precision[new_index]
Recall = Recall[new_index]
Time = Time[new_index]
# unique the recall, and fetch corresponding precisions and TTAs
    _, rep_index = np.unique(Recall, return_index=True)
rep_index = rep_index[1:]
new_Time = np.zeros(len(rep_index))
new_Precision = np.zeros(len(rep_index))
for i in range(len(rep_index)-1):
new_Time[i] = np.max(Time[rep_index[i]:rep_index[i+1]])
new_Precision[i] = np.max(Precision[rep_index[i]:rep_index[i+1]])
# sort by descending order
new_Time[-1] = Time[rep_index[-1]]
new_Precision[-1] = Precision[rep_index[-1]]
new_Recall = Recall[rep_index]
# compute AP (area under P-R curve)
AP = 0.0
if new_Recall[0] != 0:
AP += new_Precision[0]*(new_Recall[0]-0)
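    # Descriptive note (added): trapezoidal integration over the remaining consecutive (recall, precision) points.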
for i in range(1,len(new_Precision)):
AP += (new_Precision[i-1]+new_Precision[i])*(new_Recall[i]-new_Recall[i-1])/2
# transform the relative mTTA to seconds
mTTA = np.mean(new_Time) * total_seconds
print("Average Precision= %.4f, mean Time to accident= %.4f"%(AP, mTTA))
sort_time = new_Time[np.argsort(new_Recall)]
sort_recall = np.sort(new_Recall)
TTA_R80 = sort_time[np.argmin(np.abs(sort_recall-0.8))] * total_seconds
print("Recall@80%, Time to accident= " +"{:.4}".format(TTA_R80))
return AP, mTTA, TTA_R80
def print_results(Epochs, APvid_all, AP_all, mTTA_all, TTA_R80_all, Unc_all, result_dir):
result_file = os.path.join(result_dir, 'eval_all.txt')
with open(result_file, 'w') as f:
for e, APvid, AP, mTTA, TTA_R80, Un in zip(Epochs, APvid_all, AP_all, mTTA_all, TTA_R80_all, Unc_all):
f.writelines('Epoch: %s,'%(e) + ' APvid={:.3f}, AP={:.3f}, mTTA={:.3f}, TTA_R80={:.3f}, mAU={:.5f}, mEU={:.5f}\n'.format(APvid, AP, mTTA, TTA_R80, Un[0], Un[1]))
f.close()
def vis_results(vis_data, batch_size, vis_dir, smooth=False, vis_batchnum=2):
assert vis_batchnum <= len(vis_data)
for b in range(vis_batchnum):
results = vis_data[b]
pred_frames = results['pred_frames']
labels = results['label']
toa = results['toa']
video_ids = results['video_ids']
detections = results['detections']
uncertainties = results['pred_uncertain']
for n in range(batch_size):
pred_mean = pred_frames[n, :] # (90,)
pred_std_alea = 1.0 * np.sqrt(uncertainties[n, :, 0])
pred_std_epis = 1.0 * np.sqrt(uncertainties[n, :, 1])
xvals = range(len(pred_mean))
if smooth:
# sampling
xvals = np.linspace(0,len(pred_mean)-1,20)
                pred_mean_reduce = pred_mean[xvals.astype(np.int32)]
                pred_std_alea_reduce = pred_std_alea[xvals.astype(np.int32)]
                pred_std_epis_reduce = pred_std_epis[xvals.astype(np.int32)]
# smoothing
xvals_new = np.linspace(1,len(pred_mean)+1,80)
pred_mean = make_interp_spline(xvals, pred_mean_reduce)(xvals_new)
pred_std_alea = make_interp_spline(xvals, pred_std_alea_reduce)(xvals_new)
pred_std_epis = make_interp_spline(xvals, pred_std_epis_reduce)(xvals_new)
pred_mean[pred_mean >= 1.0] = 1.0-1e-3
xvals = xvals_new
# fix invalid values
indices = np.where(xvals <= toa[n])[0]
xvals = xvals[indices]
pred_mean = pred_mean[indices]
pred_std_alea = pred_std_alea[indices]
pred_std_epis = pred_std_epis[indices]
# plot the probability predictions
fig, ax = plt.subplots(1, figsize=(24, 3.5))
ax.fill_between(xvals, pred_mean - pred_std_alea, pred_mean + pred_std_alea, facecolor='wheat', alpha=0.5)
ax.fill_between(xvals, pred_mean - pred_std_epis, pred_mean + pred_std_epis, facecolor='yellow', alpha=0.5)
plt.plot(xvals, pred_mean, linewidth=3.0)
if toa[n] <= pred_frames.shape[1]:
plt.axvline(x=toa[n], ymax=1.0, linewidth=3.0, color='r', linestyle='--')
# plt.axhline(y=0.7, xmin=0, xmax=0.9, linewidth=3.0, color='g', linestyle='--')
# draw accident region
x = [toa[n], pred_frames.shape[1]]
y1 = [0, 0]
y2 = [1, 1]
ax.fill_between(x, y1, y2, color='C1', alpha=0.3, interpolate=True)
fontsize = 25
plt.ylim(0, 1.1)
plt.xlim(1, pred_frames.shape[1])
plt.ylabel('Probability', fontsize=fontsize)
plt.xlabel('Frame (FPS=20)', fontsize=fontsize)
plt.xticks(range(0, pred_frames.shape[1], 10), fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.grid(True)
plt.tight_layout()
tag = 'pos' if labels[n] > 0 else 'neg'
plt.savefig(os.path.join(vis_dir, video_ids[n] + '_' + tag + '.png'))
plt.close()
# plt.show() | 7,404 | 43.608434 | 173 | py |