repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string: py) |
---|---|---|---|---|---|---|
blend
|
blend-master/tools/meteor-1.4/scripts/meteor_shower.py
|
#!/usr/bin/env python
# Learn Meteor parameters quickly with up to 42 Meteors
# Run as many as requested in parallel
# Meteors use 1 cpu / 2gb each
import collections, os, subprocess, sys, threading
def main(argv):
if len(argv[1:]) < 7:
print >> sys.stderr, 'Learn Meteor parameters efficiently with parallel Trainers'
print >> sys.stderr, 'Usage: {0} <meteor.jar> <lang> <n-mods> <task> <data_dir> <work_dir> <n-jobs> [other args like -a par.gz, -ch, ...]'.format(argv[0])
sys.exit(1)
# Args
meteor_jar = os.path.abspath(argv[1])
lang = argv[2]
n_mods = int(argv[3])
task = argv[4]
data_dir = os.path.abspath(argv[5])
work_dir = os.path.abspath(argv[6])
n_jobs = int(argv[7])
sb_dir = os.path.join(work_dir, 'sandbox')
other_args = argv[8:]
# Working dir
if os.path.exists(work_dir):
print 'Work dir {0} exists, exiting'.format(work_dir)
sys.exit(1)
os.mkdir(work_dir)
os.mkdir(sb_dir)
# Weight ranges for jobs based on mod count
w_start_list = [1, 0, 0, 0]
w_end_list = [1, 0, 0, 0]
for i in range(n_mods):
w_end_list[i] = 1
w_start = ''
w_end = ''
for i in range(4):
w_start += str(w_start_list[i]) + ' '
w_end += str(w_end_list[i]) + ' '
w_start = w_start.strip()
w_end = w_end.strip()
# Step is always the same
step = '0.05 0.10 0.05 0.05 1.0 0.2 0.2 0.2'
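# The 8 step values track the 8 tuned parameters in the start/end points
# built below: presumably alpha beta gamma delta, then the 4 module weights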
# Queue Trainer commands
queue = collections.deque([])
for i in range(42):
sb_sub_dir = os.path.join(sb_dir, '{0}'.format(i + 1))
os.mkdir(sb_sub_dir)
out_file = os.path.join(work_dir, 'output.{0}'.format(i + 1))
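# Integer division i / 2 sweeps alpha over 0.00..1.00 in 0.05 steps
# (21 values); even/odd i halves the gamma range: 21 x 2 = 42 jobs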
a = 0.05 * (i / 2)
(g_min, g_max) = (0, 0.5) if (i % 2 == 0) else (0.55, 1.0)
start = '{0} 0 {1} 0 {2}'.format(a, g_min, w_start)
end = '{0} 2.5 {1} 1.0 {2}'.format(a, g_max, w_end)
# Retry in case of filesystem failure
trainer_cmd = 'cd {sd} && while true ; do sleep 1 ; java -XX:+UseCompressedOops -Xmx2G -cp {0} Trainer {1} {2} -l {3} -i \'{4}\' -f \'{5}\' -s \'{6}\' {args} > {7} ; if [ "$?" = "0" ] ; then break ; fi ; done'.format(meteor_jar, task, data_dir, lang, start, end, step, out_file, sd=sb_sub_dir, args=' '.join(other_args))
queue.append(trainer_cmd)
# Run Trainers
for i in range(n_jobs):
queue.append(-1)
threads = []
for i in range(n_jobs):
t = threading.Thread(target=run, args=(queue,))
threads.append(t)
t.start()
for t in threads:
t.join()
# Sort output
sort_cmd = 'cat {0}/output.* | sort -g -S4G > {0}/output.sort'.format(work_dir)
subprocess.call(sort_cmd, shell=True)
# Run commands until end of queue
def run(queue):
while True:
cmd = queue.popleft()
if cmd == -1:
return
subprocess.call(cmd, shell=True)
if __name__ == '__main__' : main(sys.argv)
| 2,954 | 32.579545 | 328 |
py
|
blend
|
blend-master/tools/meteor-1.4/scripts/bleu.py
|
#!/usr/bin/env python
import codecs, os, shutil, subprocess, sys, tempfile
mteval_pl = os.path.join(os.path.dirname(os.path.dirname(__file__)),
'mt-diff', 'files', 'mteval-v13m.pl')
def main(argv):
if len(argv[1:]) < 2:
print 'Score with NIST BLEU'
print ''
print 'usage: {0} <hyp> <ref> [opts]'.format(argv[0])
print ''
print '-------------------'
print 'Options for scoring'
print '-------------------'
print ''
subprocess.call(['perl', mteval_pl, '-h'])
sys.exit(1)
hyp = argv[1]
ref = argv[2]
opts = argv[3:]
src_sgm = tempfile.mktemp(suffix='.sgm')
tst_sgm = tempfile.mktemp(suffix='.sgm')
ref_sgm = tempfile.mktemp(suffix='.sgm')
sgm(ref, src_sgm, 'srcset')
sgm(hyp, tst_sgm, 'tstset')
sgm(ref, ref_sgm, 'refset')
cmd = ['perl', mteval_pl, '-s', src_sgm, '-t', tst_sgm, '-r', ref_sgm]
for opt in opts:
cmd.append(opt)
subprocess.call(cmd)
os.remove(src_sgm)
os.remove(tst_sgm)
os.remove(ref_sgm)
def sgm(f_in, f_out, f_type):
i = open(f_in)
o = open(f_out, 'w')
s = 0
print >> o, '<{0} trglang="trg" setid="set" srclang="src">'.format(f_type)
print >> o, '<doc docid="doc" sysid="sys">'
for line in i:
s += 1
print >> o, '<seg id="{0}"> {1} </seg>'.format(s, line.strip())
print >> o, '</doc>'
print >> o, '</{0}>'.format(f_type)
i.close()
o.close()
if __name__ == '__main__' : main(sys.argv)
| 1,540 | 25.568966 | 78 |
py
|
blend
|
blend-master/tools/meteor-1.4/scripts/filter_merge_rank_set.py
|
#!/usr/bin/env python
# Filter and merge multiple rank training sets into a single set. Segments are
# relabeled by original data set and renumbered after filtering. The resulting
# combined set contains only segments for which rank judgments exist.
# Filtering is highly recommended even for single sets to greatly reduce
# training time.
import codecs, os, re, sys
def main(argv):
if len(argv[1:]) < 2:
print 'usage: {0} <clean-dir> <file1.rank> [file2.rank ...]'.format(argv[0])
print 'Rank files should have same basename (src-tgt.rank)'
print 'Original test sets identified with ~n'
exit(1)
clean_dir = argv[1]
r_files = argv[2:]
if clean_dir == os.path.dirname(os.path.abspath(r_files[0])):
print 'This is a bad idea. Please specify a different clean-dir.'
sys.exit(1)
# Single rank file
r_out = codecs.open(os.path.join(clean_dir, os.path.basename(r_files[0])),
'w', 'utf-8')
r_n = 0
id = 0
# For each rank file
for r_file in r_files:
r_n += 1
# Renumber segments in rank file, keep order
seg = {}
r_in = codecs.open(r_file, 'r', 'utf-8')
for line in r_in:
f = line.split()
if f[0] not in seg:
id += 1
seg[f[0]] = id
# Append rank set numbers to system names
print >> r_out, '{0}\t{1}\t{2}\t{3}\t{4}'.format(seg[f[0]],
append_n(f[1], r_n), f[2], append_n(f[3], r_n), f[4])
r_in.close()
r_base = os.path.basename(os.path.abspath(r_file))
prefix = r_base[0:r_base.find('.')]
f_dir = os.path.dirname(os.path.abspath(r_file))
# Filter and renumber segments in system outputs and ref file
for sgm_file in os.listdir(f_dir):
if not (sgm_file.startswith(prefix) and sgm_file.endswith('.sgm')):
continue
sgm_in = codecs.open(os.path.join(f_dir, sgm_file), 'r', 'utf-8')
# Append rank set numbers to system names
sgm_out = codecs.open(os.path.join(clean_dir,
append_n(sgm_file, r_n)), 'w', 'utf-8')
for line in sgm_in:
r = re.search(u'^<seg id="([0-9]+)">', line, re.I)
if not r:
print >> sgm_out, line.strip()
continue
if r.group(1) in seg:
print >> sgm_out, re.sub(u'^<seg id="[0-9]+">',
'<seg id="{0}">'.format(seg[r.group(1)]), line).strip()
sgm_in.close()
sgm_out.close()
# Finished writing rank file
r_out.close()
# Append set number to appropriate location
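# Example with hypothetical names: append_n('fr-en.rank', 2) -> 'fr-en~2.rank';
# append_n('noext', 2) -> 'noext~2'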
def append_n(s, n):
i = s.find('.')
if i == -1:
i = len(s)
return '{0}~{1}{2}'.format(s[0:i], n, s[i:])
if __name__ == '__main__' : main(sys.argv)
| 2,940 | 33.6 | 84 |
py
|
blend
|
blend-master/tools/meteor-1.4/scripts/wmt_fmt.py
|
#!/usr/bin/env python
# Read Meteor output, write to WMT score format
import os, sys
def main(argv):
if len(argv[1:]) < 3:
print 'usage: {0} <lang-pair> <test-set> <system> [metric]'.format(
argv[0])
print 'writes metric.lang-pair.test-set.system.{seg.scr,sys.scr}'
print ''
print 'Pipe Meteor output to this script'
sys.exit(1)
lp = argv[1]
ts = argv[2]
s = argv[3]
m = argv[4] if len(argv[1:]) > 3 else 'Meteor'
seg_f = '{0}.{1}.{2}.{3}.seg.scr'.format(m, lp, ts, s)
sys_f = '{0}.{1}.{2}.{3}.sys.scr'.format(m, lp, ts, s)
stop = False
if os.path.exists(seg_f):
print 'exists: {0}'.format(seg_f)
stop = True
if os.path.exists(sys_f):
print 'exists: {0}'.format(sys_f)
stop = True
if stop:
sys.exit(1)
seg_o = open(seg_f, 'w')
sys_o = open(sys_f, 'w')
while True:
line = sys.stdin.readline()
if not line:
break
if line.startswith('Segment'):
f = line.split()
print >> seg_o, '{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(m, lp, ts, s,
f[1], f[3])
if line.startswith('Final score'):
scr = line.split()[2]
print >> sys_o, '{0}\t{1}\t{2}\t{3}\t{4}'.format(m, lp, ts, s,
scr)
seg_o.close()
sys_o.close()
if __name__ == '__main__' : main(sys.argv)
| 1,447 | 25.814815 | 79 |
py
|
blend
|
blend-master/tools/meteor-1.4/xray/Generation.py
|
import re, shutil, subprocess, sys, tempfile
from MeteorAlignment import *
# Edit as needed
xelatex_cmd = '/usr/bin/xelatex'
# Edit as needed
gnuplot_cmd = '/usr/bin/gnuplot'
def check_xelatex():
if not shutil.os.path.exists(xelatex_cmd):
print 'Could not find xelatex_cmd \'{0}\''.format(xelatex_cmd)
print 'Please install xetex or update path in Generation.py'
return False
return True
def check_gnuplot():
if not shutil.os.path.exists(gnuplot_cmd):
print 'Could not find gnuplot_cmd \'{0}\''.format(gnuplot_cmd)
print 'Please install gnuplot or update path in Generation.py'
return False
return True
#
# LaTeX
#
MAX_LEN = 50
def xelatex(tex_file, pdf_file, work_dir=shutil.os.curdir):
# Working dir
out_dir = tempfile.mkdtemp()
# PDF output file
if '.' in tex_file:
out_pdf = tex_file[0:tex_file.rfind('.')] + '.pdf'
else:
out_pdf = tex_file + '.pdf'
# Run xelatex
subprocess.Popen([xelatex_cmd, '-interaction', 'batchmode', \
'-output-directory', out_dir, tex_file], cwd=work_dir, \
stdout=subprocess.PIPE, stderr=subprocess.PIPE).wait()
# Copy pdf file and remove temp files
shutil.copyfile(shutil.os.path.join(out_dir, out_pdf), pdf_file)
shutil.rmtree(out_dir)
def escape(s):
s = s.replace('\\', '\\backslash')
s = re.sub('([$&%{}#_])', r'\\\1', s)
return s
def get_font(uni):
if uni:
return r'''\usepackage{fontspec}
\setmainfont{unifont}
'''
else:
return r'''\renewcommand{\rmdefault}{phv} % Arial
\renewcommand{\sfdefault}{phv} % Arial
'''
def check_printable(a1, a2=None):
# Too long
if len(a1.sen2) > MAX_LEN:
print >> sys.stderr, 'Skipping', a1.name, '- too large:', \
len(a1.sen2), 'reference words'
return False
# Different references?
if a2 and a1.sen2 != a2.sen2:
print >> sys.stderr, 'Skipping', a1.name, \
'- different references used'
return False
return True
def print_align_table(tex_out, a1, a2=None, a_type=ALIGN_METEOR):
'''LaTeX generation function: use with caution'''
print >> tex_out, r'%Table start'
# Print color declarations
r = 0.6
g = 0.6
b = 1.0
step = 0.4 / max(1, len(a1.sen2))
half = len(a1.sen2) / 2
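# Ramp cell colors across reference word order: from bluish toward green
# over the first half, then toward red over the second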
for i in range(len(a1.sen2)):
if i >= half:
r += step * 1.5
g += step * .25
b -= step * 1.5
else:
r += step * .5
g += step * 1.0
b -= step * .5
print >> tex_out, r'\definecolor{{ref{0}}}{{rgb}}{{{1},{2},{3}}}'\
.format(i, min(1.0, r), min(1.0, g), min(1.0, b))
# Print table start
line = r'\noindent\begin{tabular}{|l'
for i in range(len(a1.sen2)):
line += r'|'
line += r'p{10pt}'
if a2:
line += r'|l'
line += r'|}'
print >> tex_out, line
print >> tex_out, r'\hline'
# Print sentence 2
line = ''
if a2:
line += r'\Large\color{z}{$\blacksquare$} \color{y}{$\blacksquare$}'
for i in range(len(a1.sen2)):
w2 = escape(a1.sen2[i])
line += r'&\begin{sideways}' + r'\cellcolor{{ref{0}}}'.format(i) + \
w2 + '\hspace{12pt}\end{sideways}'
if a2:
line += r'&\rex \rap'
print >> tex_out, line + r'\\'
# Print each row for sentences a1.sen1, a2.sen1
max_len = max(len(a1.sen1), len(a2.sen1)) if a2 else len(a1.sen1)
fill1 = FILL
if a2:
fill1 = FILL_L
fill2 = FILL_R
for i in range(max_len):
print >> tex_out, r'\hline'
line = ''
if i < len(a1.sen1):
line += r'\ssp '
if a1.sen1_matched[i] != NO_MATCH:
line += r'\cellcolor{{ref{0}}}'.format(a1.sen1_matched[i])
line += escape(a1.sen1[i]) + r' \ssp'
for j in range(len(a1.sen2)):
line += r'&\hspace{2pt}'
if i < len(a1.sen1):
match = a1.matrix[i][j]
if match:
line += fill1[a1.matrix[i][j]]
if a2 and i < len(a2.sen1):
match = a2.matrix[i][j]
if match:
line += fill2[match]
if a2:
line += r'&'
if i < len(a2.sen1):
line += r'\ssp '
if a2.sen1_matched[i] != NO_MATCH:
line += r'\cellcolor{{ref{0}}}'.format(a2.sen1_matched[i])
line += escape(a2.sen1[i]) + r'\ssp '
print >> tex_out, line + r'\\'
print >> tex_out, r'\hline'
# Print table footer
print >> tex_out, r'\end{tabular}'
print >> tex_out, r''
print >> tex_out, r'\vspace{6pt}'
# Print alignment information
if a_type == ALIGN_DEFAULT:
print >> tex_out, r'\noindent {0}'.format(a1.name)
# Compare stats
elif a_type == ALIGN_METEOR:
print >> tex_out, r'\noindent Segment {0}\\\\'.format(escape(a1.name))
if a2:
p_diff = a2.p - a1.p
r_diff = a2.r - a1.r
fr_diff = a2.frag - a1.frag
sc_diff = a2.score - a1.score
print >> tex_out, r'\noindent\begin{tabular}{lm{12pt}rm{24pt}rm{24pt}r}'
print >> tex_out, r'\hline'
print >> tex_out, r'P:&&{0:.3f}&\centering vs&{1:.3f}&\centering :&{{\bf\color{{{2}}}{{{3:.3f}}}}}\\'.format(a1.p, a2.p, 'gb' if p_diff >= 0 else 'rb', p_diff)
print >> tex_out, r'R:&&{0:.3f}&\centering vs&{1:.3f}&\centering :&{{\bf\color{{{2}}}{{{3:.3f}}}}}\\'.format(a1.r, a2.r, 'gb' if r_diff >= 0 else 'rb', r_diff)
print >> tex_out, r'Frag:&&{0:.3f}&\centering vs&{1:.3f}&\centering :&{{\bf\color{{{2}}}{{{3:.3f}}}}}\\'.format(a1.frag, a2.frag, 'rb' if fr_diff > 0 else 'gb', fr_diff)
print >> tex_out, r'Score:&&{0:.3f}&\centering vs&{1:.3f}&\centering :&{{\bf\color{{{2}}}{{{3:.3f}}}}}\\'.format(a1.score, a2.score, 'gb' if sc_diff >= 0 else 'rb', sc_diff)
else:
print >> tex_out, r'\noindent\begin{tabular}{lm{12pt}r}'
print >> tex_out, r'\hline'
print >> tex_out, r'P:&&{0:.3f}\\'.format(a1.p)
print >> tex_out, r'R:&&{0:.3f}\\'.format(a1.r)
print >> tex_out, r'Frag:&&{0:.3f}\\'.format(a1.frag)
print >> tex_out, r'Score:&&{0:.3f}\\'.format(a1.score)
print >> tex_out, r'\end{tabular}'
# End table
print >> tex_out, r'%Table end'
print >> tex_out, ''
print >> tex_out, r'\newpage'
print >> tex_out, ''
FILL = {'ex': r'\mex', 'ap': r'\map', 'rm': r'\mrm'}
FILL_L = {'ex': r'\lex', 'ap': r'\lap'}
FILL_R = {'ex': r'\rex', 'ap': r'\rap'}
DEC_HEADER1 = r'''\documentclass[landscape]{article}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Include these packages and declarations in your tex file %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{rotating}
\usepackage{colortbl}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage[T1]{fontenc}
'''
DEC_HEADER2 = r'''
\definecolor{z}{rgb}{0.7,0.7,0.7}
\definecolor{g}{rgb}{0.5,1.0,0.5}
\definecolor{y}{rgb}{1.0,1.0,0.5}
\definecolor{r}{rgb}{1.0,0.5,0.5}
\definecolor{gb}{rgb}{0.0,0.5,0.0}
\definecolor{rb}{rgb}{0.5,0.0,0.0}
\newcommand{\ssp}{\hspace{2pt}}
\newcommand{\lex}{\cellcolor{z}}
\newcommand{\lap}{\cellcolor{y}}
\newcommand{\rex}{$\bullet$}
\newcommand{\rap}{$\boldsymbol\circ$}
\newcommand{\mex}{\cellcolor{g}$\bullet$}
\newcommand{\map}{\cellcolor{y}$\boldsymbol\circ$}
\newcommand{\mrm}{\cellcolor{r}X}
% Search for '%Table start' and '%Table end' to find alignment boundaries
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage[margin=0.5in]{geometry}
\pagenumbering{0}
%\renewcommand{\rmdefault}{phv} % Arial
%\renewcommand{\sfdefault}{phv} % Arial
\renewcommand{\tabcolsep}{1pt}
\begin{document}
'''
DOC_HEADER_COMPARE = r'''
\noindent\large Meteor Alignments\\
\noindent\normalsize Reference top row\\
{sys1} (sentence 1) left column\\
{sys2} (sentence 2) right column\\\\
Matches identified by color (sen 1) and symbol (sen 2)\\\\
Color spectrum follows reference word order\\\\
\small
\noindent\begin{{tabular}}{{|l|c|c|}}
\hline
Match Type&Sentence 1&Sentence 2\\
\hline
Exact&\color{{z}}{{$\blacksquare$}}&\rex\\
\hline
Stem / Synonym / Paraphrase&\color{{y}}{{$\blacksquare$}}&\rap\\
\hline
\end{{tabular}}
\vspace{{6pt}}
\noindent Key: match markers for sentences
\newpage
'''
DOC_HEADER_SINGLE = r'''
\noindent\large Meteor Alignments for {sysname}\\
\noindent\normalsize Reference top row\\
Hypothesis left column\\
Matches identified by color and symbol\\\\
Color spectrum follows reference word order\\\\
\small
\noindent\begin{{tabular}}{{|l|p{{10pt}}|}}
\hline
Match Type&\\
\hline
Exact&\ssp\mex\\
\hline
Stem / Synonym / Paraphrase&\ssp\map\\
\hline
\end{{tabular}}
\vspace{{6pt}}
\noindent Key: match markers for sentences
\newpage
'''
DOC_HEADER_ALIGN = r'''
\noindent\large\textsc{Meteor} Alignments\\
\noindent\normalsize Reference top row\\
Hypothesis left column\\
Matches identified by color and symbol\\\\
Color spectrum follows reference word order\\\\
\small
\noindent\begin{tabular}{|l|p{10pt}|}
\hline
Match Type&\\
\hline
Exact&\ssp\mex\\
\hline
Stem / Synonym / Paraphrase&\ssp\map\\
\hline
Deleted&\ssp\mrm\\
\hline
\end{tabular}
\vspace{6pt}
\noindent Key: match markers for sentences
\newpage
'''
DOC_FOOTER = r'''\end{document}'''
#
# Gnuplot
#
ROW_LABEL = ['0.0-0.1', '0.1-0.2', '0.2-0.3', '0.3-0.4', '0.4-0.5', '0.5-0.6', \
'0.6-0.7', '0.7-0.8', '0.8-0.9', '0.9-1.0']
def write_dat_file(dat_file, data, xlabel='Score', syslabels=None):
col_label = [xlabel[0].upper() + xlabel[1:]]
for i in range(len(data)):
if syslabels and len(syslabels) > i:
col_label.append(syslabels[i])
else:
col_label.append('System-{0}'.format(i + 1))
dat_out = open(dat_file, 'w')
print >>dat_out, '\t'.join(col_label)
for row in zip(ROW_LABEL, zip(*data)):
print >>dat_out, row[0] + '\t' + '\t'.join([str(x) for x in row[1]])
dat_out.close()
def write_plot_hist(work_dir, dat_file, plot_file, eps_file, xlabel='Score', num_data_cols=1):
uc_label = xlabel[0].upper() + xlabel[1:]
col_line = ''
for i in range(num_data_cols - 1):
col_line += ', \'\' u {0} ti col'.format(i + 3)
plot_out = open(shutil.os.path.join(work_dir, plot_file), 'w')
print >> plot_out, GNUPLOT_HISTOGRAM.format(data=dat_file, eps=eps_file, \
label=uc_label, columns=col_line)
plot_out.close()
def gnuplot(work_dir, plot_file):
subprocess.Popen([gnuplot_cmd, plot_file], cwd=work_dir, \
stdout=subprocess.PIPE, stderr=subprocess.PIPE).wait()
GNUPLOT_HISTOGRAM = '''\
set auto x
set auto y
set style data histogram
set style histogram cluster gap 1
set style fill solid border -1
set boxwidth 0.9
set xtic rotate by -45 scale 0
set xlabel '{label}'
set ylabel 'Number of segments'
set terminal postscript eps enhanced color solid rounded 18
set output '{eps}'
plot '{data}' u 2:xtic(1) ti col{columns}
'''
| 11,189 | 29.407609 | 185 |
py
|
blend
|
blend-master/tools/meteor-1.4/xray/MeteorAlignment.py
|
import math
ALIGN_DEFAULT = 1
ALIGN_METEOR = 2
MATCH_TYPES = ['ex', 'ap', 'ap', 'ap', 'rm']
NO_MATCH = 'blank'
class ScoredAlignment(object):
name = ''
sen1 = []
sen2 = []
p = 0.0
r = 0.0
frag = 0.0
score = 0.0
matrix = [[]]
sen1_matched = []
def __init__(self, align_in=None, a_type=None):
if align_in and a_type:
self.read_alignment(align_in, a_type)
def read_alignment(self, align_in, a_type=ALIGN_DEFAULT):
'''Read next alignment from an input stream
'''
# Read next line
line = align_in.readline()
if not line:
return
# Line should be 'Alignment...'
if not line.startswith('Alignment'):
print 'Error: alignment does not start with Alignment line'
return
# Alignment name
f = line.split()
if a_type == ALIGN_METEOR:
# Name tokens
self.name = '\t'.join(f[1:-4])
# P R Fr Sc
self.p, self.r, self.frag, self.score = map(float, f[-4:])
else:
self.name = line.strip()
# Sentence words
self.sen1 = align_in.readline().split()
self.sen2 = align_in.readline().split()
# Matrix
self.matrix = []
self.sen1_matched = []
for w1 in self.sen1:
row = []
for w2 in self.sen2:
row.append('')
self.matrix.append(row)
self.sen1_matched.append(NO_MATCH)
# discard header 'Line2Start...'
align_in.readline()
# Read matches
while True:
line = align_in.readline()
if not line.strip():
break
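# Match line: '<sen2_start:len> <sen1_start:len> <module> <score>';
# module indexes MATCH_TYPES (a -1 from stray-match deletion maps to 'rm')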
m2, m1, mod_name, s = line.split()
m2_s, m2_l = map(int, m2.split(':'))
m1_s, m1_l = map(int, m1.split(':'))
mod = int(mod_name)
for i in range(m1_l):
self.sen1_matched[m1_s + i] = m2_s
for j in range(m2_l):
self.matrix[m1_s + i][m2_s + j] = MATCH_TYPES[mod]
# Reverse sentence 2 and alignment to render right to left
def rtl(self):
self.sen2.reverse()
for x in self.matrix:
x.reverse()
self.sen1_matched.reverse()
class ScoredSegment(object):
sen_len = 0
p = 0.0
r = 0.0
frag = 0.0
score = 0.0
def __init__(self, sen_len, p, r, frag, score):
self.sen_len = sen_len
self.p = p
self.r = r
self.frag = frag
self.score = score
def extract_scores(alignments):
scores = []
for align in alignments:
scores.append(ScoredSegment(len(align.sen2), align.p, align.r, \
align.frag, align.score))
return scores
def read_align_file(align_file, max_align=-1, a_type=ALIGN_METEOR):
a_in = open(align_file)
alignments = []
count = 0
while True:
if max_align != -1 and count >= max_align:
break
count += 1
a = ScoredAlignment(a_in, a_type)
if not a.name:
break
alignments.append(a)
a_in.close()
return alignments
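# Comparators for sorting alignment pairs; with reverse=True (as used in
# xray.py) they order by sys2's gain over sys1 and by absolute score
# difference, respectively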
def cmp_score_best(x, y):
diff = (x[0].score - x[1].score) - (y[0].score - y[1].score)
return -1 if diff > 0 else 1 if diff < 0 else 0
def cmp_score_diff(x, y):
diff = abs(x[0].score - x[1].score) - abs(y[0].score - y[1].score)
return 1 if diff > 0 else -1 if diff < 0 else 0
def cmp_score(x, y):
diff = x.score - y.score
return 1 if diff > 0 else -1 if diff < 0 else 0
def get_score_dist(scores, size=10):
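# Histogram of |score| into `size` equal-width buckets on [0, 1];
# e.g. size=10 puts 0.37 in bucket 3, and NaN scores count in bucket 0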
step = 1.0 / size
dist = [0] * size
for s in [abs(x) for x in scores]:
if math.isnan(s):
dist[0] += 1
continue
dist[min(size - 1, int(math.floor(float(s) / step)))] += 1
return dist
| 3,837 | 25.468966 | 72 |
py
|
blend
|
blend-master/tools/meteor-1.4/xray/xray.py
|
#!/usr/bin/env python
import optparse, shutil, subprocess, sys, tempfile
from MeteorAlignment import *
from Generation import *
def main(argv):
if not (check_xelatex() and check_gnuplot()):
sys.exit(1)
# Options
opt = optparse.OptionParser( \
usage='Usage: %prog [options] <align.out> [align.out2 ...]')
opt.add_option('-c', '--compare', action='store_true', dest='compare', \
default=False, help='compare alignments of two result sets (only first 2 input files used)')
opt.add_option('-b', '--best-first', action='store_true', dest='bestfirst', \
default=False, help='Sort by improvement of sys2 over sys1')
opt.add_option('-n', '--no-align', action='store_true', dest='noalign', \
default=False, help='do not visualize alignments')
opt.add_option('-x', '--max', dest='maxalign', default='-1', \
metavar='MAX', help='max alignments to sample (default use all)')
opt.add_option('-p', '--prefix', dest='prefix', default='mx', \
metavar='PRE', help='prefix for output files (default mx)')
opt.add_option('-l', '--label', dest='label', default=None, \
metavar='LBL', help='optional system label list, comma separated: label1,label2,...')
opt.add_option('-u', '--unifont', action='store_true', dest='uni', \
default=False, help='use unifont (use for non-western languages)')
opt.add_option('-r', '--right-to-left', action='store_true', dest='rtl', \
default=False, help='language written right to left')
# Parse
o, a = opt.parse_args()
if not a:
print 'MX: X-Ray your translation output'
opt.print_help()
sys.exit(1)
compare = o.compare
best_first = o.bestfirst
no_align = o.noalign
max_align = int(o.maxalign)
prefix = o.prefix
label = o.label
uni = o.uni
rtl = o.rtl
align_files = a
seg_scores = []
label_list = label.split(',') if label else []
for i in range(len(label_list)):
label_list[i] = label_list[i][0].upper() + label_list[i][1:]
for i in range(len(label_list), len(a)):
label_list.append('System-{0}'.format(i + 1))
pre_dir = prefix + '-files'
try:
shutil.os.mkdir(pre_dir)
except OSError:
print >> sys.stderr, 'Dir {0} exists, will overwrite contents'\
.format(pre_dir)
#
# Visualize alignments
#
# Compare 2 alignments
if compare:
# File check
if len(align_files) < 2:
print 'Comparison requires 2 alignment files'
sys.exit(1)
# Out files
pdf_file = prefix + '-align.pdf'
tex_file = 'align.tex'
# Read alignments
align_1 = read_align_file(a[0], max_align)
align_2 = read_align_file(a[1], max_align)
seg_scores.append(extract_scores(align_1))
seg_scores.append(extract_scores(align_2))
alignments = zip(align_1, align_2)
alignments.sort(cmp=cmp_score_best if best_first else cmp_score_diff,
reverse=True)
if not no_align:
# Write tex file
tex_out = open(shutil.os.path.join(pre_dir, tex_file), 'w')
# Header
print >> tex_out, DEC_HEADER1
print >> tex_out, get_font(uni)
print >> tex_out, DEC_HEADER2
print >> tex_out, DOC_HEADER_COMPARE.format(sys1=label_list[0], \
sys2=label_list[1])
# Print each alignment
for i in range(len(alignments)):
a1, a2 = alignments[i]
if rtl:
a1.rtl()
a2.rtl()
if not check_printable(a1, a2):
continue
print_align_table(tex_out, a1, a2)
# Print footer
print >> tex_out, DOC_FOOTER
# Close file
tex_out.close()
# Compile pdf file
print >> sys.stderr, \
'Compiling {0} - this may take a few minutes...'.format(pdf_file)
xelatex(tex_file, pdf_file, work_dir=pre_dir)
# Write N individual alignment files
else:
for i in range(len(align_files)):
# Out files
pdf_file = '{0}-align-{1}.pdf'.format(prefix, label_list[i].lower())
tex_file = 'align-{1}.tex'.format(prefix, i + 1)
# Read alignments
alignments = read_align_file(a[i], max_align)
seg_scores.append(extract_scores(alignments))
alignments.sort(cmp=cmp_score, reverse=True)
if no_align:
continue
# Write tex file
tex_out = open(shutil.os.path.join(pre_dir, tex_file), 'w')
# Header
print >> tex_out, DEC_HEADER1
print >> tex_out, get_font(uni)
print >> tex_out, DEC_HEADER2
print >> tex_out, DOC_HEADER_SINGLE.format(sysname=label_list[i])
# Print each alignment
for i in range(len(alignments)):
a1 = alignments[i]
if rtl:
a1.rtl()
if not check_printable(a1):
continue
print_align_table(tex_out, a1)
# Print footer
print >> tex_out, DOC_FOOTER
# Close file
tex_out.close()
# Compile pdf file
print >> sys.stderr, \
'Compiling {0} - this may take a few minutes...'.format(pdf_file)
xelatex(tex_file, pdf_file, work_dir=pre_dir)
#
# Graph scores
#
# All scores
for stat in ('score', 'frag', 'p', 'r'):
dat_file = '{0}-all.dat'.format(stat)
plot_file = '{0}-all.plot'.format(stat)
eps_file = '{0}-all.eps'.format(stat)
dists = []
for scores in seg_scores:
dists.append(get_score_dist([getattr(x, stat) for x in scores]))
write_dat_file(shutil.os.path.join(pre_dir, dat_file), dists, stat, \
label_list)
write_plot_hist(pre_dir, dat_file, plot_file, eps_file, stat, \
len(dists))
gnuplot(pre_dir, plot_file)
# Scores by length
for stat in ('score', 'frag', 'p', 'r'):
for r in [[1, 10], [11, 25], [26, 50], [51]]:
if len(r) == 2:
label = '{0}-{1}'.format(r[0], r[1])
else:
label = '{0}+'.format(r[0])
dat_file = '{0}-{1}.dat'.format(stat, label)
plot_file = '{0}-{1}.plot'.format(stat, label)
eps_file = '{0}-{1}.eps'.format(stat, label)
dists = []
for scores in seg_scores:
if len(r) == 2:
values = [getattr(x, stat) for x in scores
if r[0] <= x.sen_len <= r[1]]
else:
values = [getattr(x, stat) for x in scores if x.sen_len >= r[0]]
dists.append(get_score_dist(values))
write_dat_file(shutil.os.path.join(pre_dir, dat_file), dists, \
stat, label_list)
write_plot_hist(pre_dir, dat_file, plot_file, eps_file, stat, \
len(dists))
gnuplot(pre_dir, plot_file)
# Write files
score_pdf = prefix + '-score.pdf'
score_tex = 'score.tex'
shutil.copyfile(shutil.os.path.join(shutil.os.path.dirname(__file__), \
'template', 'score.tex'), shutil.os.path.join(pre_dir, score_tex))
print >> sys.stderr, \
'Compiling {0}...'.format(score_pdf)
xelatex(score_tex, score_pdf, work_dir=pre_dir)
print >> sys.stderr, \
'Supporting files written to {0}.'.format(pre_dir)
if __name__ == '__main__' : main(sys.argv)
| 7,705 | 37.148515 | 98 |
py
|
blend
|
blend-master/tools/meteor-1.4/xray/visualize_alignments.py
|
#!/usr/bin/env python
import sys
from MeteorAlignment import *
from Generation import *
def main(argv):
if not check_xelatex():
sys.exit()
if len(argv[1:]) < 2:
print 'usage: {0} <align.out> <prefix> [max]'.format(argv[0])
print 'writes: <prefix>.pdf, <prefix>.tex'
print 'max determines max number of alignments to visualize'
sys.exit()
align_file = argv[1]
prefix = argv[2]
max_align = int(argv[3]) if len(argv[1:]) > 2 else -1
pdf_file = prefix + '.pdf'
tex_file = prefix + '.tex'
alignments = read_align_file(align_file, max_align=max_align, a_type=ALIGN_DEFAULT)
tex_out = open(tex_file, 'w')
print >> tex_out, DEC_HEADER1
print >> tex_out, get_font(True)
print >> tex_out, DEC_HEADER2
print >> tex_out, DOC_HEADER_ALIGN
for i in range(len(alignments)):
a = alignments[i]
if not check_printable(a):
continue
print_align_table(tex_out, a, a_type=ALIGN_DEFAULT)
# Print footer
print >> tex_out, DOC_FOOTER
# Close file
tex_out.close()
print >> sys.stderr, \
'Compiling {0} - this may take a few minutes...'.format(pdf_file)
xelatex(tex_file, pdf_file)
if __name__ == '__main__' : main(sys.argv)
| 1,274 | 26.12766 | 87 |
py
|
blend
|
blend-master/tools/meteor-1.5/mt-diff/mt-diff.py
|
#!/usr/bin/env python
import math, os, re, shutil, subprocess, sys, tempfile
# MT-Diff: measure changes in segment-level quality between two systems
# according to BLEU and Meteor
bleu_script = os.path.abspath(os.path.join(os.path.dirname(__file__), \
'files', 'mteval-v13m.pl'))
meteor_jar = os.path.abspath(os.path.join(os.path.dirname( \
os.path.dirname(__file__)), 'meteor-1.4.jar'))
langs = 'en cz de es fr ar other'
labels = [(-1.0 + 0.1 * i, -0.9 + 0.1 * i) for i in range(20)]
labels.insert(10, (0, 0))
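# 20 difference buckets of width 0.1 spanning -1.0..1.0, plus an
# exact-zero row spliced in at index 10 (kept in sync with diff_dist)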
def main(argv):
# Meteor jar check
if not os.path.exists(meteor_jar):
print 'Please edit the meteor_jar line of {0} to reflect the location of meteor-*.jar'.format(__file__)
sys.exit(1)
# Usage
if len(argv[1:]) < 4:
print 'usage: {0} <lang> <sys1.hyp> <sys2.hyp> <ref1> [ref2 ...]'. \
format(argv[0])
print 'langs: {0}'.format(langs)
sys.exit(1)
# Language
lang = argv[1]
if lang not in langs.split():
print 'langs: {0}'.format(langs)
sys.exit(1)
# Files
hyp1_file = argv[2]
hyp2_file = argv[3]
ref_files = argv[4:]
# Work directory
work_dir = tempfile.mkdtemp(prefix='mt-diff-')
# SGML Files
hyp1_sgm = os.path.join(work_dir, 'hyp1')
hyp2_sgm = os.path.join(work_dir, 'hyp2')
src_sgm = os.path.join(work_dir, 'src')
ref_sgm = os.path.join(work_dir, 'ref')
# Hyp1
write_sgm(hyp1_file, hyp1_sgm, \
'<tstset trglang="any" setid="any" srclang="any">', '</tstset>')
# Hyp2
write_sgm(hyp2_file, hyp2_sgm, \
'<tstset trglang="any" setid="any" srclang="any">', '</tstset>')
# Src (ref1)
ref_len = write_sgm(ref_files[0], src_sgm, \
'<srcset trglang="any" setid="any" srclang="any">', '</srcset>')
# Ref (all refs)
write_ref_sgm(ref_files, ref_sgm, \
'<refset trglang="any" setid="any" srclang="any">', '</refset>')
# BLEU
print 'BLEU scoring hyp1...'
bleu1, bs1 = bleu(hyp1_sgm, ref_sgm, src_sgm, work_dir)
print 'BLEU scoring hyp2...'
bleu2, bs2 = bleu(hyp2_sgm, ref_sgm, src_sgm, work_dir)
bleu_diff = diff_scr(bleu1, bleu2)
bleu_dd = diff_dist(bleu_diff)
# Meteor
print 'Meteor scoring hyp1...'
meteor1, ms1 = meteor(hyp1_sgm, ref_sgm, lang, work_dir)
print 'Meteor scoring hyp2...'
meteor2, ms2 = meteor(hyp2_sgm, ref_sgm, lang, work_dir)
meteor_diff = diff_scr(meteor1, meteor2)
meteor_dd = diff_dist(meteor_diff)
# Header
print ''
print '+---------------------------------+'
print '| Segment Level Difference |'
print '+-------------+--------+----------+'
print '| Change | BLEU | Meteor |'
print '+-------------+--------+----------+'
# Scores
for (l, b, m) in zip(labels, bleu_dd, meteor_dd):
if l == (0, 0):
print '| 0.0 | {2:6} | {3:6} |'.format(l[0], l[1], b, m)
else:
print '| {0:4} - {1:4} | {2:6} | {3:6} |'.format(l[0], l[1], b, m)
# Footer
print '+-------------+--------+----------+'
print '| System2 + | {0:6} | {1:6} |'. \
format(sum(bleu_dd[11:]), sum(meteor_dd[11:]))
print '| System2 - | {0:6} | {1:6} |'. \
format(sum(bleu_dd[0:10]), sum(meteor_dd[0:10]))
print '+-------------+--------+----------+'
print '| # Segments | {0:6} |'.format(ref_len)
print '+-------------+-------------------+'
print '| System Level Score |'
print '+-------------+-------------------+'
print '| System1 | {0:0.4f} | {1:0.4f} |'.format(bs1, ms1)
print '| System2 | {0:0.4f} | {1:0.4f} |'.format(bs2, ms2)
print '+-------------+--------+----------+'
# Cleanup
shutil.rmtree(work_dir)
def bleu(hyp, ref, src, work_dir=os.curdir):
# Run BLEU
bleu_cmd = ['perl', bleu_script, '-t', hyp, '-r', ref, '-s', src, '-b', \
'--metricsMATR', '--no-norm']
subprocess.Popen(bleu_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, \
cwd=work_dir).wait()
# Get scores from file
seg = {}
scr = open(os.path.join(work_dir, 'BLEU-seg.scr'))
for line in scr:
part = line.strip().split()
seg['{0}:{1}'.format(part[2], part[3])] = float(part[4])
scr.close()
scr = open(os.path.join(work_dir, 'BLEU-sys.scr'))
sys_s = float(scr.readline().split()[-1])
scr.close()
return (seg, sys_s)
def meteor(hyp, ref, lang='en', work_dir=os.curdir):
# Run Meteor
meteor_cmd = ['java', '-Xmx2G', '-jar', meteor_jar, hyp, ref, '-sgml', \
'-l', lang]
subprocess.Popen(meteor_cmd, stdout=subprocess.PIPE, \
stderr=subprocess.PIPE, cwd=work_dir).wait()
# Get scores from file
seg = {}
scr = open(os.path.join(work_dir, 'meteor-seg.scr'))
for line in scr:
part = line.strip().split()
seg['{0}:{1}'.format(part[2], part[3])] = float(part[4])
scr.close()
scr = open(os.path.join(work_dir, 'meteor-sys.scr'))
sys_s = float(scr.readline().split()[-1])
scr.close()
return (seg, sys_s)
def diff_scr(scr1, scr2):
diff = []
for key in scr1.keys():
diff.append(scr2[key] - scr1[key])
return diff
def diff_dist(diff):
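# e.g. diffs [0.05, -0.23, 0.0] -> one count each in the (0.0, 0.1)
# row, the (-0.3, -0.2) row, and the exact-zero row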
step = 0.1
dist = [0] * 20
zero = 0
for d in diff:
if d == 0:
zero += 1
else:
dist[min(19, int(10 + d * 10))] += 1
dist.insert(10, zero)
return dist
def write_sgm(in_file, out_sgm, header, footer):
file_in = open(in_file)
file_out = open(out_sgm, 'w')
print >> file_out, header
print >> file_out, '<doc sysid="any" docid="any">'
i = 0
for line in file_in:
i += 1
print >> file_out, '<seg id="{0}"> {1} </seg>'.format(i, line.strip())
print >> file_out, '</doc>'
print >> file_out, footer
file_in.close()
file_out.close()
return i
def write_ref_sgm(in_files, out_sgm, header, footer):
file_out = open(out_sgm, 'w')
print >> file_out, header
sys_id = 0
for in_file in in_files:
sys_id += 1
file_in = open(in_file)
print >> file_out, '<doc sysid="{0}" docid="any">'.format(sys_id)
i = 0
for line in file_in:
i += 1
print >> file_out, '<seg id="{0}"> {1} </seg>'. \
format(i, line.strip())
print >> file_out, '</doc>'
file_in.close()
print >> file_out, footer
file_out.close()
if __name__ == '__main__' : main(sys.argv)
| 6,507 | 31.217822 | 111 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/build_wordnet_files.py
|
#!/usr/bin/env python
import os, sys
# Set for WordNet3
excFiles = ["adj.exc", "adv.exc", "noun.exc", "verb.exc"]
senseFile = "index.sense"
nounFile = "data.noun"
verbFile = "data.verb"
adjFile = "data.adj"
nounRelations = ["@", "@i", "~", "~i"] # Hypernym (instance), Hyponym (instance)
verbRelations = ["@", "~", "*"] # Hypernym, Hyponym, Entailment
adjRelations = ["\\"] # Pertainym
def main(argv):
if len(argv) < 3:
print "Build synonym files from WordNet"
print "usage:", argv[0], "<wordnetDictDir>", "<outDir>", "[language]"
print "example:", os.path.basename(argv[0]), \
"/usr/local/WordNet-3.0/dict", "synonyms"
sys.exit(1)
wnDir = argv[1]
outDir = argv[2]
lang = "english"
if len(argv) > 3 : lang = argv[3]
# Create exceptions file
exc = {} # exc[word] = formList
for excFile in excFiles:
inExc = open(os.path.join(wnDir, excFile), "r")
while True:
line = inExc.readline()
if not line : break
words = line.split()
form = words[0]
for i in range(1, len(words)):
word = words[i]
if word not in exc.keys():
exc[word] = []
exc[word].append(form)
inExc.close()
outExc = open(os.path.join(outDir, lang + ".exceptions"), "w")
for word in sorted(exc.keys()):
outExc.write(word + "\n")
formLine = ""
for form in exc[word]:
formLine += form + " "
outExc.write(formLine.strip() + "\n")
outExc.close()
# Create Synsets file
# For reasonable runtime, this assumes that different senses of the same
# word are on sequential lines. If this is not the case, change the synonym
# file to point to a sorted version (any consistent sorting method).
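# index.sense lines look like 'lemma%lex_sense synset_offset ...':
# everything before '%' is the word, field 2 is its synset offset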
inSyn = open(os.path.join(wnDir, senseFile), "r")
outSyn = open(os.path.join(outDir, lang + ".synsets"), "w")
curWord = ""
synSets = ""
while True:
line = inSyn.readline()
if not line : break
terms = line.split()
word = terms[0].split("%")[0]
synSet = terms[1]
if word != curWord:
if curWord != "":
outSyn.write(curWord + "\n")
outSyn.write(synSets.strip() + "\n")
curWord = word
synSets = ""
synSets += synSet + " "
outSyn.write(curWord + "\n")
outSyn.write(synSets.strip() + "\n")
inSyn.close()
outSyn.close()
# Create Relations (Hypernymy, Hyponymy, Entailment) file
outRel = open(os.path.join(outDir, lang + ".relations"), "w")
scanData(os.path.join(wnDir, nounFile), nounRelations, outRel)
scanData(os.path.join(wnDir, verbFile), verbRelations, outRel)
scanData(os.path.join(wnDir, adjFile), adjRelations, outRel)
outRel.close()
# Scan a data file and write extras to output stream
def scanData(fileName, pointerList, outStream):
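# data.* lines: header fields and words, then 4-field pointer tuples
# (symbol offset pos source/target) before the '|' gloss; field 7 is the
# first pointer for single-word synsets, and the membership test below
# tolerates the shift for multi-word entries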
inData = open(fileName, "r")
while True:
line = inData.readline()
if not line : break
if line.startswith(" "):
continue
terms = line.split()
synSet = terms[0]
extraLine = ""
i = 7
while i < len(terms):
if terms[i] == "|":
break
if terms[i] in pointerList:
extraLine += terms[i + 1] + " "
i += 3
i += 1
if (extraLine != ""):
outStream.write(synSet + "\n")
outStream.write(extraLine.strip() + "\n")
inData.close()
if __name__ == "__main__" : main(sys.argv)
| 3,141 | 24.136 | 80 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/wmt_bleu.py
|
#!/usr/bin/env python
# Score with BLEU, produce WMT score files
import os, subprocess, sys
def main(argv):
if len(argv[1:]) < 5:
print 'usage: {0} <test> <ref> <lang-pair> <test-set> <system>'.format(
argv[0])
print 'writes bleu.lang-pair.test-set.system.{seg.scr,sys.scr}'
sys.exit(1)
BLEU = [os.path.join(os.path.dirname(__file__), 'bleu.py')]
tst = argv[1]
ref = argv[2]
lp = argv[3]
ts = argv[4]
s = argv[5]
seg_f = 'bleu.{}.{}.{}.seg.scr'.format(lp, ts, s)
sys_f = 'bleu.{}.{}.{}.sys.scr'.format(lp, ts, s)
stop = False
if os.path.exists(seg_f):
print 'exists: {}'.format(seg_f)
stop = True
if os.path.exists(sys_f):
print 'exists: {}'.format(sys_f)
stop = True
if stop:
sys.exit(1)
seg_o = open(seg_f, 'w')
sys_o = open(sys_f, 'w')
BLEU.append(tst)
BLEU.append(ref)
p = subprocess.Popen(BLEU, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
i = 0
while True:
line = p.stderr.readline()
i += 1
if not line:
break
print >> seg_o, 'bleu\t{}\t{}\t{}\t{}\t{}'.format(lp, ts, s, i, line.strip())
line = p.stdout.readline()
print >> sys_o, 'bleu\t{}\t{}\t{}\t{}'.format(lp, ts, s, line.strip())
seg_o.close()
sys_o.close()
if __name__ == '__main__' : main(sys.argv)
| 1,420 | 24.375 | 85 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/combine_segcor_trainset.py
|
#!/usr/bin/env python
import os, shutil, sys
def main(argv):
if len(argv) < 3:
print "Create a single Meteor training set from HTER test sets"
print "usage:", argv[0], "<outDir> <hterDir1> [hterDir2] ..."
sys.exit(1)
outDir = argv[1]
hterDirs = argv[2:]
if os.path.exists(outDir):
print "File", outDir, "exists, aborting to avoid overwriting files"
sys.exit(1)
os.mkdir(outDir)
for hterDir in hterDirs:
base = os.path.basename(hterDir)
shutil.copy(os.path.join(hterDir, "tst.sgm"), os.path.join(outDir, \
base + ".tst"))
shutil.copy(os.path.join(hterDir, "ref.sgm"), os.path.join(outDir, \
base + ".ref"))
shutil.copy(os.path.join(hterDir, "ter.seg"), os.path.join(outDir, \
base + ".ter"))
if __name__ == "__main__" : main(sys.argv)
| 780 | 24.193548 | 70 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/sgmlize.py
|
#!/usr/bin/env python
# Convert to and from SGML easily. There exist many SGML/XML standards
# for MT evaluation. This script produces files in a format compatible
# with meteor-*.jar, mteval-v*.pl, and tercom.*.jar
import codecs, re, sys
sys.stdin = codecs.getreader('utf-8')(sys.stdin)
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
def main(argv):
if len(argv[1:]) < 1:
print 'SGMLize'
print 'Do you want a [s]rc, [t]est, [r]eference, or [p]laintext?'
print 'usage: {0} (s|t|r|p) < <textfile>'.format(argv[0])
print 'ex: {0} t < sys-output.txt > sys-output.sgm'.format(argv[0])
sys.exit(1)
t_type = argv[1]
if t_type not in ['s', 't', 'r', 'p']:
print 'usage: {0} (s|t|r|p) < <textfile>'.format(argv[0])
sys.exit(1)
if t_type == 'p':
while True:
line = sys.stdin.readline()
if not line:
break
r = re.search(u'<seg[^>]+>\s*(.*\S)\s*<.seg>', line, re.I)
if r:
print unescape(r.group(1))
return
tag = 'srcset' if t_type == 's' else 'tstset' if t_type == 't' else 'refset'
seg = 0
print u'<{0} trglang="any" setid="any" srclang="any">'.format(tag)
print u'<doc docid="any" sysid="sys">'
while True:
line = sys.stdin.readline()
if not line:
break
seg += 1
print u'<seg id="{0}"> {1} </seg>'.format(seg, escape(line.strip()))
print u'</doc>'
print u'</{0}>'.format(tag)
def escape(s):
return s.replace('&', '&amp;').replace('"', '&quot;').replace('\'', '&apos;'). \
replace('<', '&lt;').replace('>', '&gt;')
def unescape(s):
return s.replace('&quot;', '"').replace('&apos;', '\'').replace('&lt;', '<'). \
replace('&gt;', '>').replace('&amp;', '&')
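# Example: escape('1 < 2 & "x"') -> '1 &lt; 2 &amp; &quot;x&quot;';
# '&' is escaped first and unescaped last so the round-trip is clean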
if __name__ == '__main__': main(sys.argv)
| 1,881 | 30.366667 | 84 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/unroll_wmt_ranks.py
|
#!/usr/bin/env python
import sys
LANGS = {
'cs': 'Czech',
'en': 'English',
'es': 'Spanish',
'de': 'German',
'fr': 'French',
'hi': 'Hindi',
'ru': 'Russian',
}
N_SYSTEMS = 5
def main(argv):
if len(argv[1:]) != 1:
sys.stderr.write('usage: {} fr-en <wmt-data.csv >fr-en.rank\n'.format(argv[0]))
sys.exit(2)
# Language pair
lp = argv[1]
(l1, l2) = (LANGS[l] for l in lp.split('-'))
# Read header
names = dict((k, v) for (v, k) in enumerate(sys.stdin.readline().strip().split(',')))
src = names['srclang']
tgt = names['trglang']
idx = names['srcIndex']
systems = [names['system{}Id'.format(i)] for i in range(1, N_SYSTEMS + 1)]
ranks = [names['system{}rank'.format(i)] for i in range(1, N_SYSTEMS + 1)]
# Find matching lines
for line in sys.stdin:
fields = line.strip().split(',')
if fields[src] != l1 or fields[tgt] != l2:
continue
ranked = []
for (system, rank) in zip(systems, ranks):
s = fields[system]
r = int(fields[rank])
# Skip blank
if r != -1:
ranked.append((r, s))
# Sort by rank, lowest to highest
ranked.sort()
# Unroll to binary judgments
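# e.g. ranks [(1, A), (2, B), (2, C)] emit A>B and A>C; the B-C tie
# yields no judgment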
for i in range(len(ranked)):
for j in range(i + 1, len(ranked)):
# Skip ties
if ranked[i][0] < ranked[j][0]:
sys.stdout.write('{id}\t{lp}\t{sys1}\t{lp}\t{sys2}\n'.format(id=fields[idx], lp=lp, sys1=ranked[i][1], sys2=ranked[j][1]))
if __name__ == '__main__':
main(sys.argv)
| 1,679 | 27.474576 | 142 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/delete_stray_matches.py
|
#!/usr/bin/env python
import re, sys
DEFAULT_DIST = 0.0
DEFAULT_LEN = 0
def main(argv):
# Directions
if len(argv[1:]) < 1:
sys.stderr.write('Using defaults - for help, use {0} -h\n'.format(argv[0]))
min_dist = DEFAULT_DIST
min_len = DEFAULT_LEN
words = []
# help or min distance
if len(argv[1:]) > 0:
if argv[1] in ('-h', '--help'):
print 'Delete single matches to improve monotonicity of alignments'
print ''
print 'usage:', argv[0], 'min_rel_dist', 'min_seg_len', \
'word_list', '<', 'matcher.out', '>', 'matcher.out.mon'
print ''
print 'min_rel_dist - minimum relative distance for deletion' + \
' (default = X)'
print 'min_seg_len - minimum segment length (reference) to' + \
' consider (default = X)'
print 'word_list - file of words, one per line, to consider' + \
' for deletion (default = all words)'
sys.exit()
else:
min_dist = float(argv[1])
# min length
if len(argv[1:]) > 1:
min_len = int(argv[2])
# word list
if len(argv[1:]) > 2:
words_in = open(argv[3])
for line in words_in:
words.append(line.strip().split()[0])
words_in.close()
# Read alignments
while True:
# Next line should be 'Alignment...'
line = sys.stdin.readline()
# End of file
if not line:
break
if not line.startswith('Alignment'):
print 'Error: file does not start with Alignment line'
print 'Please use exact output of Matcher'
sys.exit(1)
print line,
sen1 = sys.stdin.readline()
words1 = sen1.split()
print sen1,
sen2 = sys.stdin.readline()
words2 = sen2.split()
print sen2,
print sys.stdin.readline(),
# Read matches
match_words2 = []
match_words1 = []
match_start2 = []
match_start1 = []
match_len2 = []
match_len1 = []
mods = []
scores = []
while True:
line = sys.stdin.readline()
if not line.strip():
break
m2, m1, mod, score = line.split()
m2_s, m2_l = map(int, m2.split(':'))
match_start2.append(m2_s)
match_len2.append(m2_l)
match_words2.append(words2[m2_s : m2_s + m2_l])
m1_s, m1_l = map(int, m1.split(':'))
match_start1.append(m1_s)
match_len1.append(m1_l)
match_words1.append(words1[m1_s : m1_s + m1_l])
mods.append(mod)
scores.append(score)
# For sentences minimum length or above that have more than one match
if len(words2) >= min_len and len(mods) > 1:
# Look for stray matches
for i in range(len(mods)):
# Phrase matches safe
if match_len1[i] > 1 or match_len2[i] > 1:
continue
# Words not on list safe
if words:
if words2[match_start2[i]] not in words \
and words1[match_start1[i]] not in words:
continue
# Distance from monotonicity with previous match
if i == 0:
dist_prev = 0
else:
dist_prev = abs((match_start1[i] - match_start1[i - 1]) \
- (match_start2[i] - match_start2[i - 1]))
# Distance from monotonicity with next match
if i == len(mods) - 1:
dist_next = 0
else:
dist_next = abs((match_start1[i + 1] - match_start1[i]) \
- (match_start2[i + 1] - match_start2[i]))
# Anchored matches safe
if i != 0 and dist_next == 0:
continue
if i != len(mods) - 1 and dist_prev == 0:
continue
# Total jump distance
dist = min(dist_prev, dist_next)
# Delete if exceeds threshold
if float(dist) / len(words2) >= min_dist:
mods[i] = -1 # dist / len(words2)
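# module -1 marks a deleted match; downstream consumers such as
# meteorToMosesAlign.py skip match lines whose module is -1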
# Write new match lines
for i in range(len(mods)):
print '{0}:{1}\t\t\t{2}:{3}\t\t\t{4}\t\t{5}'.format( \
match_start2[i], match_len2[i], match_start1[i], match_len1[i], \
mods[i], scores[i])
print ''
if __name__ == '__main__' : main(sys.argv)
| 4,642 | 33.909774 | 83 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/agg.py
|
#!/usr/bin/env python
# Aggregate: sum input lines by column. Useful for aggregating
# MeteorStats lines as a MERT implementation would.
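# Example: printf '1 2\n3 4\n' | agg.py  ->  '4 6'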
from sys import argv, exit, stdin
parse = int
if len(argv) > 1:
if argv[1].startswith('-h'):
print 'usage: agg [-f] FILE'
exit()
if argv[1] == '-f':
parse = float
else:
stdin = open(argv[1], 'r')
if len(argv) > 2:
stdin = open(argv[2], 'r')
agg = None
while True:
line = stdin.readline()
if not line:
break
f = line.split()
if agg == None:
agg = [0] * len(f)
if len(f) != len(agg):
print 'error: number of columns not constant'
exit(1)
for i in range(len(agg)):
agg[i] += parse(f[i])
if agg:
print ' '.join([str(x) for x in agg])
stdin.close()
| 814 | 19.375 | 63 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/freq.py
|
#!/usr/bin/env python
# Simple word relative frequency counter. Used to create
# function word lists.
from sys import stdin, argv
freq = {}
total = 0
if argv[1:]:
stdin = open(argv[1], 'r')
while True:
line = stdin.readline()
if not line:
break
f = line.split()
for w in f:
freq[w] = 1 if w not in freq else freq[w] + 1
total += 1
for w in sorted(freq, cmp=lambda x,y: freq[y] - freq[x]):
print w, float(freq[w]) / total
| 476 | 18.08 | 57 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/ter.py
|
#!/usr/bin/env python
import os, subprocess, shutil, sys, tempfile
TERCOM = os.path.join(os.path.dirname(__file__), 'tercom-0.8.0.jar')
def main(argv):
if len(argv[1:]) < 2:
print >> sys.stderr, 'Usage: {0} hyps refs [--no-norm] [--char]'.format(argv[0])
print >> sys.stderr, 'Segment scores to stderr, final to stdout'
sys.exit(1)
norm = '-s' if '--no-norm' in argv[3:] else '-N'
char = '--char' in argv[3:]
work = tempfile.mkdtemp(prefix='ter.')
hyps = os.path.join(work, 'hyps')
mktrans(argv[1], hyps, char)
refs = os.path.join(work, 'refs')
mktrans(argv[2], refs, char)
out = open(os.path.join(work, 'out'), 'w')
err = open(os.path.join(work, 'err'), 'w')
tab = os.path.join(work, 'ter')
p = subprocess.Popen(['java', '-jar', TERCOM, '-h', hyps, '-r', refs, '-o',
'sum', '-n', tab, norm], stdout=out, stderr=err)
p.wait()
out.close()
err.close()
t = open(tab + '.sum')
while True:
line = t.readline()
if line.startswith('Sent Id'):
t.readline()
break
while True:
line = t.readline()
if line.startswith('---'):
break
print >> sys.stderr, line.split()[-1]
print t.readline().split()[-1]
shutil.rmtree(work)
def mktrans(f, tmp, char=False):
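# Write tercom's 'trans' format: one segment per line, the text
# followed by its id in parentheses; --char splits text into characters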
o = open(tmp, 'w')
i = 0
for line in open(f, 'r'):
i += 1
if char:
line = ' '.join([ch for ch in line if ch != ' '])
print >> o, '{0} ({1})'.format(line.strip(), i)
o.close()
if __name__ == '__main__' : main(sys.argv)
| 1,644 | 26.416667 | 88 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/rankconsist.py
|
#!/usr/bin/env python
import collections
import sys
def main(argv):
if len(argv[1:]) != 2:
sys.stderr.write('usage: {} scr rank\n'.format(argv[0]))
sys.exit(2)
scr = collections.defaultdict(lambda: collections.defaultdict(dict))
for line in open(argv[1]):
# Meteor en-ru newstest2013 balagur.2693 5 0.373206146468917
(metric, lp, testset, system, id, score) = line.strip().split()
system = system.split('.')[0]
score = float(score)
scr[lp][system][id] = score
conc = 0
disc = 0
for line in open(argv[2]):
# 1018 cs-en cu-bojar cs-en jhu-heiro
(id, lp1, sys1, lp2, sys2) = line.strip().split()
if scr[lp1][sys1][id] > scr[lp2][sys2][id]:
conc += 1
else:
disc += 1
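# Kendall's tau over the binary judgments:
# (concordant - discordant) / (concordant + discordant)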
tau = float(conc - disc) / (conc + disc)
sys.stderr.write('Tau: {} ({}/{})\n'.format(tau, conc, conc + disc))
if __name__ == '__main__':
main(sys.argv)
| 976 | 25.405405 | 72 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/meteor_shower.py
|
#!/usr/bin/env python
# Learn Meteor parameters quickly with up to 42 Meteors
# Run as many as requested in parallel
# Meteors use 1 cpu / 2gb each
import collections, os, subprocess, sys, threading
def main(argv):
if len(argv[1:]) < 7:
print >> sys.stderr, 'Learn Meteor parameters efficiently with parallel Trainers'
print >> sys.stderr, 'Usage: {0} <meteor.jar> <lang> <n-mods> <task> <data_dir> <work_dir> <n-jobs> [other args like -a par.gz, -ch, ...]'.format(argv[0])
sys.exit(1)
# Args
meteor_jar = os.path.abspath(argv[1])
lang = argv[2]
n_mods = int(argv[3])
task = argv[4]
data_dir = os.path.abspath(argv[5])
work_dir = os.path.abspath(argv[6])
n_jobs = int(argv[7])
sb_dir = os.path.join(work_dir, 'sandbox')
other_args = argv[8:]
# Working dir
if os.path.exists(work_dir):
print 'Work dir {0} exists, exiting'.format(work_dir)
sys.exit(1)
os.mkdir(work_dir)
os.mkdir(sb_dir)
# Weight ranges for jobs based on mod count
w_start_list = [1, 0, 0, 0]
w_end_list = [1, 0, 0, 0]
for i in range(n_mods):
w_end_list[i] = 1
w_start = ''
w_end = ''
for i in range(4):
w_start += str(w_start_list[i]) + ' '
w_end += str(w_end_list[i]) + ' '
w_start = w_start.strip()
w_end = w_end.strip()
# Step is always the same
step = '0.05 0.10 0.05 0.05 1.0 0.2 0.2 0.2'
# Queue Trainer commands
queue = collections.deque([])
for i in range(42):
sb_sub_dir = os.path.join(sb_dir, '{0}'.format(i + 1))
os.mkdir(sb_sub_dir)
out_file = os.path.join(work_dir, 'output.{0}'.format(i + 1))
a = 0.05 * (i / 2)
(g_min, g_max) = (0, 0.5) if (i % 2 == 0) else (0.55, 1.0)
start = '{0} 0 {1} 0 {2}'.format(a, g_min, w_start)
end = '{0} 2.5 {1} 1.0 {2}'.format(a, g_max, w_end)
# Retry in case of filesystem failure
trainer_cmd = 'cd {sd} && while true ; do sleep 1 ; java -Xmx1G -cp {0} Trainer {1} {2} -l {3} -i \'{4}\' -f \'{5}\' -s \'{6}\' {args} > {7} ; if [ "$?" = "0" ] ; then break ; fi ; done'.format(meteor_jar, task, data_dir, lang, start, end, step, out_file, sd=sb_sub_dir, args=' '.join(other_args))
queue.append(trainer_cmd)
# Run Trainers
for i in range(n_jobs):
queue.append(-1)
threads = []
for i in range(n_jobs):
t = threading.Thread(target=run, args=(queue,))
threads.append(t)
t.start()
for t in threads:
t.join()
# Sort output
sort_cmd = 'cat {0}/output.* |sort -g -S4G --parallel={1} >{0}/output.sort'.format(work_dir, n_jobs)
subprocess.call(sort_cmd, shell=True)
# Run commands until end of queue
def run(queue):
while True:
cmd = queue.popleft()
if cmd == -1:
return
subprocess.call(cmd, shell=True)
if __name__ == '__main__' : main(sys.argv)
| 2,952 | 32.556818 | 305 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/meteorToMosesAlign.py
|
#!/usr/bin/env python
# Author: Austin Matthews
import sys
for Line in sys.stdin:
Line = Line.strip()
if Line.startswith( "Alignment" ):
sys.stdin.next() # Hyp
sys.stdin.next() # Ref
sys.stdin.next() # Table header
Alignments = []
for Line in sys.stdin:
Line = Line.strip()
if not Line:
break
HypPair, RefPair, Module, Score = Line.split()
if Module == "-1":
continue
HypIndex, HypLength = HypPair.split( ":" )
RefIndex, RefLength = RefPair.split( ":" )
HypIndex = int( HypIndex )
RefIndex = int( RefIndex )
HypLength = int( HypLength )
RefLength = int( RefLength )
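# Expand each phrase match into word-level (ref, hyp) index pairs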
for i in range( HypIndex, HypIndex + HypLength ):
for j in range( RefIndex, RefIndex + RefLength ):
Alignments.append( ( j, i ) )
print " ".join( [ "%d-%d" % Pair for Pair in sorted(Alignments) ] )
| 834 | 23.558824 | 69 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/wmt_ter.py
|
#!/usr/bin/env python
import os, subprocess, sys
def main(argv):
if len(argv[1:]) < 5:
print 'usage: {0} <test> <ref> <lang-pair> <test-set> <system>'.format(
argv[0])
print 'writes ter.lang-pair.test-set.system.{seg.scr,sys.scr}'
sys.exit(1)
TER = [os.path.join(os.path.dirname(__file__), 'ter.py')]
tst = argv[1]
ref = argv[2]
lp = argv[3]
ts = argv[4]
s = argv[5]
seg_f = 'ter.{}.{}.{}.seg.scr'.format(lp, ts, s)
sys_f = 'ter.{}.{}.{}.sys.scr'.format(lp, ts, s)
stop = False
if os.path.exists(seg_f):
print 'exists: {}'.format(seg_f)
stop = True
if os.path.exists(sys_f):
print 'exists: {}'.format(sys_f)
stop = True
if stop:
sys.exit(1)
seg_o = open(seg_f, 'w')
sys_o = open(sys_f, 'w')
TER.append(tst)
TER.append(ref)
p = subprocess.Popen(TER, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
i = 0
while True:
line = p.stderr.readline()
i += 1
if not line:
break
print >> seg_o, 'ter\t{}\t{}\t{}\t{}\t{}'.format(lp, ts, s, i, line.strip())
line = p.stdout.readline()
print >> sys_o, 'ter\t{}\t{}\t{}\t{}'.format(lp, ts, s, line.strip())
seg_o.close()
sys_o.close()
if __name__ == '__main__' : main(sys.argv)
| 1,370 | 24.388889 | 84 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/bleu.py
|
#!/usr/bin/env python
import os, shutil, subprocess, sys, tempfile
mteval_pl = os.path.abspath(os.path.join(os.path.dirname(__file__), 'mteval-v13m.pl'))
def main(argv):
if len(argv[1:]) < 2:
print >> sys.stderr, 'Usage: {0} <hyp> <ref> [--no-norm] [--char]'.format(argv[0])
print >> sys.stderr, 'Segment scores to stderr, final to stdout'
sys.exit(1)
work = tempfile.mkdtemp(prefix='bleu.')
src_sgm = os.path.join(work, 'src.sgm')
tst_sgm = os.path.join(work, 'tst.sgm')
ref_sgm = os.path.join(work, 'ref.sgm')
opts = argv[3:]
cmd = ['perl', mteval_pl, '-s', src_sgm, '-t', tst_sgm, '-r', ref_sgm, '--metricsMATR']
out = open(os.path.join(work, 'out'), 'w')
err = open(os.path.join(work, 'err'), 'w')
char = False
for opt in opts:
if opt == '--char':
char = True
else:
cmd.append(opt)
sgm(argv[2], src_sgm, 'srcset', char)
sgm(argv[1], tst_sgm, 'tstset', char)
sgm(argv[2], ref_sgm, 'refset', char)
subprocess.Popen(cmd, stdout=out, stderr=err, cwd=work).wait()
for line in open(os.path.join(work, 'BLEU-seg.scr')):
print >> sys.stderr, line.split()[-1]
print open(os.path.join(work, 'BLEU-sys.scr')).readline().split()[-1]
shutil.rmtree(work)
def sgm(f_in, f_out, f_type, char=False):
i = open(f_in, 'r')
o = open(f_out, 'w')
s = 0
print >> o, '<{0} trglang="trg" setid="set" srclang="src">'.format(f_type)
print >> o, '<doc docid="doc" sysid="sys">'
for line in i:
# Strip invalid utf-8
line = line.decode('utf-8', errors='ignore').encode('utf-8')
s += 1
if char:
line = ' '.join([ch for ch in line if ch != ' '])
print >> o, '<seg id="{0}"> {1} </seg>'.format(s, line.strip())
print >> o, '</doc>'
print >> o, '</{0}>'.format(f_type)
i.close()
o.close()
if __name__ == '__main__' : main(sys.argv)
| 1,979 | 30.428571 | 91 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/filter_merge_rank_set.py
|
#!/usr/bin/env python
# Filter and merge multiple rank training sets into a single set. Segments are
# relabeled by original data set and renumbered after filtering. The resulting
# combined set contains only segments for which rank judgments exist.
# Filtering is highly recommended even for single sets to greatly reduce
# training time.
import os, re, sys
def main(argv):
if len(argv[1:]) < 2:
print 'usage: {0} <clean-dir> <file1.rank> [file2.rank ...]'.format(argv[0])
print 'Rank files should have same basename (src-tgt.rank)'
print 'Original test sets identified with ~n'
exit(1)
clean_dir = argv[1]
r_files = argv[2:]
if clean_dir == os.path.dirname(os.path.abspath(r_files[0])):
print 'This is a bad idea. Please specify a different clean-dir.'
sys.exit(1)
# Single rank file
r_out = open(os.path.join(clean_dir, os.path.basename(r_files[0])),
'w')
r_n = 0
id = 0
# For each rank file
for r_file in r_files:
r_n += 1
# Renumber segments in rank file, keep order
seg = {}
r_in = open(r_file, 'r')
for line in r_in:
f = line.split()
if f[0] not in seg:
id += 1
seg[f[0]] = id
# Append rank set numbers to system names
print >> r_out, '{0}\t{1}\t{2}\t{3}\t{4}'.format(seg[f[0]],
append_n(f[1], r_n), f[2], append_n(f[3], r_n), f[4])
r_in.close()
r_base = os.path.basename(os.path.abspath(r_file))
prefix = r_base[0:r_base.find('.')]
f_dir = os.path.dirname(os.path.abspath(r_file))
# Filter and renumber segments in system outputs and ref file
for sgm_file in os.listdir(f_dir):
if not (sgm_file.startswith(prefix) and sgm_file.endswith('.sgm')):
continue
sgm_in = open(os.path.join(f_dir, sgm_file), 'r')
# Append rank set numbers to system names
sgm_out = open(os.path.join(clean_dir,
append_n(sgm_file, r_n)), 'w')
for line in sgm_in:
r = re.search(u'^<seg id="([0-9]+)">', line, re.I)
if not r:
print >> sgm_out, line.strip()
continue
if r.group(1) in seg:
print >> sgm_out, re.sub(u'^<seg id="[0-9]+">',
'<seg id="{0}">'.format(seg[r.group(1)]), line).strip()
sgm_in.close()
sgm_out.close()
# Finished writing rank file
r_out.close()
# Append set number to appropriate location
def append_n(s, n):
i = s.find('.')
if i == -1:
i = len(s)
return '{0}~{1}{2}'.format(s[0:i], n, s[i:])
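# Worked examples of append_n (illustrative filenames):
#   append_n('src-tgt.sgm', 2) -> 'src-tgt~2.sgm'
#   append_n('sysname', 1)     -> 'sysname~1'  (no '.' present)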
if __name__ == '__main__' : main(sys.argv)
| 2,869 | 32.372093 | 84 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/tc_train_set.py
|
#!/usr/bin/env python
import os
import sys
ID = 'ID'
MT = 'MT'
SCORES = ('HTER', 'Rating')
NORM_SCORES = ('Keypress', 'Edits', 'Time')
PAUSE_SCORES = ('APR', 'PWR')
class DataSet:
def __init__(self, dir):
os.mkdir(dir)
self.tst = open(os.path.join(dir, 'corpus.tst'), 'w')
self.ref = open(os.path.join(dir, 'corpus.ref'), 'w')
self.ter = open(os.path.join(dir, 'corpus.ter'), 'w')
self.tst.write('<tstset trglang="any" setid="any" srclang="any">\n<doc docid="any" sysid="sys">\n')
self.ref.write('<refset trglang="any" setid="any" srclang="any">\n<doc docid="any" sysid="sys">\n')
self.i = 0
def add(self, hyp, ref, score):
self.i += 1
self.tst.write('<seg id="{}"> {} </seg>\n'.format(self.i, hyp))
self.ref.write('<seg id="{}"> {} </seg>\n'.format(self.i, ref))
self.ter.write('any {} {}\n'.format(self.i, score))
def close(self):
self.tst.write('</doc>\n</tstset>\n')
        self.ref.write('</doc>\n</refset>\n')
self.tst.close()
self.ref.close()
self.ter.close()
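# Minimal usage sketch for DataSet (hypothetical path and values): each
# instance writes a tst/ref SGML pair plus a TER-style score file under its
# own directory, in the layout the Meteor Trainer expects.
#   ds = DataSet('/tmp/HTER')            # hypothetical output directory
#   ds.add('hypothesis text', 'reference text', '0.25')
#   ds.close()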
def main():
if len(sys.argv[1:]) < 3:
sys.stderr.write('Create Meteor training sets from TransCenter reports and pre-generated (standard) references\n')
sys.stderr.write('usage: {} [out-dir] report-dir1 ref1 [report-dir2 ref2 ...]\n'.format(sys.argv[0]))
sys.exit(2)
out_dir = os.path.abspath(sys.argv[1])
if os.path.exists(out_dir):
sys.stderr.write('{} exists, exiting.\n'.format(out_dir))
sys.exit(1)
os.mkdir(out_dir)
# Open streams
hyps = open(os.path.join(out_dir, 'corpus.hyps'), 'w')
refs = open(os.path.join(out_dir, 'corpus.refs'), 'w')
data = {}
for label in SCORES + NORM_SCORES + PAUSE_SCORES:
data[label] = DataSet(os.path.join(out_dir, label))
# Scan input directories
dirs = [sys.argv[i] for i in range(2, len(sys.argv), 2)]
ref_files = [sys.argv[i] for i in range(3, len(sys.argv), 2)]
for (dir, rf) in zip((os.path.abspath(dir) for dir in dirs), (os.path.abspath(rf) for rf in ref_files)):
sys.stderr.write('{} ({})\n'.format(dir, rf))
ref_lines = [line.strip() for line in open(rf)]
ref_lens = [len(line.split()) for line in ref_lines]
mt_lines = []
for f in os.listdir(dir):
# Find everything else from summary files
if f.startswith('summary.') and f.endswith('.csv'):
user = f[len('summary.'):len(f)-len('.csv')]
csv = open(os.path.join(dir, f))
sys.stderr.write('+ {}\n'.format(user))
headers = dict((y, x) for (x, y) in enumerate(csv.readline().strip().split('\t')))
for line in csv:
fields = line.strip().split('\t')
id = int(fields[headers[ID]])
mt = fields[headers[MT]]
# Keep for other files
mt_lines.append(mt)
# Add to master list
hyps.write('{}\n'.format(mt))
refs.write('{}\n'.format(ref_lines[id-1]))
# Add raw scores to data
for label in SCORES:
data[label].add(mt, ref_lines[id-1], fields[headers[label]])
# Length-norm scores
for label in NORM_SCORES:
data[label].add(mt, ref_lines[id-1], float(fields[headers[label]]) / ref_lens[id-1])
# Corresponding pause file
csv = open(os.path.join(dir, 'pause.{}.csv'.format(user)))
headers = dict((y, x) for (x, y) in enumerate(csv.readline().strip().split('\t')))
for line in csv:
fields = line.strip().split('\t')
id = int(fields[headers[ID]])
for label in PAUSE_SCORES:
data[label].add(mt_lines[id-1], ref_lines[id-1], fields[headers[label]])
# Close streams
hyps.close()
refs.close()
for label in SCORES + NORM_SCORES + PAUSE_SCORES:
data[label].close()
if __name__ == '__main__':
main()
| 4,189 | 39.679612 | 122 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/new_language.py
|
#!/usr/bin/env python
import codecs
import glob
import gzip
import os
import shutil
import subprocess
import sys
METEOR_JAR = glob.glob(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'meteor-*.jar'))[0]
JVM_SIZE = '12G'
def lower(in_f, out_f, limit=None):
inp = gzip.open(in_f) if in_f.endswith('.gz') else open(in_f)
out = gzip.open(out_f, 'w') if out_f.endswith('.gz') else open(out_f, 'w')
i = 0
for line in inp:
i += 1
out.write(line.decode('utf-8').lower().encode('utf-8'))
if limit and i == limit:
break
inp.close()
out.close()
def par_fmt(in_f, out_f):
with gzip.open(out_f, 'wb') as out:
for line in gzip.open(in_f):
(p1, p2, prob) = line.strip().split(' ||| ')
out.write('{}\n'.format(prob))
out.write('{}\n'.format(p1))
out.write('{}\n'.format(p2))
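# Sketch of the conversion par_fmt performs (illustrative entries): one
# 'p1 ||| p2 ||| prob' line becomes the three-line record Meteor reads.
#   in:  'big house ||| large house ||| 0.5'
#   out: '0.5'
#        'big house'
#        'large house'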
def main(argv):
if len(argv[1:]) < 4:
        sys.stderr.write('usage: {} out-dir corpus.f corpus.e phrase-table.gz [target-corpus.e]\n'.format(argv[0]))
sys.exit(2)
(out_dir, corpus_f, corpus_e, pt) = argv[1:5]
tgt_corpus_e = argv[5] if len(argv[1:]) == 5 else corpus_e
if os.path.exists(out_dir):
        sys.stderr.write('{} exists, exiting.\n'.format(out_dir))
sys.exit(1)
os.mkdir(out_dir)
# Lowercase inputs
corpus_f_lc = os.path.join(out_dir, 'corpus.f')
corpus_e_lc = os.path.join(out_dir, 'corpus.e')
pt_lc = os.path.join(out_dir, 'pt.gz')
tgt_corpus_e_lc = os.path.join(out_dir, 'tgt-corpus.e')
sys.stderr.write('Preparing inputs:\n')
sys.stderr.write('+ lc f\n')
lower(corpus_f, corpus_f_lc)
sys.stderr.write('+ lc e\n')
lower(corpus_e, corpus_e_lc)
sys.stderr.write('+ lc phrase table\n')
lower(pt, pt_lc)
sys.stderr.write('+ lc target e\n')
lower(tgt_corpus_e, tgt_corpus_e_lc, 10000)
sys.stderr.write('Running Parex:\n')
par_dir = os.path.join(out_dir, 'parex')
PAREX = ['java', '-XX:+UseCompressedOops', '-Xmx{}'.format(JVM_SIZE), '-cp', METEOR_JAR, 'Parex', corpus_f_lc, corpus_e_lc, pt_lc, tgt_corpus_e_lc, par_dir]
subprocess.call(PAREX)
sys.stderr.write('Copying files:\n')
dir_files = os.path.join(out_dir, 'meteor-files')
os.mkdir(dir_files)
sys.stderr.write('+ function.words\n')
shutil.copy(os.path.join(par_dir, 'parex.e'), os.path.join(dir_files, 'function.words'))
sys.stderr.write('+ paraphrase.gz\n')
par_fmt(os.path.join(par_dir, 'paraphrase.gz'), os.path.join(dir_files, 'paraphrase.gz'))
    sys.stderr.write('{} is now ready to be passed to Meteor with -new flag\n'.format(dir_files))
if __name__ == '__main__':
main(sys.argv)
| 2,698 | 32.320988 | 160 |
py
|
blend
|
blend-master/tools/meteor-1.5/scripts/wmt_fmt.py
|
#!/usr/bin/env python
# Read Meteor output, write to WMT score format
import os, sys
def main(argv):
if len(argv[1:]) < 3:
print 'usage: {0} <lang-pair> <test-set> <system> [metric]'.format(
argv[0])
print 'writes metric.lang-pair.test-set.system.{seg.scr,sys.scr}'
print ''
print 'Pipe Meteor output to this script'
sys.exit(1)
lp = argv[1]
ts = argv[2]
s = argv[3]
m = argv[4] if len(argv[1:]) > 3 else 'Meteor'
seg_f = '{0}.{1}.{2}.{3}.seg.scr'.format(m, lp, ts, s)
sys_f = '{0}.{1}.{2}.{3}.sys.scr'.format(m, lp, ts, s)
stop = False
if os.path.exists(seg_f):
print 'exists: {0}'.format(seg_f)
stop = True
if os.path.exists(sys_f):
print 'exists: {0}'.format(sys_f)
stop = True
if stop:
sys.exit(1)
seg_o = open(seg_f, 'w')
sys_o = open(sys_f, 'w')
while True:
line = sys.stdin.readline()
if not line:
break
if line.startswith('Segment'):
f = line.split()
print >> seg_o, '{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(m, lp, ts, s,
f[1], f[3])
if line.startswith('Final score'):
scr = line.split()[2]
print >> sys_o, '{0}\t{1}\t{2}\t{3}\t{4}'.format(m, lp, ts, s,
scr)
seg_o.close()
sys_o.close()
if __name__ == '__main__' : main(sys.argv)
| 1,447 | 25.814815 | 79 |
py
|
blend
|
blend-master/tools/meteor-1.5/xray/Generation.py
|
import re, shutil, subprocess, sys, tempfile
from MeteorAlignment import *
# Edit as needed
xelatex_cmd = '/usr/bin/xelatex'
# Edit as needed
gnuplot_cmd = '/usr/bin/gnuplot'
def check_xelatex():
if not shutil.os.path.exists(xelatex_cmd):
print 'Could not find xelatex_cmd \'{0}\''.format(xelatex_cmd)
print 'Please install xetex or update path in Generation.py'
return False
return True
def check_gnuplot():
    if not shutil.os.path.exists(gnuplot_cmd):
print 'Could not find gnuplot_cmd \'{0}\''.format(gnuplot_cmd)
print 'Please install gnuplot or update path in Generation.py'
return False
return True
#
# LaTeX
#
MAX_LEN = 50
def xelatex(tex_file, pdf_file, work_dir=shutil.os.curdir):
# Working dir
out_dir = tempfile.mkdtemp()
# PDF output file
if '.' in tex_file:
out_pdf = tex_file[0:tex_file.rfind('.')] + '.pdf'
else:
out_pdf = tex_file + '.pdf'
# Run xelatex
subprocess.Popen([xelatex_cmd, '-interaction', 'batchmode', \
'-output-directory', out_dir, tex_file], cwd=work_dir, \
stdout=subprocess.PIPE, stderr=subprocess.PIPE).wait()
# Copy pdf file and remove temp files
shutil.copyfile(shutil.os.path.join(out_dir, out_pdf), pdf_file)
shutil.rmtree(out_dir)
def escape(s):
s = s.replace('\\', '\\backslash')
s = re.sub('([$&%{}#_])', r'\\\1', s)
return s
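# Worked example of escape() (illustrative string):
#   escape('50% of $x_1') -> '50\% of \$x\_1'
# Backslashes are rewritten to '\backslash' first so they are not
# re-escaped by the substitution that follows.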
def get_font(uni):
if uni:
return r'''\usepackage{fontspec}
\setmainfont{unifont}
'''
else:
return r'''\renewcommand{\rmdefault}{phv} % Arial
\renewcommand{\sfdefault}{phv} % Arial
'''
def check_printable(a1, a2=None):
# Too long
if len(a1.sen2) > MAX_LEN:
print >> sys.stderr, 'Skipping', a1.name, '- too large:', \
len(a1.sen2), 'reference words'
return False
# Different references?
if a2 and a1.sen2 != a2.sen2:
print >> sys.stderr, 'Skipping', a1.name, \
'- different references used'
return False
return True
def print_align_table(tex_out, a1, a2=None, a_type=ALIGN_METEOR):
'''LaTeX generation function: use with caution'''
print >> tex_out, r'%Table start'
# Print color declarations
r = 0.6
g = 0.6
b = 1.0
step = 0.4 / max(1, len(a1.sen2))
half = len(a1.sen2) / 2
for i in range(len(a1.sen2)):
if i >= half:
r += step * 1.5
g += step * .25
b -= step * 1.5
else:
r += step * .5
g += step * 1.0
b -= step * .5
print >> tex_out, r'\definecolor{{ref{0}}}{{rgb}}{{{1},{2},{3}}}'\
.format(i, min(1.0, r), min(1.0, g), min(1.0, b))
# Print table start
line = r'\noindent\begin{tabular}{|l'
for i in range(len(a1.sen2)):
line += r'|'
line += r'p{10pt}'
if a2:
line += r'|l'
line += r'|}'
print >> tex_out, line
print >> tex_out, r'\hline'
# Print sentence 2
line = ''
if a2:
line += r'\Large\color{z}{$\blacksquare$} \color{y}{$\blacksquare$}'
for i in range(len(a1.sen2)):
w2 = escape(a1.sen2[i])
line += r'&\begin{sideways}' + r'\cellcolor{{ref{0}}}'.format(i) + \
w2 + '\hspace{12pt}\end{sideways}'
if a2:
line += r'&\rex \rap'
print >> tex_out, line + r'\\'
# Print each row for sentences a1.sen1, a2.sen1
max_len = max(len(a1.sen1), len(a2.sen1)) if a2 else len(a1.sen1)
fill1 = FILL
if a2:
fill1 = FILL_L
fill2 = FILL_R
for i in range(max_len):
print >> tex_out, r'\hline'
line = ''
if i < len(a1.sen1):
line += r'\ssp '
if a1.sen1_matched[i] != NO_MATCH:
line += r'\cellcolor{{ref{0}}}'.format(a1.sen1_matched[i])
line += escape(a1.sen1[i]) + r' \ssp'
for j in range(len(a1.sen2)):
line += r'&\hspace{2pt}'
if i < len(a1.sen1):
match = a1.matrix[i][j]
if match:
line += fill1[a1.matrix[i][j]]
if a2 and i < len(a2.sen1):
match = a2.matrix[i][j]
if match:
line += fill2[match]
if a2:
line += r'&'
if i < len(a2.sen1):
line += r'\ssp '
if a2.sen1_matched[i] != NO_MATCH:
line += r'\cellcolor{{ref{0}}}'.format(a2.sen1_matched[i])
line += escape(a2.sen1[i]) + r'\ssp '
print >> tex_out, line + r'\\'
print >> tex_out, r'\hline'
# Print table footer
print >> tex_out, r'\end{tabular}'
print >> tex_out, r''
print >> tex_out, r'\vspace{6pt}'
# Print alignment information
if a_type == ALIGN_DEFAULT:
print >> tex_out, r'\noindent {0}'.format(a1.name)
# Compare stats
elif a_type == ALIGN_METEOR:
print >> tex_out, r'\noindent Segment {0}\\\\'.format(escape(a1.name))
if a2:
p_diff = a2.p - a1.p
r_diff = a2.r - a1.r
fr_diff = a2.frag - a1.frag
sc_diff = a2.score - a1.score
print >> tex_out, r'\noindent\begin{tabular}{lm{12pt}rm{24pt}rm{24pt}r}'
print >> tex_out, r'\hline'
print >> tex_out, r'P:&&{0:.3f}&\centering vs&{1:.3f}&\centering :&{{\bf\color{{{2}}}{{{3:.3f}}}}}\\'.format(a1.p, a2.p, 'gb' if p_diff >= 0 else 'rb', p_diff)
print >> tex_out, r'R:&&{0:.3f}&\centering vs&{1:.3f}&\centering :&{{\bf\color{{{2}}}{{{3:.3f}}}}}\\'.format(a1.r, a2.r, 'gb' if r_diff >= 0 else 'rb', r_diff)
print >> tex_out, r'Frag:&&{0:.3f}&\centering vs&{1:.3f}&\centering :&{{\bf\color{{{2}}}{{{3:.3f}}}}}\\'.format(a1.frag, a2.frag, 'rb' if fr_diff > 0 else 'gb', fr_diff)
print >> tex_out, r'Score:&&{0:.3f}&\centering vs&{1:.3f}&\centering :&{{\bf\color{{{2}}}{{{3:.3f}}}}}\\'.format(a1.score, a2.score, 'gb' if sc_diff >= 0 else 'rb', sc_diff)
else:
print >> tex_out, r'\noindent\begin{tabular}{lm{12pt}r}'
print >> tex_out, r'\hline'
print >> tex_out, r'P:&&{0:.3f}\\'.format(a1.p)
print >> tex_out, r'R:&&{0:.3f}\\'.format(a1.r)
print >> tex_out, r'Frag:&&{0:.3f}\\'.format(a1.frag)
print >> tex_out, r'Score:&&{0:.3f}\\'.format(a1.score)
print >> tex_out, r'\end{tabular}'
# End table
print >> tex_out, r'%Table end'
print >> tex_out, ''
print >> tex_out, r'\newpage'
print >> tex_out, ''
FILL = {'ex': r'\mex', 'ap': r'\map', 'rm': r'\mrm'}
FILL_L = {'ex': r'\lex', 'ap': r'\lap'}
FILL_R = {'ex': r'\rex', 'ap': r'\rap'}
DEC_HEADER1 = r'''\documentclass[landscape]{article}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Include these packages and declarations in your tex file %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{rotating}
\usepackage{colortbl}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage[T1]{fontenc}
'''
DEC_HEADER2 = r'''
\definecolor{z}{rgb}{0.7,0.7,0.7}
\definecolor{g}{rgb}{0.5,1.0,0.5}
\definecolor{y}{rgb}{1.0,1.0,0.5}
\definecolor{r}{rgb}{1.0,0.5,0.5}
\definecolor{gb}{rgb}{0.0,0.5,0.0}
\definecolor{rb}{rgb}{0.5,0.0,0.0}
\newcommand{\ssp}{\hspace{2pt}}
\newcommand{\lex}{\cellcolor{z}}
\newcommand{\lap}{\cellcolor{y}}
\newcommand{\rex}{$\bullet$}
\newcommand{\rap}{$\boldsymbol\circ$}
\newcommand{\mex}{\cellcolor{g}$\bullet$}
\newcommand{\map}{\cellcolor{y}$\boldsymbol\circ$}
\newcommand{\mrm}{\cellcolor{r}X}
% Search for '%Table start' and '%Table end' to find alignment boundaries
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage[margin=0.5in]{geometry}
\pagenumbering{0}
%\renewcommand{\rmdefault}{phv} % Arial
%\renewcommand{\sfdefault}{phv} % Arial
\renewcommand{\tabcolsep}{1pt}
\begin{document}
'''
DOC_HEADER_COMPARE = r'''
\noindent\large Meteor Alignments\\
\noindent\normalsize Reference top row\\
{sys1} (sentence 1) left column\\
{sys2} (sentence 2) right column\\\\
Matches identified by color (sen 1) and symbol (sen 2)\\\\
Color spectrum follows reference word order\\\\
\small
\noindent\begin{{tabular}}{{|l|c|c|}}
\hline
Match Type&Sentence 1&Sentence 2\\
\hline
Exact&\color{{z}}{{$\blacksquare$}}&\rex\\
\hline
Stem / Synonym / Paraphrase&\color{{y}}{{$\blacksquare$}}&\rap\\
\hline
\end{{tabular}}
\vspace{{6pt}}
\noindent Key: match markers for sentences
\newpage
'''
DOC_HEADER_SINGLE = r'''
\noindent\large Meteor Alignments for {sysname}\\
\noindent\normalsize Reference top row\\
Hypothesis left column\\
Matches identified by color and symbol\\\\
Color spectrum follows reference word order\\\\
\small
\noindent\begin{{tabular}}{{|l|p{{10pt}}|}}
\hline
Match Type&\\
\hline
Exact&\ssp\mex\\
\hline
Stem / Synonym / Paraphrase&\ssp\map\\
\hline
\end{{tabular}}
\vspace{{6pt}}
\noindent Key: match markers for sentences
\newpage
'''
DOC_HEADER_ALIGN = r'''
\noindent\large\textsc{Meteor} Alignments\\
\noindent\normalsize Reference top row\\
Hypothesis left column\\
Matches identified by color and symbol\\\\
Color spectrum follows reference word order\\\\
\small
\noindent\begin{tabular}{|l|p{10pt}|}
\hline
Match Type&\\
\hline
Exact&\ssp\mex\\
\hline
Stem / Synonym / Paraphrase&\ssp\map\\
\hline
Deleted&\ssp\mrm\\
\hline
\end{tabular}
\vspace{6pt}
\noindent Key: match markers for sentences
\newpage
'''
DOC_FOOTER = r'''\end{document}'''
#
# Gnuplot
#
ROW_LABEL = ['0.0-0.1', '0.1-0.2', '0.2-0.3', '0.3-0.4', '0.4-0.5', '0.5-0.6', \
'0.6-0.7', '0.7-0.8', '0.8-0.9', '0.9-1.0']
def write_dat_file(dat_file, data, xlabel='Score', syslabels=None):
col_label = [xlabel[0].upper() + xlabel[1:]]
for i in range(len(data)):
if syslabels and len(syslabels) > i:
col_label.append(syslabels[i])
else:
col_label.append('System-{0}'.format(i + 1))
dat_out = open(dat_file, 'w')
print >>dat_out, '\t'.join(col_label)
for row in zip(ROW_LABEL, zip(*data)):
print >>dat_out, row[0] + '\t' + '\t'.join([str(x) for x in row[1]])
dat_out.close()
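# Sketch of the .dat layout write_dat_file produces for two systems
# (illustrative counts), tab-separated and consumed by the gnuplot
# histogram template below:
#   Score    System-1  System-2
#   0.0-0.1  4         2
#   0.1-0.2  7         9
#   ...      ...       ...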
def write_plot_hist(work_dir, dat_file, plot_file, eps_file, xlabel='Score', num_data_cols=1):
uc_label = xlabel[0].upper() + xlabel[1:]
col_line = ''
for i in range(num_data_cols - 1):
col_line += ', \'\' u {0} ti col'.format(i + 3)
plot_out = open(shutil.os.path.join(work_dir, plot_file), 'w')
print >> plot_out, GNUPLOT_HISTOGRAM.format(data=dat_file, eps=eps_file, \
label=uc_label, columns=col_line)
plot_out.close()
def gnuplot(work_dir, plot_file):
subprocess.Popen([gnuplot_cmd, plot_file], cwd=work_dir, \
stdout=subprocess.PIPE, stderr=subprocess.PIPE).wait()
GNUPLOT_HISTOGRAM = '''\
set auto x
set auto y
set style data histogram
set style histogram cluster gap 1
set style fill solid border -1
set boxwidth 0.9
set xtic rotate by -45 scale 0
set xlabel '{label}'
set ylabel 'Number of segments'
set terminal postscript eps enhanced color solid rounded 18
set output '{eps}'
plot '{data}' u 2:xtic(1) ti col{columns}
'''
| 11,189 | 29.407609 | 185 |
py
|
blend
|
blend-master/tools/meteor-1.5/xray/MeteorAlignment.py
|
import math
ALIGN_DEFAULT = 1
ALIGN_METEOR = 2
MATCH_TYPES = ['ex', 'ap', 'ap', 'ap', 'rm']
NO_MATCH = 'blank'
class ScoredAlignment(object):
name = ''
sen1 = []
sen2 = []
p = 0.0
r = 0.0
frag = 0.0
score = 0.0
matrix = [[]]
sen1_matched = []
def __init__(self, align_in=None, a_type=None):
if align_in and a_type:
self.read_alignment(align_in, a_type)
def read_alignment(self, align_in, a_type=ALIGN_DEFAULT):
'''Read next alignment from an input stream
'''
# Read next line
line = align_in.readline()
if not line:
return
# Line should be 'Alignment...'
if not line.startswith('Alignment'):
print 'Error: alignment does not start with Alignment line'
return
# Alignment name
f = line.split()
if a_type == ALIGN_METEOR:
# Name tokens
self.name = '\t'.join(f[1:-4])
# P R Fr Sc
self.p, self.r, self.frag, self.score = map(float, f[-4:])
else:
self.name = line.strip()
# Sentence words
self.sen1 = align_in.readline().split()
self.sen2 = align_in.readline().split()
# Matrix
self.matrix = []
self.sen1_matched = []
for w1 in self.sen1:
row = []
for w2 in self.sen2:
row.append('')
self.matrix.append(row)
self.sen1_matched.append(NO_MATCH)
# discard header 'Line2Start...'
align_in.readline()
# Read matches
while True:
line = align_in.readline()
if not line.strip():
break
m2, m1, mod_name, s = line.split()
m2_s, m2_l = map(int, m2.split(':'))
m1_s, m1_l = map(int, m1.split(':'))
mod = int(mod_name)
for i in range(m1_l):
self.sen1_matched[m1_s + i] = m2_s
for j in range(m2_l):
self.matrix[m1_s + i][m2_s + j] = MATCH_TYPES[mod]
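    # A match line has the form 'start:length start:length module score',
    # e.g. (illustrative) '3:1 5:2 0 1.0' records that hypothesis words
    # 5-6 match reference word 3 (0-based indices) with module 0, an
    # exact ('ex') match.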
# Reverse sentence 2 and alignment to render right to left
def rtl(self):
self.sen2.reverse()
for x in self.matrix:
x.reverse()
self.sen1_matched.reverse()
class ScoredSegment(object):
sen_len = 0
p = 0.0
r = 0.0
frag = 0.0
score = 0.0
def __init__(self, sen_len, p, r, frag, score):
self.sen_len = sen_len
self.p = p
self.r = r
self.frag = frag
self.score = score
def extract_scores(alignments):
scores = []
for align in alignments:
scores.append(ScoredSegment(len(align.sen2), align.p, align.r, \
align.frag, align.score))
return scores
def read_align_file(align_file, max_align=-1, a_type=ALIGN_METEOR):
a_in = open(align_file)
alignments = []
count = 0
while True:
if max_align != -1 and count >= max_align:
break
count += 1
a = ScoredAlignment(a_in, a_type)
if not a.name:
break
alignments.append(a)
return alignments
def cmp_score_best(x, y):
diff = (x[0].score - x[1].score) - (y[0].score - y[1].score)
return -1 if diff > 0 else 1 if diff < 0 else 0
def cmp_score_diff(x, y):
diff = abs(x[0].score - x[1].score) - abs(y[0].score - y[1].score)
return 1 if diff > 0 else -1 if diff < 0 else 0
def cmp_score(x, y):
diff = x.score - y.score
return 1 if diff > 0 else -1 if diff < 0 else 0
def get_score_dist(scores, size=10):
step = 1.0 / size
dist = [0] * size
for s in [abs(x) for x in scores]:
if math.isnan(s):
dist[0] += 1
continue
dist[min(size - 1, int(math.floor(float(s) / step)))] += 1
return dist
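# Worked example of get_score_dist (illustrative scores): with the default
# size=10 the bin width is 0.1, so
#   get_score_dist([0.05, 0.55, 0.95, 1.0])
# yields [1, 0, 0, 0, 0, 1, 0, 0, 0, 2] -- 1.0 falls in the last bin
# because the index is capped at size - 1.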
| 3,837 | 25.468966 | 72 |
py
|
blend
|
blend-master/tools/meteor-1.5/xray/xray.py
|
#!/usr/bin/env python
import optparse, shutil, subprocess, sys, tempfile
from MeteorAlignment import *
from Generation import *
def main(argv):
if not (check_xelatex() and check_gnuplot()):
sys.exit(1)
# Options
opt = optparse.OptionParser( \
usage='Usage: %prog [options] <align.out> [align.out2 ...]')
opt.add_option('-c', '--compare', action='store_true', dest='compare', \
default=False, help='compare alignments of two result sets (only first 2 input files used)')
opt.add_option('-b', '--best-first', action='store_true', dest='bestfirst', \
default=False, help='Sort by improvement of sys2 over sys1')
opt.add_option('-n', '--no-align', action='store_true', dest='noalign', \
default=False, help='do not visualize alignments')
opt.add_option('-x', '--max', dest='maxalign', default='-1', \
metavar='MAX', help='max alignments to sample (default use all)')
opt.add_option('-p', '--prefix', dest='prefix', default='mx', \
metavar='PRE', help='prefix for output files (default mx)')
opt.add_option('-l', '--label', dest='label', default=None, \
metavar='LBL', help='optional system label list, comma separated: label1,label2,...')
opt.add_option('-u', '--unifont', action='store_true', dest='uni', \
default=False, help='use unifont (use for non-western languages)')
opt.add_option('-r', '--right-to-left', action='store_true', dest='rtl', \
default=False, help='language written right to left')
# Parse
o, a = opt.parse_args()
if not a:
print 'MX: X-Ray your translation output'
opt.print_help()
sys.exit(1)
compare = o.compare
best_first = o.bestfirst
no_align = o.noalign
max_align = int(o.maxalign)
prefix = o.prefix
label = o.label
uni = o.uni
rtl = o.rtl
align_files = a
seg_scores = []
label_list = label.split(',') if label else []
for i in range(len(label_list)):
label_list[i] = label_list[i][0].upper() + label_list[i][1:]
for i in range(len(label_list), len(a)):
label_list.append('System-{0}'.format(i + 1))
pre_dir = prefix + '-files'
try:
shutil.os.mkdir(pre_dir)
except:
print >> sys.stderr, 'Dir {0} exists, will overwrite contents'\
.format(pre_dir)
#
# Visualize alignments
#
# Compare 2 alignments
if compare:
# File check
if len(align_files) < 2:
print 'Comparison requires 2 alignment files'
sys.exit(1)
# Out files
pdf_file = prefix + '-align.pdf'
tex_file = 'align.tex'
# Read alignments
align_1 = read_align_file(a[0], max_align)
align_2 = read_align_file(a[1], max_align)
seg_scores.append(extract_scores(align_1))
seg_scores.append(extract_scores(align_2))
alignments = zip(align_1, align_2)
alignments.sort(cmp=cmp_score_best if best_first else cmp_score_diff,
reverse=True)
if not no_align:
# Write tex file
tex_out = open(shutil.os.path.join(pre_dir, tex_file), 'w')
# Header
print >> tex_out, DEC_HEADER1
print >> tex_out, get_font(uni)
print >> tex_out, DEC_HEADER2
print >> tex_out, DOC_HEADER_COMPARE.format(sys1=label_list[0], \
sys2=label_list[1])
# Print each alignment
for i in range(len(alignments)):
a1, a2 = alignments[i]
if rtl:
a1.rtl()
a2.rtl()
if not check_printable(a1, a2):
continue
print_align_table(tex_out, a1, a2)
# Print footer
print >> tex_out, DOC_FOOTER
# Close file
tex_out.close()
# Compile pdf file
print >> sys.stderr, \
'Compiling {0} - this may take a few minutes...'.format(pdf_file)
xelatex(tex_file, pdf_file, work_dir=pre_dir)
# Write N individual alignment files
else:
for i in range(len(align_files)):
# Out files
pdf_file = '{0}-align-{1}.pdf'.format(prefix, label_list[i].lower())
            tex_file = 'align-{0}.tex'.format(i + 1)
# Read alignments
alignments = read_align_file(a[i], max_align)
seg_scores.append(extract_scores(alignments))
alignments.sort(cmp=cmp_score, reverse=True)
if no_align:
continue
# Write tex file
tex_out = open(shutil.os.path.join(pre_dir, tex_file), 'w')
# Header
print >> tex_out, DEC_HEADER1
print >> tex_out, get_font(uni)
print >> tex_out, DEC_HEADER2
print >> tex_out, DOC_HEADER_SINGLE.format(sysname=label_list[i])
# Print each alignment
for i in range(len(alignments)):
a1 = alignments[i]
if rtl:
a1.rtl()
if not check_printable(a1):
continue
print_align_table(tex_out, a1)
# Print footer
print >> tex_out, DOC_FOOTER
# Close file
tex_out.close()
# Compile pdf file
print >> sys.stderr, \
'Compiling {0} - this may take a few minutes...'.format(pdf_file)
xelatex(tex_file, pdf_file, work_dir=pre_dir)
#
# Graph scores
#
# All scores
for stat in ('score', 'frag', 'p', 'r'):
dat_file = '{0}-all.dat'.format(stat)
plot_file = '{0}-all.plot'.format(stat)
eps_file = '{0}-all.eps'.format(stat)
dists = []
for scores in seg_scores:
dists.append(get_score_dist([eval('x.' + stat) for x in scores]))
write_dat_file(shutil.os.path.join(pre_dir, dat_file), dists, stat, \
label_list)
write_plot_hist(pre_dir, dat_file, plot_file, eps_file, stat, \
len(dists))
gnuplot(pre_dir, plot_file)
# Scores by length
for stat in ('score', 'frag', 'p', 'r'):
for r in [[1, 10], [11, 25], [26, 50], [51]]:
if len(r) == 2:
label = '{0}-{1}'.format(r[0], r[1])
else:
label = '{0}+'.format(r[0])
dat_file = '{0}-{1}.dat'.format(stat, label)
plot_file = '{0}-{1}.plot'.format(stat, label)
eps_file = '{0}-{1}.eps'.format(stat, label)
dists = []
for scores in seg_scores:
if len(r) == 2:
values = [eval('x.' + stat) for x in scores if x.sen_len \
>= r[0] and x.sen_len <= r[1]]
else:
values = [eval('x.' + stat) for x in scores if x.sen_len \
>= r[0]]
dists.append(get_score_dist(values))
write_dat_file(shutil.os.path.join(pre_dir, dat_file), dists, \
stat, label_list)
write_plot_hist(pre_dir, dat_file, plot_file, eps_file, stat, \
len(dists))
gnuplot(pre_dir, plot_file)
# Write files
score_pdf = prefix + '-score.pdf'
score_tex = 'score.tex'
shutil.copyfile(shutil.os.path.join(shutil.os.path.dirname(__file__), \
'template', 'score.tex'), shutil.os.path.join(pre_dir, score_tex))
print >> sys.stderr, \
'Compiling {0}...'.format(score_pdf)
xelatex(score_tex, score_pdf, work_dir=pre_dir)
print >> sys.stderr, \
'Supporting files written to {0}.'.format(pre_dir)
if __name__ == '__main__' : main(sys.argv)
| 7,705 | 37.148515 | 98 |
py
|
blend
|
blend-master/tools/meteor-1.5/xray/visualize_alignments.py
|
#!/usr/bin/env python
import sys
from MeteorAlignment import *
from Generation import *
def main(argv):
if not check_xelatex():
sys.exit()
if len(argv[1:]) < 2:
print 'usage: {0} <align.out> <prefix> [max]'.format(argv[0])
print 'writes: <prefix>.pdf, <prefix>.tex'
print 'max determines max number of alignments to visualize'
sys.exit()
align_file = argv[1]
prefix = argv[2]
max_align = int(argv[3]) if len(argv[1:]) > 2 else -1
pdf_file = prefix + '.pdf'
tex_file = prefix + '.tex'
alignments = read_align_file(align_file, max_align=max_align, a_type=ALIGN_DEFAULT)
tex_out = open(tex_file, 'w')
print >> tex_out, DEC_HEADER1
print >> tex_out, get_font(True)
print >> tex_out, DEC_HEADER2
print >> tex_out, DOC_HEADER_ALIGN
for i in range(len(alignments)):
a = alignments[i]
if not check_printable(a):
continue
print_align_table(tex_out, a, a_type=ALIGN_DEFAULT)
# Print footer
print >> tex_out, DOC_FOOTER
# Close file
tex_out.close()
print >> sys.stderr, \
'Compiling {0} - this may take a few minutes...'.format(pdf_file)
xelatex(tex_file, pdf_file)
if __name__ == '__main__' : main(sys.argv)
| 1,274 | 26.12766 | 87 |
py
|
blend
|
blend-master/scripts/form-meteor-score.py
|
import sys
predir = sys.argv[1]
tst = sys.argv[2]
tstname = tst[5:-4] # rm data/, .sgm
def form_score(inf, outf):
fout = open(predir + outf, 'w')
for idx, line in enumerate(open(inf, 'rU')):
score = line.split()[-1]
fout.write(score + "\n")
fout.close()
if __name__ == '__main__':
form_score('meteor-ex', tstname + '.METEOR-ex')
form_score('meteor-st', tstname + '.METEOR-st')
form_score('meteor-sy', tstname + '.METEOR-sy')
form_score('meteor-pa', tstname + '.METEOR-pa')
| 530 | 25.55 | 51 |
py
|
blend
|
blend-master/scripts/form-gtm-score.py
|
import sys
predir = sys.argv[1]
tst = sys.argv[2]
tmpdir = './tmp/'
tstname = tst[5:-4] # rm data/, .sgm
def form_score(inf, outf):
fout = open(predir + outf, 'w')
for idx, line in enumerate(open(tmpdir + inf, 'rU')):
if 'doc "' in line:
score = line.split()[-1]
fout.write(score + "\n")
fout.close()
if __name__ == '__main__':
form_score('gtm1', tstname + '.GTM-1')
form_score('gtm2', tstname + '.GTM-2')
form_score('gtm3', tstname + '.GTM-3')
| 511 | 21.26087 | 57 |
py
|
blend
|
blend-master/scripts/form-terp-score.py
|
import sys
predir = sys.argv[1]
tst = sys.argv[2]
tstname = tst[5:-4] # rm data/, .sgm
def form_score(inf, outf):
fout = open(predir + outf, 'w')
for idx, line in enumerate(open(inf, 'rU')):
score = line.split()[-2]
fout.write('-' + score + "\n")
fout.close()
if __name__ == '__main__':
form_score('TERbase', tstname + '.nTERbase')
form_score('TER', tstname + '.nTER')
form_score('TERp', tstname + '.nTERp')
form_score('TERp-A', tstname + '.nTERp-A')
| 508 | 24.45 | 48 |
py
|
blend
|
blend-master/scripts/form-bleunist-score.py
|
import sys
predir = sys.argv[1]
tst = sys.argv[2]
inf_bleu = 'BLEU-seg.scr'
inf_nist = 'NIST-seg.scr'
tstname = tst[5:-4] # rm data/ .sgm
fo_bleu = open(predir + tstname + ".BLEU", 'w')
fo_nist = open(predir + tstname + ".NIST", 'w')
for idx, line in enumerate(open(inf_bleu, 'rU')):
score = line.split()[-1]
fo_bleu.write(score + "\n")
for idx, line in enumerate(open(inf_nist, 'rU')):
score = line.split()[-1]
fo_nist.write(score + "\n")
fo_bleu.close()
fo_nist.close()
| 495 | 19.666667 | 49 |
py
|
blend
|
blend-master/scripts/form-rouge-score.py
|
import sys
predir = sys.argv[1]
tst = sys.argv[2]
tstname = tst[5:] # rm data/
inf = 'ROUGE'
predir += tstname
fout1 = open(predir + '.ROUGE-1', 'w')
fout2 = open(predir + '.ROUGE-2', 'w')
fout3 = open(predir + '.ROUGE-3', 'w')
fout4 = open(predir + '.ROUGE-4', 'w')
foutL = open(predir + '.ROUGE-L', 'w')
foutS = open(predir + '.ROUGE-S*', 'w')
foutSU = open(predir + '.ROUGE-SU*', 'w')
foutW = open(predir + '.ROUGE-W', 'w')
for idx, line in enumerate(open(inf, 'rU')):
score = line.split()[-1][2:]
if 'X ROUGE-1 Eval' in line:
fout1.write(score + "\n")
elif 'X ROUGE-2 Eval' in line:
fout2.write(score + "\n")
elif 'X ROUGE-3 Eval' in line:
fout3.write(score + "\n")
elif 'X ROUGE-4 Eval' in line:
fout4.write(score + "\n")
elif 'X ROUGE-L Eval' in line:
foutL.write(score + "\n")
elif 'X ROUGE-W-1.2 Eval' in line:
foutW.write(score + "\n")
elif 'X ROUGE-S* Eval' in line:
foutS.write(score + "\n")
elif 'X ROUGE-SU* Eval' in line:
foutSU.write(score + "\n")
fout1.close()
fout2.close()
fout3.close()
fout4.close()
foutL.close()
foutS.close()
foutSU.close()
foutW.close()
| 1,187 | 23.75 | 44 |
py
|
marabunta
|
marabunta-master/setup.py
|
import os
# from setuptools import setup
from distutils.core import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='marabunta',
version='1.0',
description='Library for design and control of artificial swarms.',
long_description=read("README.md"),
url='https://github.com/david-mateo/marabunta',
author='David Mateo',
author_email='[email protected]',
license='GPLv3',
keywords='control simulation robot swarming',
packages=['marabunta', 'marabunta.models'])
| 578 | 29.473684 | 74 |
py
|
marabunta
|
marabunta-master/examples/dr4/dr4_demo.py
|
from marabunta import *
from marabunta.models import PerimeterDefenseRobot
from math import *
from time import time,sleep
import sys
import threading
from settings import s
class CustomRobot(PerimeterDefenseRobot):
def update(self, deltat, v=None):
"""Same as PerimeterDefenseRobot except
that if there's an obstacle the speed
is capped at 0.08 instead of /2.
"""
self.broadcast_state()
# Perform swarming
target = self.spread_target()
if not target:
h= self.body.get_heading()
target = [1.5*sqrt(self.threshold)*cos(h) ,1.5*sqrt(self.threshold)*sin(h)]
# Avoid obstacles
target = self.correct_target_rotating(target)
obstacle = self.obstacle_near()
if obstacle:
v = min(v,0.08)
self.move_to_target(target, deltat, v, obstacle)
light = self.light_detected()
return light
dt=0.1
total_time = 4 * 60
speed=0.15
# These parameters have to be manually
# set for each robot.
init_pos = s["position"]
init_heading = s["heading"]
ID=s["ID"]
slot = s["slot"]
calibration = s.get("calibration")
mylock = threading.Lock() # required for devices with virtual ports like Raspberry.
body = ebotBody( init_pos , init_heading, mylock)
network = XBeeNetwork( slot, slot+0.1, 1.0, ID , mylock)
with CustomRobot( body, network, 1.e-8 ) as robot:
if calibration:
LS , RS = calibration
body.calibration(LS, RS)
robot.broadcast_state()
# Wait for XBee signal
while True:
messages = network.get_incomings()
if any( m[:4]=="stop" for m in messages if len(m)>1):
raise Exception("Stop!")
if any( m[:2]=="up" for m in messages if len(m)>1):
robot.broadcast_state()
sleep(1)
break
sleep(1)
robot.start_printing(0.5)
# MAIN LOOP
init_time = time()
end_time = init_time + total_time
while time() < end_time:
# Slow initial warmup
if time() < init_time + 5:
speed = 0.01 * (time() - init_time)
else:
speed = 0.15
# Check for a message ordering to go somewhere or to stop
messages = network.get_incomings()
for message in messages:
if len(message)>3 and message[:4]=="stop":
raise Exception("Stop!")
if len(message)>3 and message[:4]=="goto":
try:
gotoxy = message.split()
point = (float(gotoxy[1]), float(gotoxy[2]))
except:
print("Weird message received: ",message)
point = None
if point:
robot.network.send_message(message) # relay message
robot.go_to(point)
print("#Task completed at time %f"%(time()-init_time))
raise Exception("Task completed")
break
light = robot.update(dt, speed)
if light:
print("#Light detected at time %f"%(time()-init_time))
robot.move_forward( 0., 0. )
sleep(4)
raise Exception("I see the light at the end of the tunnel")
sleep(dt)
print("#Finish")
| 3,266 | 30.114286 | 87 |
py
|
marabunta
|
marabunta-master/examples/dr4/settings.py
|
from math import pi
s={}
s["ID"]="RXX"
s["position"] = [0., 0.]
s["heading"] = 0.
s["slot"] = 0.1
#s["calibration"] = [ 0000 , 0000 ]
| 135 | 14.111111 | 35 |
py
|
marabunta
|
marabunta-master/examples/dr4/dr4_simula.py
|
from marabunta import MockBody, MockNetwork
from marabunta.models import PerimeterDefenseRobot
from math import *
from time import time,sleep
import sys
import threading
#from settings import s
dt=0.2
total_time = 5 * 60
speed=0.15
log = open("the_cloud.dat","w+")
settings = []
settings.append( {"ID":1 , "position":[ 0.0 , 0.0 ] , "heading":0.} )
settings.append( {"ID":2 , "position":[ 0.5 , 0.0 ] , "heading":0.} )
settings.append( {"ID":3 , "position":[ 0.0 , 0.5 ] , "heading":0.} )
settings.append( {"ID":4 , "position":[ -0.5 , 0.0 ] , "heading":0.} )
settings.append( {"ID":5 , "position":[ 0.0 , -0.5 ] , "heading":0.} )
settings.append( {"ID":6 , "position":[ 0.5 , 0.5 ] , "heading":0.} )
settings.append( {"ID":7 , "position":[ 0.5 , -0.5 ] , "heading":0.} )
settings.append( {"ID":8 , "position":[ -0.5 , -0.5 ] , "heading":0.} )
settings.append( {"ID":9 , "position":[ -0.5 , 0.5 ] , "heading":0.} )
robots=[ PerimeterDefenseRobot( MockBody(s.get("position") ,s.get("heading")), MockNetwork(log, s.get("ID")) , 1.e-8) for s in settings]
[robot.body.load_obstacles("map_data.dat") for robot in robots]
[robot.turn_on() for robot in robots]
try:
[robot.broadcast_state() for robot in robots]
#robot.start_printing(0.5)
# MAIN LOOP
init_time = time()
end_time = init_time + total_time
for it in range(int(total_time/dt)):
# Check for a message ordering to go somewhere or to stop
messages = [] # network.get_incomings()
for message in messages:
if len(message)>3 and message[:4]=="stop":
raise Exception("Stop!")
if len(message)>3 and message[:4]=="goto":
try:
gotoxy = message.split()
point = (float(gotoxy[1]), float(gotoxy[2]))
except:
print "Weird message received: ",message
point = None
if point:
robot.network.send_message(message) # relay message
robot.go_to(point)
print "#Task completed at time %f"%(time()-init_time)
raise Exception("Task completed")
break
lights = [robot.update(dt, speed) for robot in robots]
if any(lights):
print "#Light detected at time %f"%(time()-init_time)
#robot.move_forward( 0., 0. )
#sleep(4)
raise Exception("I see the light at the end of the tunnel")
#sleep(dt)
finally:
[robot.turn_off() for robot in robots]
print "#Finish"
#robot.turn_off()
| 2,610 | 36.84058 | 136 |
py
|
marabunta
|
marabunta-master/examples/dr3/settings.py
|
from math import pi
s={}
s["ID"]="RXX"
s["position"] = [0., 0.]
s["heading"] = 0.
s["slot"] = 0.1
| 99 | 11.5 | 24 |
py
|
marabunta
|
marabunta-master/examples/dr3/leader.py
|
from marabunta import BaseRobot, ebotBody, XBeeNetwork, XBeeExpirationNetwork
from math import *
from time import time,sleep
import sys
from threading import Lock
from settings import s
class Leader(BaseRobot):
def update(self, deltat, v=None):
self.broadcast_state()
if self.obstacle_infront():
v = 0.0
self.move_forward(deltat, v)
return
dt=0.05
total_time = 50
speed=0.15
try:
num_friends = float(sys.argv[1])-1
except:
num_friends = 4-1
# These parameters have to be manually
# set for each robot.
init_pos = s["position"]
init_heading = s["heading"]
ID=s["ID"]
slot = s["slot"]
mylock = Lock()
body = ebotBody( init_pos , init_heading, mylock)
network = XBeeExpirationNetwork( 1.4, slot, slot+0.1, 1, ID , mylock)
robot = Leader( body, network )
robot.turn_on()
robot.broadcast_state()
friends = len(robot.get_agents())
patience = 50
while friends < num_friends and patience>0:
patience -= 1
print "# Only %i friends detected so far"%friends
print "#", "\t".join(robot.get_agents().keys())
friends = len(robot.get_agents())
robot.broadcast_state()
sleep(0.2)
# MAIN LOOP
rotate = 3
start_time = time()
end_time = time() + total_time
while time() < end_time:
if rotate==3 and time()>start_time + 5.:
rotate -=1
robot.align( [0, -1])
if rotate==2 and time()>start_time + 20:
rotate -=1
robot.align([1,0])
if rotate==1 and time()>start_time + 30:
rotate -=1
robot.align([-1,0])
robot.update(dt, speed)
pos = robot.body.get_position()
print pos[0] , pos[1] , robot.body.get_heading()
sleep(dt)
robot.turn_off()
| 1,679 | 22.333333 | 77 |
py
|
marabunta
|
marabunta-master/examples/dr3/heading.py
|
from marabunta import ebotBody, XBeeNetwork, XBeeExpirationNetwork
from marabunta.models import HeadingConsensusRobot
from math import *
from time import time,sleep
import sys
from threading import Lock
from settings import s
dt=0.05
total_time = 50
speed=0.15
try:
num_friends = float(sys.argv[1])-1
except:
num_friends = 4-1
# These parameters have to be manually
# set for each robot.
init_pos = s["position"]
init_heading = s["heading"]
ID=s["ID"]
slot = s["slot"]
mylock = Lock()
body = ebotBody( init_pos , init_heading, mylock)
network = XBeeExpirationNetwork( 1.4, slot, slot+0.1, 1, ID , mylock)
robot = HeadingConsensusRobot( body, network )
robot.turn_on()
robot.broadcast_state()
friends = len(robot.get_agents())
patience = 50
while friends < num_friends and patience>0:
patience -= 1
print "# Only %i friends detected so far"%friends
print "#", "\t".join(robot.get_agents().keys())
friends = len(robot.get_agents())
robot.broadcast_state()
sleep(0.2)
# MAIN LOOP
end_time = time() + total_time
while time() < end_time:
robot.update(dt, speed)
#print robot.get_agents()
pos = robot.body.get_position()
print pos[0] , pos[1] , robot.body.get_heading()
sleep(dt)
robot.turn_off()
| 1,251 | 22.622642 | 69 |
py
|
marabunta
|
marabunta-master/examples/dr3/dr3_demo.py
|
from marabunta import ebotBody, XBeeNetwork, XBeeExpirationNetwork
from marabunta.models import PerimeterDefenseRobot
from math import *
from time import time,sleep
import sys
from threading import Lock
from settings import s
dt=0.05
total_time = 60
speed=0.15
try:
num_friends = float(sys.argv[1])-1
except:
num_friends = 4-1
# These parameters have to be manually
# set for each robot.
init_pos = s["position"]
init_heading = s["heading"]
ID=s["ID"]
slot = s["slot"]
mylock = Lock()
body = ebotBody( init_pos , init_heading, mylock)
network = XBeeExpirationNetwork( 1.4, slot, slot+0.1, 1, ID , mylock)
robot = PerimeterDefenseRobot( body, network, 0.02 )
robot.turn_on()
robot.broadcast_state()
friends = len(robot.get_agents())
patience = 50
while friends < num_friends and patience>0:
patience -= 1
print "# Only %i friends detected so far"%friends
print "#", "\t".join(robot.get_agents().keys())
friends = len(robot.get_agents())
robot.broadcast_state()
sleep(0.2)
# MAIN LOOP
end_time = time() + total_time
while time() < end_time:
robot.update(dt, speed)
#print robot.get_agents()
pos = robot.body.get_position()
print pos[0] , pos[1] , robot.body.get_heading()
sleep(dt)
robot.turn_off()
| 1,236 | 21.490909 | 69 |
py
|
marabunta
|
marabunta-master/examples/spreading/spreading.py
|
from marabunta import MockBody, MockNetwork
from marabunta.models import PerimeterDefenseRobot
dt=1.0
log = open("the_cloud.dat","w+")
settings = []
settings.append( {"ID":1 , "pos":[ 0., 0.] , "heading":0.} )
settings.append( {"ID":2 , "pos":[ 2., 0.] , "heading":0.} )
settings.append( {"ID":3 , "pos":[ 0.,-1.] , "heading":0.} )
def new_robot(s, log):
body = MockBody(s.get("pos"), s.get("heading") )
network = MockNetwork(log,s.get("ID"))
bot = PerimeterDefenseRobot( body, network, 0.02)
bot.turn_on()
return bot
robots = [ new_robot(s,log) for s in settings ]
for t in range(10):
[robot.update(dt) for robot in robots]
robots[2].turn_off()
for t in range(10):
[robot.update(dt) for robot in robots]
robots[0].turn_off()
for t in range(40):
[robot.update(dt) for robot in robots]
[robot.turn_off() for robot in robots]
log.close()
| 874 | 26.34375 | 60 |
py
|
marabunta
|
marabunta-master/examples/labyrinth/labyrinth.py
|
from marabunta import BaseRobot, MockBody, MockNetwork
import random
from math import sin, cos
# for visualization
import numpy as np
import pylab as pl
class myRobot(BaseRobot):
def __init__(self, setting):
body = MockBody(setting.get("position") ,setting.get("heading"))
network = MockNetwork(setting.get("ID"))
BaseRobot.__init__(self,body, network)
return
def spread_target(self):
neis = self.get_agents().values()
pos = self.body.get_position()
# Get both neighbors and obstacles, in relative coordinates
obstacles = self.body.obstacle_coordinates()
points = [ [nei[0]-pos[0], nei[1]-pos[1]] for nei in neis] + obstacles
if points:
target = [0.,0.]
for p in points:
d2 = p[0]**2 + p[1]**2
weight = (1.0/d2 )**1.5
if d2>0:
target[0] -= p[0]*weight
target[1] -= p[1]*weight
else:
target= None
return target
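    # Worked example (illustrative numbers): a single neighbour at
    # relative position (2, 0) gives d2 = 4 and weight = (1/4)**1.5
    # = 0.125, so the target is (-0.25, 0). Each contribution has
    # magnitude 1/d**2, so nearby neighbours and obstacles dominate
    # the repulsion.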
def move_towards_target(self, target, deltat, v):
threshold = 5
if target[0]**2 + target[1]**2 > threshold*threshold:
self.align(target)
self.move_forward(deltat, v)
else:
self.move_forward(deltat, 0)
return
def update(self, deltat, v):
self.broadcast_state()
# Perform swarming
target = self.spread_target()
if not target:
h= self.body.get_heading()
target = [10.*cos(h) ,10.*sin(h)]
# Avoid obstacles
target = self.correct_target(target)
self.move_towards_target(target, deltat, v)
return
num_robots = 50
dt=0.5
total_time = 100
speed = 0.4
map_file = "map_points.dat"
obstacles = np.loadtxt(map_file)
#--------------------------------------------------------
def random_position(x0, y0, R0):
"""Return a random vector uniformly distributed
in a circle of radius R0 centered on (x0,y0).
"""
r2 = float('inf')
while r2 > 1.0:
x , y = 2.0*(random.random()-0.5) , 2.0*(random.random()-0.5)
r2 = x*x + y*y
x = x0 + x*R0
y = y0 + y*R0
return [x,y]
def plot(robots, obstacles):
"""Plots the current configuration of robots."""
xs = [robot.body.get_position()[0] for robot in robots]
ys = [robot.body.get_position()[1] for robot in robots]
pl.plot(obstacles[:,0],obstacles[:,1],'gs',markersize=20.)
pl.plot(xs,ys,'ro')
pl.show(block=False)
pl.pause(0.0000001)
pl.close()
return
#-----------------------------------------------------
settings = [ {"ID":"R%02i"%i, "position":random_position(1.5,0.5,0.2), "heading":0.} for i in range(num_robots)]
robots = [ myRobot(s) for s in settings]
[robot.body.load_obstacles(map_file) for robot in robots]
[robot.turn_on() for robot in robots]
try:
[robot.broadcast_state() for robot in robots]
# MAIN LOOP
for it in range(int(total_time/dt)):
for robot in robots:
if robot.is_working():
robot.update(dt, speed)
plot(robots, obstacles)
finally:
[robot.turn_off() for robot in robots]
| 3,153 | 28.754717 | 112 |
py
|
marabunta
|
marabunta-master/marabunta/BaseRobot.py
|
# import math as m
from math import pi, sin, cos, atan2, sqrt
import threading
from time import sleep, time
from utils import clean_angle
class BaseRobot(object):
"""Base class of Robot containing the
basic tools to operate a swarming robot.
It requires a *body* instance that is
inherits from BaseBody and a *network*
instance that inherits from BaseNetwork.
Initializing the robot will NOT open channels
for broadcast or turn on the physical body
of the robot (that's what turn_on() is for.)
It is recommended that robot models inherit
from BaseRobot and call its __init__ method
if it has to be expanded, like this:
class MyRobot(BaseRobot):
def __init__(self, body, network, args):
do_something(args)
BaseRobot.__init__(self,body,network)
return
"""
def __init__(self, body, network):
"""As a way to make sure the body and network
instances have the required method, this class
only accept bodies that inherit from BaseBody
and networks that inherit from BaseNetwork.
"""
if isinstance(body, BaseBody):
self.body = body
else:
raise Exception(
"body is not an instance of BaseRobot.BaseBody()")
if isinstance(network, BaseNetwork):
self.network = network
else:
raise Exception(
"network is not an instance of BaseRobot.BaseNetwork")
self.working = False
self.printing = False
self.last_target = [0., 0.]
return
def is_working(self):
return self.working
def turn_on(self):
"""Call the turn_on methods of body and network
if they exist (not required) and start
broadcasting. Calling this does nothing if the
instance is already turned on.
"""
if not self.is_working():
if "turn_on" in dir(self.body):
self.body.turn_on()
if "turn_on" in dir(self.network):
self.network.turn_on()
self.network.start_broadcasting()
self.working = True
return
def turn_off(self):
"""Call the turn_off methods of body and network
if they exist (not required) and stop
broadcasting. Calling this does nothing if the
instance is already turned off.
"""
if self.is_working():
self.stop_printing()
self.network.stop_broadcasting()
if "turn_off" in dir(self.body):
self.body.turn_off()
if "turn_off" in dir(self.network):
self.network.turn_off()
self.working = False
return
# To use robots with the "with Robot(...) as robot:" statement
def __enter__(self):
self.turn_on()
return self
# To use robots with the "with Robot(...) as robot:" statement
def __exit__(self, type, value, traceback):
self.turn_off()
return
# Kinematic methods:
def move_forward(self, time, v=None):
"""If the robot is working, call body.move_forward.
The speed is set to zero if there is an obstacle
in front.
"""
if self.is_working():
if self.obstacle_infront():
v = 0.0
self.body.move_forward(time, v)
return
def stop(self):
"""Call the body's stop method if it exists,
else just set speed to 0.
"""
if "stop" in dir(self.body):
return self.body.stop()
else:
return self.move_forward(0., 0.)
def rotate(self, dtheta):
"""Calls body.rotate is the robot is working.
Return the time it took to rotate."""
if self.is_working():
time = self.body.rotate(dtheta)
else:
time = 0.
return time
def align(self, direction):
"""Align the heading of the robot to
a given vector *direction*.
Looks for an align method in *self.body*
but one is not required.
"""
if "align" in dir(self.body):
dtheta = self.body.align(direction)
else:
target_theta = atan2(direction[1], direction[0])
dtheta = clean_angle(target_theta - self.body.get_heading())
self.rotate(dtheta)
return dtheta
def go_to(self, target, tol=0.8, max_time=120):
"""Move the robot to *target*.
Calling this will make the robot
move in a straight line towards the
target point and block the main
thread until the point is reached
within *tol* accuracy or *max_time*
seconds have passed.
"""
end_time = time() + max_time
v = self.body.max_speed
while time() < end_time:
delta = [target[0] - self.body.get_position()[0],
target[1] - self.body.get_position()[1]]
distance = sqrt(delta[0] * delta[0] + delta[1] * delta[1])
if distance > tol:
delta = self.correct_target_projecting(delta)
self.align(delta)
self.move_forward(distance / v, v)
sleep(0.1)
else:
self.stop()
break
return
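    # Usage sketch (hypothetical coordinates): robot.go_to((2.0, 1.5))
    # blocks until the robot is within tol (0.8 m by default) of the
    # point or max_time elapses; follow_path() below chains such calls.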
def follow_path(self, targets, tol=0.8, max_time_ppt=120):
"""Move the robot along the path
specified by *targets*.
Uses the self.go_to method.
"""
for target in targets:
self.go_to(target, tol, max_time_ppt)
return
# Communication methods:
def get_agents(self):
"""Return a dictionary with the state of each robot.
"""
return self.network.get_agents_state()
def broadcast_state(self):
"""Broadcast current state (x,y,heading) over
the network.
"""
if self.is_working():
self.network.send_state(
self.body.get_position(),
self.body.get_heading())
return
def broadcast_state_obstacles(self):
"""Broadcast current state and current obstacles detected.
If no obstacles, call send_state instead."""
if self.is_working():
obstacles = self.body.obstacle_global_coordinates()
if obstacles:
self.network.send_state_obstacles(
self.body.get_position(),
self.body.get_heading(),
obstacles)
else:
self.network.send_state(
self.body.get_position(),
self.body.get_heading())
return
def broadcast_obstacles(self):
"""Only send if obstacles are detected."""
if self.is_working():
obstacles = self.body.obstacle_global_coordinates()
if obstacles:
self.network.send_obstacles(obstacles)
return
def broadcast_goto(self, target):
"""Broadcast a signal indicating everyone
to go to a specific target.
"""
if self.is_working():
x, y = target
message = "goto {:.2f} {:.2f}\n".format(*target)
self.network.send_message(message)
return
def broadcast_rendezvous(self):
"""Broadcast a signal to call everyone
to the robot's current location.
"""
return self.broadcast_goto(self.body.get_position())
# Obstacle avoidance methods:
def obstacle_infront(self):
"""Return True if an obstacle is "in front", meaning
that extreme measures such as stopping the movement
have to be taken to avoid a collision.
Used by move_forward() to stop the robot in case
something is too close.
"""
return self.body.obstacle_infront()
def obstacle_near(self):
"""Return True if an obstacle is "near" meaning
that the robot should be aware of the existence
of the obstacle even if it may not collide
directly.
"""
return self.body.obstacle_near()
def frontal_obstacle_coordinates(self, obs_coords):
"""Return the frontal projection of a list of coordinates.
Assuming the obstacles form a straight line, the
frontal projection is the point in that line closer to
the robot.
TODO: NEEDS TESTING!
"""
n = len(obs_coords)
if n:
x = sum(o[0] for o in obs_coords) / n
y = sum(o[1] for o in obs_coords) / n
x2 = sum(o[0] * o[0] for o in obs_coords) / n
y2 = sum(o[1] * o[1] for o in obs_coords) / n
xy = sum(o[0] * o[1] for o in obs_coords) / n
sx2 = x2 - x * x
sy2 = y2 - y * y
cxy = (xy - x * y)
try:
x0 = cxy * (x * xy - x2 * y) / (cxy * cxy + sx2 * sx2)
except:
x0 = x
try:
y0 = cxy * (y * xy - y2 * x) / (cxy * cxy + sy2 * sy2)
except:
y0 = y
else:
x0 = None
y0 = None
return (x0, y0)
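    # Worked example (illustrative readings): obstacle points (1, 0) and
    # (1, 1) lie on the vertical line x = 1; the x0 expression divides by
    # zero and falls back to x0 = 1, while y0 evaluates to 0, so the
    # method returns (1, 0) -- the point of that line closest to the
    # robot.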
def correct_target(self, target):
"""Correct the target vector *target* so that if the
robot moves in that direction it will not collide with
obstacles.
Current implementation: correct if obstacle_near(), not
if obstacle_infront(). In case a correction is needed,
choose the closest obstacle and project the target vector
to be perpendicular to the obstacle position.
"""
if self.body.obstacle_near():
obstacles = self.body.obstacle_coordinates()
# Find the nearest obstacle:
dists = [v[0] * v[0] + v[1] * v[1] for v in obstacles]
idx = dists.index(min(dists))
obs = obstacles[idx]
ot = obs[0] * target[0] + obs[1] * target[1]
olt = obs[0] * self.last_target[1] - obs[1] * self.last_target[0]
o2 = obs[0] * obs[0] + obs[1] * obs[1]
projection = ot / o2
if projection > 0:
if projection < 0.80:
target[0] -= 1.05 * obs[0] * projection
target[1] -= 1.05 * obs[1] * projection
else:
# Choose left or right depending on last_target:
if olt > 0.:
theta = 0.60 * pi
else:
theta = -0.60 * pi
ct = cos(theta)
st = sin(theta)
target[0] = 4. * obs[0] * ct - 4. * obs[1] * st
target[1] = 4. * obs[0] * st + 4. * obs[1] * ct
self.last_target = target[:]
return target
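    # Worked example (illustrative vectors): with an obstacle at (1, 0)
    # and a target (1, 1), projection = 1.0 >= 0.80, so the target is
    # replaced by the obstacle vector rotated 0.6*pi (left or right,
    # depending on last_target) and scaled by 4, steering the robot
    # around the obstacle rather than merely sliding along it.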
# Data collection methods:
def background_print(self, dt):
"""Print position + obtacles detected in global coordinates.
Format:
#pose iter x y heading
#wall iter x y distance_to_robot
        It ignores any obstacle detected at less than 25cm from a known
        agent and any obstacle at more than 60cm from the robot.
"""
it = 0
spose = "#pose\t{:}\t{:.3f}\t{:.3f}\t{:.3f}"
swall = "#wall\t{:}\t{:.3f}\t{:.3f}\t{:.3f}"
while self.printing:
x, y = self.body.get_position()
h = self.body.get_heading()
obstacles = self.body.obstacle_coordinates()
# If an obstacle is at less than 25 cm from an
# agent, ignore it.
agents = self.get_agents().values()
walls = []
for o in obstacles:
dists2 = [(o[0] - a[0])**2 + (o[1] - a[1])**2 for a in agents]
if all([d2 > 0.25**2 for d2 in dists2]):
walls.append(o)
# print poses and walls
print(spose.format(it, x, y, h))
for wall in walls:
dist = sqrt(wall[0]**2 + wall[1]**2)
print(swall.format(it, wall[0] + x, wall[1] + y, dist))
sleep(dt)
it += 1
return it
def start_printing(self, dt):
"""Launch a new thread running background_print."""
self.printing = True
self.print_thread = threading.Thread(target=self.background_print,
args=(dt,))
self.print_thread.daemon = True
return self.print_thread.start()
def stop_printing(self):
"""Stop the print thread."""
if self.printing:
self.printing = False
self.print_thread.join(10)
if self.print_thread.is_alive():
raise Exception("Could not stop printing thread properly")
return
class BaseBody(object):
"""Minimal model of Body with the required methods
for use as a body of a robot. Any body
models should inherit from this class to
be accepted by BaseRobot.
All the methods below should be overwritten
by each body model.
"""
def get_position(self):
"""Returns the x,y coordinates of the robot.
"""
raise Exception("body.get_position() not implemented")
return [0., 0.]
def get_heading(self):
"""Returns the angle between the robot orientation
and the x-axis in radians between [-pi/2,pi/2].
"""
raise Exception("body.get_heading() not implemented")
return 0.
def move_forward(self, time, v=None):
"""Move forward the robot during *time* seconds at
a speed *v* (m/s). If no speed is given, use a
predefined one from the body class.
"""
raise Exception("body.move_forward() not implemented")
return
def rotate(self, dtheta):
"""Rotate the robot an angle *dtheta* in radians between [-pi/2,pi/2].
Return the time in seconds it took to perform the rotation.
"""
raise Exception("body.rotate() not implemented")
return 0.
def obstacle_coordinates(self):
"""Return a list with the coordinates of the obstacles detected
in relation to the robot. This coordinates are relative to the
robot but using the global orientation of the axis, i.e. this
returns the displacement vector from the robot to the obstacles
using global coordinates.
"""
raise Exception("body.obstacle_coordinates() not implemented")
x1 = [0., 0.]
x2 = [0., 0.]
x3 = [0., 0.]
return [x1, x2, x3]
def obstacle_global_coordinates(self):
"""Same as obstacle_coordinates but setting
the origin of coordinates to the ground truth
origin and not relative to the robot position.
"""
pos = self.get_position()
return [(ob[0] + pos[0], ob[1] + pos[1])
for ob in self.obstacle_coordinates()]
def obstacle_infront(self):
"""Return True if an obstacle is "in front", meaning
that extreme measures such as stopping the movement
have to be taken to avoid a collision.
Used by move_forward() to stop the robot in case
something is too close.
"""
raise Exception("body.obstacle_infront() not implemented")
return False
def obstacle_near(self):
"""Return True if an obstacle is "near" meaning
that the robot should be aware of the existence
of the obstacle even if it may not collide
directly.
"""
raise Exception("body.obstacle_near() not implemented")
return False
class BaseNetwork(object):
"""Minimal model of Network with the required methods
for use as a network of a robot. Any network
models should inherit from this class to
be accepted by BaseRobot.
All the methods below should be overwritten
    by each network model.
"""
def start_broadcasting(self):
"""Open the channel and set up whatever is needed to start broadcasting
(send AND receive). Return the 'channel' used for communication,
typically a Serial.Serial or some other class with read() and write().
"""
raise Exception("network.start_broadcasting() not implemented")
return {}
def stop_broadcasting(self):
"""Stop the transmission and close the required channels."""
raise Exception("network.stop_broadcasting() not implemented")
return
def get_agents_state(self):
"""Return a dictionary with the state (x,y,heading) of each robot
detected in the network."""
raise Exception("network.get_agents_state() not implemented")
return {"robot1": [0., 0., 0.],
"robot2": [0., 0., 0.],
"robotN": [0., 0., 0.]}
def send_state(self, position, heading):
"""Broadcast the current state (position[0], position[1], heading)
of the robot over the network.
Return the message sent.
"""
raise Exception("network.send_position() not implemented")
return "Robot1\t0.\t0.\t0."
# To use network with the "with Network(...) as network:" statement
def __enter__(self):
self.start_broadcasting()
        return self
# To use network with the "with Network(...) as network:" statement
def __exit__(self, type, value, traceback):
self.stop_broadcasting()
return
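# Hedged sketch (not part of the original module): the smallest Body subclass
# that satisfies the interface above, standing still at the origin. Purely
# illustrative; the class name is hypothetical.
class StationaryBody(BaseBody):
    def get_position(self):
        return [0., 0.]
    def get_heading(self):
        return 0.
    def move_forward(self, time, v=None):
        return
    def rotate(self, dtheta):
        return 0.
    def obstacle_coordinates(self):
        return []
    def obstacle_infront(self):
        return False
    def obstacle_near(self):
        return False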
| 17,457 | 34.411765 | 79 |
py
|
marabunta
|
marabunta-master/marabunta/MockBody.py
|
from math import sin, cos, sqrt
from BaseRobot import BaseBody
from Map import Map2D
class MockBody(BaseBody):
"""Simulation of a body. Locomotion
with this body is simply updating
the values of *pos* and *heading*.
Sensors simulated through a Map instance
that contains the obstacles to be detected.
"""
def __init__(self, pos, heading,
max_speed=0.15, LRdist=0.1, aperture=0.7854):
# State
self.pos = [pos[0], pos[1]]
self.heading = heading
# Parameters
self.max_speed = max_speed
self.LRdist = LRdist
self.aperture = aperture
return
def move_forward(self, dt, v=None):
"""Move in current direction for dt time.
"""
if v is None or v > self.max_speed:
v = self.max_speed
self.pos[0] += v * cos(self.heading) * dt
self.pos[1] += v * sin(self.heading) * dt
return
def rotate(self, dtheta):
"""Rotate robot an angle dtheta.
"""
time = self.LRdist * abs(dtheta) / (2 * self.max_speed)
self.heading += dtheta
return time
def move(self, dt, v, omega):
"""Move the robot with linear velocity v
and angular velocity omega.
"""
dtheta = omega * dt
self.rotate(0.5 * dtheta)
self.move_forward(dt, v)
self.rotate(0.5 * dtheta)
return
def get_position(self):
"""Return current estimate for position.
"""
return (self.pos[0], self.pos[1])
def get_heading(self):
"""Return current estimate for heading.
"""
return self.heading
def load_obstacles(self, filename):
"""Load the obstacles stored in *filename*
using a Map2D instance. Using a Map2D will
automatically store the obstacles in a grid
for fast access to nearby obstacles.
"""
self.obstacles = Map2D(filename, 0.5)
return
def get_ultrasound(self):
"""Return the distance to all the
nearby obstacles (as defined by the
Map2D instance). If no instance is
stored in self.obstacles, return []
"""
try:
obs = self.obstacles.obstacles_near(self.pos)
except:
return []
x, y = self.pos
return [sqrt((o[0] - x)**2 + (o[1] - y)**2) for o in obs]
def obstacle_coordinates(self):
"""Return the relative position of all the
nearby obstacles (as defined by the
Map2D instance). If no instance is
stored in self.obstacles, return []
"""
try:
obs = self.obstacles.obstacles_near(self.pos)
except:
return []
x, y = self.pos
return [[o[0] - x, o[1] - y]
for o in obs if (o[0] - x)**2 + (o[1] - y)**2 < 1.4 * 1.4]
def obstacle_infront(self):
"""Return True if an obstacle is "in front", meaning
that extreme measures such as stopping the movement
have to be taken to avoid a collision.
Used by move_forward() to stop the robot in case
something is too close.
"""
return any(d < 0.1 for d in self.get_ultrasound() if d)
def obstacle_near(self):
"""Return True if an obstacle is "near" meaning
that the robot should be aware of the existence
of the obstacle even if it may not collide
directly.
"""
return any(d < 0.6 for d in self.get_ultrasound() if d)
def get_wheel_distance(self):
return self.LRdist
def get_sensor_aperture(self):
        return self.aperture  # default 0.7854 rad = 45 degrees
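# Hedged usage sketch (not part of the original module): Map2D also accepts an
# in-memory list of points, so load_obstacles() works here without a data file.
if __name__ == "__main__":
    body = MockBody(pos=[0., 0.], heading=0.)
    body.load_obstacles([[1., 1.], [-1., -1.], [1., -1.], [-1., 1.]])
    body.move_forward(1.0)  # drive ~0.15 m along +x at max speed
    print(body.get_position())
    print(body.obstacle_coordinates())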
| 3,667 | 30.084746 | 74 |
py
|
marabunta
|
marabunta-master/marabunta/eBotBody.py
|
from math import atan2, sin, cos, pi
from eBot import eBot
from BaseRobot import BaseBody
import threading
from time import time, sleep
import sys
from utils import clean_angle
class eBotBody(BaseBody, eBot.eBot):
"""Body class for controlling an eBot (see https://github.com/EdgeBotix/docs)
by using its python API.
A background movement is implemented where a separate thread continuously
moves the robot at a linear speed set by *self.target_speed* while aligning
the heading to *self.target_heading*.
Alternativaly, if a *self.target_omega* is set, the thread uses a PID
controller to rotate at frequency omega.
The background movement is started when the body is turned on, but it can
be stopped at any point and the class is still completely usable without
this background thread.
This class assumes the eBot-API provides a position() method that
returns (x, y, theta) with x, y in meters and theta in degrees between 0
and 360, and a robot_uS() method that returns the readings from the
ultrasound sensors.
"""
def __init__(self, pos, heading,
lock=None, max_speed=0.15, LRdist=0.10, aperture=0.7854):
# State (in robot coordinates)
self.pos = [pos[0], pos[1]]
self.heading = heading
# Parameters
self.max_speed = max_speed
self.LRdist = LRdist
self.aperture = aperture
# Controller parameters
self.control_Kp = 3.
self.control_Ti = 11.
self.control_Td = 0.01
self.awake = threading.Event()
self.aligned = threading.Event()
self.awake.set()
self.moving_background = False
self.target_heading = None
self.target_speed = None
self.target_omega = None
eBot.eBot.__init__(self, pos, heading, lock)
return
def start_move_background(self):
"""Start a background thread controlling
the speed and the heading of the robot.
If the thread is alive (self.moving_background=True)
several other functions rely on this
instead of implementing the movement
themselves.
"""
if not self.moving_background:
self.moving_background = True
self.thread = threading.Thread(target=self.move_background)
self.thread.daemon = True
self.thread.start()
return
def stop_move_background(self):
"""Stop background movement.
The function returns when the thread
        has been successfully finished.
        Otherwise an Exception is raised.
"""
if self.moving_background:
self.moving_background = False
self.awake.set() # force wakeup to finish the thread
self.thread.join(10)
if self.thread.is_alive():
raise Exception("Unable to stop move_background thread.")
return
def turn_on(self):
"""Connect to the eBot using the
API method and start the background movement.
"""
self.connect() # from eBot.eBot
self.target_heading = self.get_heading()
self.aligned.set()
self.target_speed = 0.
self.start_move_background()
return
def turn_off(self):
"""Stop the background movement and
disconnect using the API method.
"""
self.stop_move_background()
self.halt() # from eBot.eBot
self.disconnect() # from eBot.eBot
return
def __enter__(self):
self.turn_on()
return self
def __exit__(self, type, value, traceback):
self.turn_off()
return
def wakeup(self):
if not self.awake.is_set():
self.awake.set()
return
def sleep(self):
self.awake.clear()
self.halt()
return
# Movement
def wheel_speed(self, v):
"""Convert velocity v from m/s
to the parameter between -1 and 1
that the wheels method from the
API expects.
"""
wheel = v / self.max_speed
wheel = max(-1., wheel)
wheel = min(wheel, 1.)
return wheel
def move_forward(self, dt, v=None):
"""Move in current direction for *dt* time
at a speed of *v* if given, or *max_speed*
if not.
If the background move is activated,
        *dt* is ignored and this returns immediately.
"""
if self.moving_background:
if v is not None:
self.target_speed = v
else:
if v is None:
v = self.max_speed
wheel = self.wheel_speed(v)
self.wheels(wheel, wheel)
sleep(dt)
self.wheels(0, 0)
return
def rotate(self, dtheta):
"""Rotate robot an angle dtheta
If the background move is activated,
        this returns immediately.
"""
if self.moving_background:
self.target_heading = self.get_heading() + dtheta
time = 0.
else:
time = self.LRdist * abs(dtheta) / (2 * self.max_speed)
if dtheta > 0:
self.wheels(-1, 1)
else:
self.wheels(1, -1)
sleep(time)
self.wheels(0, 0)
return time
def align(self, direction, block=False):
"""Align the heading of the robot to
a given vector *direction*.
        Use self.target_heading if the background
        move is active; if not, use rotate() instead.
        If block is set to True, wait until the robot
is aligned before returning.
"""
h = atan2(direction[1], direction[0])
if self.moving_background:
self.target_heading = h
if block:
self.aligned.wait()
else:
dtheta = clean_angle(h - self.get_heading())
self.rotate(dtheta)
return
def move(self, dt, v, omega, stop_flag=True):
"""Move the robot with linear velocity v
and angular velocity omega. In the case
        that the max wheel speed doesn't allow fulfilling
both requirements, the angular velocity omega
is given priority over the linear velocity,
        i.e. if |v|+|vrot| > max_speed (with vrot = omega*LRdist/2),
        then *v* is reduced so that |v|+|vrot| = max_speed.
The stop_flag (default True) controls whether
the robot should stop after *dt* or keep
the current speed.
"""
vrot = 0.5 * omega * self.LRdist
if vrot > self.max_speed:
vrot = self.max_speed
if vrot < -self.max_speed:
vrot = -self.max_speed
if abs(v) > 0 and abs(v) + abs(vrot) > self.max_speed:
v = (v / abs(v)) * (self.max_speed - abs(vrot))
left_wheel = self.wheel_speed(v - vrot)
right_wheel = self.wheel_speed(v + vrot)
self.wheels(left_wheel, right_wheel)
sleep(dt)
if stop_flag:
self.wheels(0, 0)
return
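    # Worked check of the clipping above (hedged, not original code): with
    # max_speed=0.15, LRdist=0.1, v=0.2 and omega=1.0, vrot=0.05 and v is
    # clipped to 0.10 so that |v|+|vrot| equals max_speed exactly.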
def move_background(self):
"""Continuosly adjust orientation
to self.target_heading while
cruising at self.target_speed if possible.
Uses PID to align.
"""
dt = 0.1
dtheta = 0.0
time_now = time()
errorint = 0.
while self.moving_background:
self.update_state()
omega = self.target_omega
if omega is None:
# Update old values
old_dtheta = dtheta
old_time = time_now
# Update new values
dtheta = clean_angle(self.target_heading - self.get_heading())
time_now = time()
if abs(dtheta) > 0.1:
self.aligned.clear()
else:
self.aligned.set()
# Proportional
omega = self.control_Kp * dtheta
# Integral
real_dt = time_now - old_time
if self.control_Ti:
errorint += real_dt * dtheta
omega += (self.control_Kp * errorint) / self.control_Ti
# Differential
if self.control_Td:
derrordt = clean_angle(dtheta - old_dtheta) / real_dt
omega += self.control_Kp * self.control_Td * derrordt
self.move(dt, self.target_speed, omega, stop_flag=False) # sleeps
self.awake.wait()
self.halt()
return
# Sensors
def light_detected(self):
"""Check if light is detected.
If so, sound the buzzer.
"""
lfront, ltop = self.light() # from eBot.eBot
light = ltop > 0.99 or lfront > 0.60 # from eBot.eBot
if light:
self.buzzer(200, 2000) # from eBot.eBot
return light
def get_position(self):
"""Return the position in
global coordinates.
"""
if not self.moving_background:
self.update_state()
return self.pos
def get_heading(self):
"""Return the heading in
global coordinates.
"""
if not self.moving_background:
self.update_state()
return self.heading
def update_state(self):
"""Update the values of self.pos and self.heading
according to the robot's readings.
If an error occurs when calling eBot.position(),
write to stderr and set the self.pos and self.heading
        to None so that any thread accessing them will fail
and complain loudly (or at least get nonsense).
"""
try:
p = self.position()
self.pos[0], self.pos[1] = float(p[0]), float(p[1])
self.heading = clean_angle(pi * float(p[2]) / 180.)
except:
sys.stderr.write("problem calling eBot.eBot.position\n")
self.pos = None
self.heading = None
return
def get_ultrasound(self):
"""Ultrasound readings, ignoring the
sensor on the back.
If the sensors dont detect anything,
return 2.5 or so (eBot inner workings.)
"""
return self.robot_uS()[0:5] # from eBot.eBot
def obstacle_coordinates(self):
"""Coordinates of the five obstacle points
with respect to the robot (ignore the back).
"""
h = self.get_heading()
theta = self.aperture
[dLL, dL, dC, dR, dRR] = self.get_ultrasound()
obs = []
if 0.15 < dLL < 1.0:
obs.append((dLL * cos(h + 2. * theta), dLL * sin(h + 2. * theta)))
if 0.15 < dL < 1.0:
obs.append((dL * cos(h + theta), dL * sin(h + theta)))
if 0.15 < dC < 1.0:
obs.append((dC * cos(h), dC * sin(h)))
if 0.15 < dR < 1.0:
obs.append((dR * cos(h - theta), dR * sin(h - theta)))
if 0.15 < dRR < 1.0:
obs.append((dRR * cos(h - 2. * theta), dRR * sin(h - 2. * theta)))
return obs
def obstacle_infront(self):
"""Return True if an obstacle is "in front", meaning
that extreme measures such as stopping the movement
have to be taken to avoid a collision.
Used by move_forward() to stop the robot in case
something is too close.
"""
        # TODO Explore the optimal ranges of each sensor.
us = self.get_ultrasound()
return us[2] < 0.20 or us[1] < 0.25 or us[3] < 0.20
def obstacle_near(self):
"""Return True if an obstacle is "near" meaning
that the robot should be aware of the existence
of the obstacle even if it may not collide
directly.
"""
        # TODO Explore the optimal ranges of each sensor.
us = self.get_ultrasound()
# return us[2] < 0.50 or us[1] < 0.45 or \
# us[3] < 0.45 or us[0] < 0.40 or us[4] < 0.40
return us[2] < 0.70 or us[1] < 0.70 or \
us[3] < 0.70 or us[0] < 0.70 or us[4] < 0.70
def get_wheel_distance(self):
"""Return the distance between
the wheels of the eBot.
"""
return self.LRdist
def get_sensor_aperture(self):
"""Return the angle between the
sensors in the eBot.
"""
return self.aperture
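# Hedged sketch (not part of the original class): the control law used in
# move_background() written in standard PID form, with e the heading error.
# The helper name is hypothetical; the default gains are taken from __init__.
def _pid_omega(e, e_int, de_dt, Kp=3., Ti=11., Td=0.01):
    """Angular velocity command: Kp*e + (Kp/Ti)*integral(e) + Kp*Td*de/dt."""
    return Kp * e + (Kp * e_int) / Ti + Kp * Td * de_dt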
| 12,305 | 32.440217 | 81 |
py
|
marabunta
|
marabunta-master/marabunta/utils.py
|
from serial import Serial
import thread
import threading
from math import pi
def clean_angle(th):
"""Return the angle th
wrapped into the range [-pi,pi].
"""
while th > pi:
th -= 2 * pi
while th < -pi:
th += 2 * pi
return th
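# Worked check (hedged, not original code): clean_angle(3*pi/2) == -pi/2 and
# clean_angle(-3*pi/2) == pi/2, so two headings always compare within one turn.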
class SafeSerial(Serial):
"""Extension of serialSerial that uses a
threading.Lock lock to make sure that reads
and writes are thread-safe. The lock can be
given at initialization so that different
serial port can share the same lock.
Sharing the lock is useful when dealing
with multiplexed ports such as in the
Raspberry Pi 2.
"""
def __init__(self, *args, **kws):
lock = kws.pop("lock", None)
if isinstance(lock, thread.LockType):
self.lock = lock
else:
self.lock = threading.Lock()
super(SafeSerial, self).__init__(*args, **kws)
return
def readline(self):
with self.lock:
m = super(SafeSerial, self).readline()
return m
def write(self, *args, **kws):
with self.lock:
m = super(SafeSerial, self).write(*args, **kws)
return m
def flushInput(self):
with self.lock:
m = super(SafeSerial, self).flushInput()
return m
def flushOutput(self):
with self.lock:
m = super(SafeSerial, self).flushOutput()
return m
| 1,410 | 24.196429 | 59 |
py
|
marabunta
|
marabunta-master/marabunta/Map.py
|
class Map2D(object):
"""Store the position of obstacles in a grid.
    Right now it can only load from a single source at construction.
The list of all obstacles is stored in self.obstacles
and a grid with the obstacles near each part
of the space is stored in self.grid.
The obstacles are input through *data*.
This can be either a list of points or
a filename / file-object with the data in it.
"""
def __init__(self, data, radius, x0=None, xf=None, y0=None, yf=None):
self.minLx = x0
self.maxLx = xf
self.minLy = y0
self.maxLy = yf
self.obstacles = []
self.grid_updated = False
self.load_obstacles(data)
nx = int(self.Lx / radius)
ny = int(self.Ly / radius)
self.setup_boxes(nx, ny)
self.fill_grid()
return
def load_obstacles(self, obstacles):
if type(obstacles) == str:
self.add_from_name(obstacles)
elif type(obstacles) == file:
self.add_from_file(obstacles)
elif type(obstacles) == list:
self.add_from_list(obstacles)
else:
raise Exception("Map2D: unknown data type {:}".format(obstacles))
return
def add_from_name(self, filename):
"""Read a collection of *x y* pairs
from the file *filename*.
The dimensions of the available space
are deduced from these points.
"""
with open(filename, 'r') as f:
self.add_from_file(f)
return
def add_from_file(self, f):
"""Read a collection of *x y* pairs
from the file object *f*.
The dimensions of the available space
are deduced from these points.
"""
obstacles = []
while True:
try:
x, y = [float(o) for o in f.readline().split()]
obstacles.append([x, y])
except:
break
self.add_from_list(obstacles)
return
def add_from_list(self, obstacles):
"""Load the obstacles from *obstacles*."""
xs = ([o[0] for o in obstacles])
ys = ([o[1] for o in obstacles])
# Determine the limits of the box
if self.minLx is None:
self.minLx = min(xs) - 0.01 * abs(min(xs))
if self.maxLx is None:
self.maxLx = max(xs) + 0.01 * abs(max(xs))
self.Lx = self.maxLx - self.minLx
if self.minLy is None:
self.minLy = min(ys) - 0.01 * abs(min(ys))
if self.maxLy is None:
self.maxLy = max(ys) + 0.01 * abs(max(ys))
self.Ly = self.maxLy - self.minLy
self.obstacles.extend(obstacles)
self.grid_updated = False
return
def setup_boxes(self, nx, ny):
""" Define how many boxes per axis the grid will have.
Total number of boxes = nx * ny.
nx = number of boxes along x. (int > 3)
ny = number of boxes along y. (int > 3)
"""
self.nx = int(nx)
self.ny = int(ny)
self.grid = [[] for i in range(nx * ny)] # initialize empty grid
return
def which_box(self, pos):
""" Gives the (i,j) indexes corresponding to position pos.
"""
i = int((pos[0] - self.minLx) * self.nx / self.Lx)
j = int((pos[1] - self.minLy) * self.ny / self.Ly)
assert i >= 0 and i < self.nx
assert j >= 0 and j < self.ny
return (i, j)
def obstacles_in_box(self, i, j):
"""Returns a list with the agents in a given box.
Periodic boundaries implemented, so if i (j) is larger than
nx (ny) it is replaced by i%nx (j%ny).
i = inner-most index of the box. (int)
j = outer-most index of the box. (int)
"""
i, j = i % self.nx, j % self.ny
return self.grid[i + j * self.nx]
def obstacles_near(self, pos):
i, j = self.which_box(pos)
return self.obstacles_in_box(i, j)
def fill_grid(self):
""" Fill the grid[] with a list of the obstacles contained
        in each element. Each obstacle is stored in its box + the 8
        surrounding ones, so that a robot that senses obstacles in
        its box will have information about its surroundings.
"""
nx, ny = self.nx, self.ny
for i in range(nx * ny):
self.grid[i] = []
for o in self.obstacles:
i, j = self.which_box(o)
for dj in (-1, 0, 1):
jj = j + dj
if jj >= 0 and jj < ny:
for di in (-1, 0, 1):
ii = i + di
if ii >= 0 and ii < nx:
self.grid[ii + jj * nx].append(o)
self.grid_updated = True
return self.grid
def filtered_map(self, threshold=1):
"""For each grid box that contains
        at least *threshold* obstacles,
return their mean position.
"""
fmap = []
if not self.grid_updated:
self.fill_grid()
for i in range(self.nx * self.ny):
obs = self.grid[i]
if len(obs) >= threshold:
x = sum(o[0] for o in obs) / len(obs)
y = sum(o[1] for o in obs) / len(obs)
fmap.append([x, y])
return fmap
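# Hedged usage sketch (not part of the original module): build a small map
# from an in-memory list of points and query the obstacles near a position.
if __name__ == "__main__":
    world = Map2D([[0., 0.], [1., 1.], [2., 0.5]], radius=0.5)
    print(world.which_box([1., 0.5]))       # grid cell holding that position
    print(world.obstacles_near([1., 0.5]))  # points stored in/around the cell
    print(world.filtered_map(threshold=1))  # mean position per occupied cell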
| 5,286 | 33.555556 | 77 |
py
|
marabunta
|
marabunta-master/marabunta/MockNetwork.py
|
from random import randint
from time import time
from BaseRobot import BaseNetwork
import glob
import sys
class MockNetwork(BaseNetwork):
"""Simulate communication between
agents by using the filesystem.
    Agents discover who is currently
    broadcasting by globbing for the
    radio_*.net channel files.
Each agent uses its own file to broadcast
its state.
"""
basechannel = "radio_{:}.net"
def __init__(self, ID=None):
"""Start MockNetwork.
If an ID is not given, just assign a
random number.
"""
if ID:
self.ID = str(ID)
else:
self.ID = str(randint(0, 999999))
self.parser = {"xx": self.parse_position,
"tt": self.parse_heading,
"oo": self.parse_obstacles,
"xo": self.parse_position_obstacles,
"mm": self.parse_message}
self.poses = {}
self.obstacles = {}
self.inbox = []
return
def start_broadcasting(self):
"""Open a file named *logname* to
send messages to other agents in
        the network. The file name encodes
        this agent's *ID* so that other
agents know where to look.
"""
self.logname = self.basechannel.format(self.ID)
        self.log = open(self.logname, 'w', 0)  # 0 buffsize = don't buffer
return self.log
def stop_broadcasting(self):
"""Close the broadcasting file and
erase this agent's signature in the
common file *global_log*.
This sends some string to the log to
simulate the possibility that fractions
of a message or other kinds of garbage
data could be sent when the network
device is turned off abruptly.
To erase the agent presence in the common
file, it reads lines one by one and
re-writes all lines that do not contain
its own ID. This is NOT thread-safe, and
may cause strange behavior if other agent
is accessing the data.
"""
self.log.write("#End of transmission")
self.log.close()
return
# Sending methods:
def send_state(self, pos, heading):
"""Send string of the form:
"xx*x* *y* *heading* *time* *ID*"
This is a low priority message, it is only scheduled
to send if there is no other message in the stack.
"""
message = "xx\t{:.5f}\t{:.5f}\t{:.5f}\t{:.5f}\t{:}\n".format(
pos[0], pos[1], heading, time(), self.ID)
self.log.write(message)
return message
def send_heading(self, heading):
"""Send string of the form:
"tt*heading* *time* *ID*"
This is a low priority message, it is only scheduled
to send if there is no other message in the stack.
"""
message = "tt\t{:.5f}\t{:.5f}\t{:}\n".format(heading, time(), self.ID)
self.log.write(message)
return message
def send_obstacles(self, obstacles):
"""Send string of the form:
""oo*x1*:*y1* *x2*:*y2* (...) *time* *ID*"
The message can contain an arbitrary number of obstacles
(but it is not guaranteed to be sent correctly if there
are too many).
"""
obstacles_str = "".join("{:.2f}:{:.2f}".format(*o) for o in obstacles)
message = "oo{:}{:.5f}\t{:}".format(obstacles_str, time(), self.ID)
self.log.write(message)
return message
def send_wakeup(self):
"""Send wakeup signal to everyone.
Message includes the ID and the time.
"""
message = "up\t{:.5f}\t{:}\n".format(time(), self.ID)
self.log.write(message)
return message
def send_sleep(self):
"""Send sleep signal to everyone.
Message includes the ID and the time.
"""
message = "ss\t{:.5f}\t{:}\n".format(time(), self.ID)
self.log.write(message)
return message
def send_message(self, text):
"""Sends a generic message given
as input.
"""
message = "mm" + str(text)
self.log.write(message)
return message
# Processing incoming methods:
def parse_position(self, message):
"""Parse a message containing x, y, theta, time, ID"""
try:
x, y, theta, time, ID = message.rstrip('\n').split()
self.poses[ID] = (float(x), float(y), float(theta))
except:
sys.stderr.write("parse_position(): Bad data:\n" + message + "\n")
return
def parse_heading(self, message):
"""Parse a message containing theta, time, ID"""
try:
theta, time, ID = message.rstrip('\n').split()
self.poses[ID] = float(theta)
except:
sys.stderr.write("parse_heading(): Bad data:\n" + message + "\n")
return
def parse_obstacles(self, message):
"""Parse a message containing a set of obstacle
        coordinates and store them in self.obstacles.
"""
try:
data = message.rstrip('\n').split()
            ID = data.pop()
            data.pop()  # discard the timestamp field
self.obstacles[ID] = [[float(x) for x in point.split(':')]
for point in data]
except:
sys.stderr.write("parse_obstacles(): Bad data:\n" + message + "\n")
return
def parse_position_obstacles(self, message):
"""Parse a message containing x, y, theta and
a set of obstacle coordinates.
Not implemented yet.
"""
raise Exception("parse_position_obstacles: Not implemented")
return
def parse_message(self, message):
self.inbox.append(message)
return
def read_all(self):
"""Read the last 5 lines broadcasted by
each agent and parse the contents.
"""
logfiles = glob.glob(self.basechannel.format("*"))
self.poses = {}
self.obstacles = {}
for logfile in logfiles:
if logfile != self.logname:
with open(logfile, 'r') as f:
lines = f.readlines()
if len(lines) > 5:
lines = lines[-5:]
for line in lines:
try:
key = line[0:2]
message = line[2:]
except:
key = None
if key in self.parser:
self.parser[key](message)
return
def get_agents_state(self):
"""Gathers all the agents' state.
Returns a dictionary of the form:
{ ID: [x, y, heading] }
"""
self.read_all()
return self.poses
def get_obstacles(self):
"""Gathers all the agents' detected obtacles.
Returns a dictionary of the form:
{ ID: [ [x1, y1], [x2, y2], [x3, y3], ...] }
"""
self.read_all()
return self.obstacles
def get_messages(self):
incomings = list(reversed(self.inbox))
self.inbox = []
return incomings
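# Hedged usage sketch (not part of the original module): two agents exchanging
# state through the filesystem channel; writes radio_*.net files in the cwd.
if __name__ == "__main__":
    a, b = MockNetwork("A"), MockNetwork("B")
    a.start_broadcasting()
    b.start_broadcasting()
    a.send_state((0., 0.), 0.)
    print(b.get_agents_state())  # expected: {'A': (0.0, 0.0, 0.0)}
    a.stop_broadcasting()
    b.stop_broadcasting()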
| 7,194 | 32.156682 | 79 |
py
|
marabunta
|
marabunta-master/marabunta/example_ePuckBody.py
|
from math import *
from time import sleep, time
from BaseRobot import BaseBody
from ePuck import ePuck
class ePuckBody(BaseBody,ePuck):
"""Example of a minimal Body implementation
for the ePuck using its Python API, see
https://github.com/mmartinortiz/pyePuck .
This is meant to illustrate how to implement a
Body class for commercial robots.
This implementation is UNTESTED.
"""
def __init__(self, mac):
ePuck.__init__(self, mac)
        self.max_speed = 0.18
        self.LRdist = 0.053  # assumed e-puck wheel separation (m); rotate() needs it
        self.last_update = -float('inf')
self.turn_on()
return
def update_sensors(self):
"""Connect to the ePuck to get the
sensors. Avoids calling step()
too often.
"""
dtime = time() - self.last_update
if dtime > 0.25:
self.step()
self.last_update = time()
return dtime
def turn_on(self):
self.connect() # from pyePuck
self.enable('proximity','light')
return
def get_position(self):
"""Returns the x,y coordinates of the robot.
"""
# missing localization
return [0., 0.]
def get_heading(self):
"""Returns the angle between the robot orientation
        and the x-axis in radians between [-pi, pi].
"""
# missing localization
return 0.
def wheel_speed(self, v):
"""Convert velocity v from m/s
to the parameter between -1000 and 1000
that set_motors_speed expects.
"""
wheel = int( 1000. * v / self.max_speed)
wheel = max(-1000, wheel)
wheel = min( wheel, 1000)
return wheel
def move_forward(self,time,v=None):
"""Move forward the robot during *time* seconds at
        a speed *v* (m/s). If no speed is given, use
        the max speed.
"""
if v is None:
v = self.max_speed
wheel = self.wheel_speed(v)
self.set_motors_speed(wheel, wheel)
sleep(time)
return
def rotate(self,dtheta):
"""Rotate the robot an angle *dtheta* in radians between [-pi/2,pi/2].
Return the time in seconds it took to perform the rotation.
"""
# naive rotate, don't expect to be very precise
time = self.LRdist * abs(dtheta) / (2*self.max_speed)
        if dtheta > 0:
            self.set_motors_speed(-1000, 1000)
        else:
            self.set_motors_speed(1000, -1000)
sleep(time)
self.set_motors_speed(0,0)
return time
def get_ultrasound(self):
"""Return an array with the distances of the obstacles
detected. Right now, it is assumed this will return at least 3
values (this may change).
"""
self.update_sensors()
return self.get_proximity()
def obstacle_coordinates(self):
"""Return a list with the coordinates of the obstacles detected
in relation to the robot. This coordinates are relative to the
robot but using the global orientation of the axis, i.e. this
returns the displacement vector from the robot to the obstacles
using global coordinates. (in other words, sets the [0.,0.]
at the position of the robot but ignores its heading.)
"""
self.update_sensors()
h = self.get_heading()
ps = self.get_proximity()
        return [[ps[i]*cos(h + i*pi/4.), ps[i]*sin(h + i*pi/4.)]
                for i in range(8)]
def obstacle_global_coordinates(self):
"""Same as obstacle_coordinates but setting
the origin of coordinates to the ground truth
origin and not relative to the robot position.
"""
pos = self.get_position()
return [ (ob[0]+pos[0], ob[1]+pos[1]) for ob in self.obstacle_coordinates()]
def obstacle_infront(self):
"""Return True if an obstacle is "in front", meaning
that extreme measures such as stopping the movement
have to be taken to avoid a collision.
Used by move_forward() to stop the robot in case
something is too close.
"""
ps = self.get_proximity()
return ps[0] < 0.3 or ps[7] < 0.3 or ps[1] < 0.15 or ps[6] < 0.15
def obstacle_near(self):
"""Return True if an obstacle is "near" meaning
that the robot should be aware of the existence
of the obstacle even if it may not collide
directly.
"""
ps = self.get_proximity()
return ps[0] < 0.6 or ps[7] < 0.6 or \
ps[1] < 0.5 or ps[6] < 0.5 or \
ps[2] < 0.2 or ps[5] < 0.2
| 5,063 | 33.924138 | 84 |
py
|
marabunta
|
marabunta-master/marabunta/__init__.py
|
from BaseRobot import BaseRobot, BaseBody, BaseNetwork
from MockBody import MockBody
from MockNetwork import MockNetwork
from Map import Map2D
import imp
__all__ = ['BaseRobot', 'BaseBody', 'BaseNetwork',
'MockBody', 'MockNetwork',
'Map2D']
# Include eBotBody only if eBot-API is installed
try:
imp.find_module('eBot')
include_eBot = True
except ImportError:
include_eBot = False
if include_eBot:
from eBotBody import eBotBody
__all__.append('eBotBody')
del include_eBot
# Include XBee*Network only if serial is installed
try:
imp.find_module('serial')
include_serial = True
except ImportError:
include_serial = False
if include_serial:
from XBeeNetwork import XBeeNetwork, XBeeExpirationNetwork
__all__.extend(['XBeeNetwork', 'XBeeExpirationNetwork'])
del include_serial
| 839 | 23.705882 | 62 |
py
|
marabunta
|
marabunta-master/marabunta/XBeeNetwork.py
|
from random import randint
from time import time, sleep
import threading
import Queue
import glob
import sys
from BaseRobot import BaseNetwork
from utils import SafeSerial
from serial import Serial
class XBeeNetwork(BaseNetwork):
"""Network class for communication using XBee series 1
connected as a serial port in /dev/ttyUSB*.
Messages are only sent at particular time slots specified
by *window_start*, *window_end*, and *period*.
The incoming data in the serial port is continually
scanned for new messages.
When receiving a message, its content is assumed to have
a certain structure defined by the first two characters.
"""
def __init__(self, window_start, window_end, period,
ID=None, lock=None, tty='/dev/ttyUSB*'):
assert period > 0.
assert window_start >= 0. and window_start < period
assert window_end > window_start and window_end <= period
self.window_start = window_start
self.window_end = window_end
self.period = period
if ID:
self.ID = str(ID)
else:
self.ID = str(randint(0, 999999))
self.lock = lock
self.tty = tty
self.broadcasting = False
self.port = None
self.poses = {}
self.obstacles = {}
self.obstimes = {}
self.inbox = Queue.LifoQueue()
self.outbox = Queue.LifoQueue()
self.awake = threading.Event()
self.awake.set()
self.parser = {"xx": self.parse_state,
"tt": self.parse_heading,
"oo": self.parse_obstacles,
"xo": self.parse_state_obstacles,
"up": self.parse_wakeup,
"ss": self.parse_sleep,
"mm": self.parse_message}
return
def start_broadcasting(self):
"""Open a serial connection to XBee and start
one thread for sending messages and one thread
for reading messages.
The connection to XBee is done by opening
a SafeSerial port connection to /dev/ttyUSB*
This method does nothing if the network is
already broadcasting (self.broadcasting=True).
"""
if not self.broadcasting:
connected = False
if '*' in self.tty:
port_paths = glob.glob(self.tty)
else:
port_paths = [self.tty]
for port_path in port_paths:
try:
if self.lock == 'no':
port = Serial(port_path, baudrate=115200,
timeout=0.100, writeTimeout=0.100)
else:
port = SafeSerial(port_path, baudrate=115200,
timeout=0.100, writeTimeout=0.100,
lock=self.lock)
connected = True
except:
connected = False
if connected:
self.port = port
for i in range(7):
self.port.flushInput()
self.port.flushOutput()
break
if not connected:
self.port = None
raise Exception("start_broadcasting: Xbee not found")
self.broadcasting = True
self.send_thread = threading.Thread(target=self.send_background)
self.send_thread.daemon = True
self.send_thread.start()
self.read_thread = threading.Thread(target=self.read_background)
self.read_thread.daemon = True
self.read_thread.start()
return self.port
def stop_broadcasting(self):
"""Stop the send and read threads and turn off
broadcasting. This function returns when the
threads have been succesfully terminated or else
raises and Exception.
This method does nothing if the network is
not broadcasting already (self.broadcasting=False).
Returns the number of messages left to send.
"""
if self.broadcasting:
self.broadcasting = False
self.awake.set() # force wakeup to finish the threads
if self.send_thread.is_alive():
self.send_thread.join(5)
if self.read_thread.is_alive():
self.read_thread.join(5)
if self.send_thread.is_alive() or self.read_thread.is_alive():
raise Exception(
"stop_broadcasting: Could not stop background threads")
self.port.close()
return self.outbox.qsize()
def __enter__(self):
self.start_broadcasting()
return self
def __exit__(self, type, value, traceback):
self.stop_broadcasting()
return
def standby(self):
"""If the robot is asleep, check periodically
for a wakeup signal (signal starting with "up").
Ignores any other message received.
Returns the time spent in standby mode.
"""
init_time = time()
while not self.is_awake():
sleep(2)
while self.port.inWaiting() > 0:
new_message = self.port.readline()
if len(new_message) > 1 and new_message[0:2] == "up":
self.parse_wakeup("")
return time() - init_time
def is_awake(self):
return self.awake.is_set()
# Sending methods:
def send_state(self, pos, heading):
"""Send string of the form:
"xx*x* *y* *heading* *time* *ID*"
This is a low priority message, it is only scheduled
to send if there is no other message in the stack.
"""
message = "xx{:.5f}\t{:.5f}\t{:.5f}\t{:.5f}\t{:}\n".format(
pos[0], pos[1], heading, time(), self.ID)
if self.outbox.empty():
self.outbox.put(message)
return message
def send_heading(self, heading):
"""Send string of the form:
"tt*heading* *time* *ID*"
This is a low priority message, it is only scheduled
to send if there is no other message in the stack.
"""
message = "tt{:.5f}\t{:.5f}\t%{:}\n".format(heading, time(), self.ID)
if self.outbox.empty():
self.outbox.put(message)
return message
def send_obstacles(self, obstacles):
"""Send string of the form:
"oo*x1*:*y1* *x2*:*y2* (...) *time* *ID*"
The message can contain an arbitrary number of obstacles
(but it is not guaranteed to be sent correctly if there
are too many).
"""
obstacles_str = "".join("{:.2f}:{:.2f}".format(*o) for o in obstacles)
message = "oo{:}{:.5f}\t{:}".format(obstacles_str, time(), self.ID)
self.outbox.put(message)
return message
def send_state_obstacles(self, pos, heading, obstacles):
"""Send string of the form:
"xo*x* *y* *heading* *x1*:*y1* *x2*:*y2* (...) *time* *ID*"
The message can contain an arbitrary number of obstacles
(but it is not guaranteed to be sent correctly if there
are too many).
"""
obstacles_str = "".join("{:.2f}:{:.2f}".format(*o) for o in obstacles)
message = "xo{:.5f}\t{:.5f}\t{:.5f}\t{:}\t{:.5f}\t{:}\n".format(
pos[0], pos[1], heading, obstacles_str, time(), self.ID)
if self.outbox.empty():
self.outbox.put(message)
return message
def send_wakeup(self):
"""Send wakeup signal to everyone.
Message includes the ID and the time.
"""
message = "up{:.5f}\t{:}\n".format(time(), self.ID)
self.outbox.put(message)
return message
def send_sleep(self):
"""Send sleep signal to everyone.
Message includes the ID and the time.
"""
message = "ss{:.5f}\t{:}\n".format(time(), self.ID)
self.outbox.put(message)
return message
def send_message(self, text):
"""Sends a generic message given
as input.
"""
message = "mm" + str(text)
self.outbox.put(message)
return message
# Processing incoming methods:
def parse_state(self, message):
"""Parse a message containing x, y, theta, time, ID"""
try:
x, y, theta, time, ID = message.rstrip('\n').split()
self.poses[ID] = (float(x), float(y), float(theta))
except:
sys.stderr.write("parse_state(): Bad data:\n" + message + "\n")
return
def parse_heading(self, message):
"""Parse a message containing theta, time, ID"""
try:
theta, time, ID = message.rstrip('\n').split()
self.poses[ID] = float(theta)
except:
sys.stderr.write("parse_heading(): Bad data:\n" + message + "\n")
return
def parse_obstacles(self, message):
"""Parse a message containing a set of obstacle
        coordinates and store them in self.obstacles.
"""
try:
data = message.rstrip('\n').split()
ID = data.pop()
time = float(data.pop())
self.obstacles[ID] = [[float(p) for p in point.split(':')]
for point in data]
self.obstimes[ID] = time
except:
sys.stderr.write("parse_obstacles(): Bad data:\n" + message + "\n")
return
def parse_state_obstacles(self, message):
"""Parse a message containing x, y, theta and
        a set of obstacle coordinates.
"""
try:
data = message.rstrip('\n').split()
ID = data.pop()
time = float(data.pop())
x, y, theta = data[:3]
self.poses[ID] = (float(x), float(y), float(theta))
            self.obstacles[ID] = [[float(p) for p in point.split(':')]
                                  for point in data[3:]]
self.obstimes[ID] = time
except:
sys.stderr.write(
"parse_state_obstacles(): Bad data:\n" + message + "\n")
return
def parse_wakeup(self, message):
"""Wakes up the device."""
self.awake.set()
return
def parse_sleep(self, message):
"""If device is awake, set to sleep and
put on standby mode. This method returns
only after the device is awake again.
"""
if self.is_awake():
self.awake.clear()
self.standby()
return
def parse_message(self, message):
self.inbox.put(message)
return
def get_messages(self):
"""Returns all incoming messages received since
last call to this method. The messages are
returned in a list sorted from newest to oldest
(FILO stack).
"""
incomings = []
while not self.inbox.empty():
incomings.append(self.inbox.get())
self.inbox.task_done()
return incomings
def get_agents_state(self):
"""Returns the dictionary with the
data gathered from the network through the
read_background thread regarding the
state (position and heading) of the agents.
"""
return self.poses
def get_obstacles(self):
"""Returns the dictionary with the
data gathered from the network through the
read_background thread regarding the
obstacles detected by other agents.
"""
return self.obstacles, self.obstimes
# User should not need to call any function below this point
def send(self):
"""Write the most recent item put into
the outbox into the serial port.
Return the item.
"""
m = self.outbox.get()
self.port.write(m)
self.outbox.task_done()
return m
def send_background(self):
"""Function meant to be called in a separate
        thread to continuously check for the time and
send the most recent message whenever the
time slot is right.
        When the proper time slot is reached, this
        sends the most recent item put in the Queue
        (at most one message per window).
"""
while self.broadcasting:
t = time() % self.period
if t < self.window_start:
sleep(self.window_start - t)
elif t >= self.window_end:
sleep(self.period + self.window_start - t)
else:
if self.outbox.empty():
sleep((self.window_end - self.window_start) * 0.2)
else:
self.send()
# make sure only one message per window is sent:
sleep(self.window_end - time() % self.period)
self.awake.wait() # wait until the device is awake.
return
def read(self):
"""If there is an incoming message wait until
        a whole line is received, then parse using
        the appropriate parser function according to
the "key" of the message (first two characters).
A certain structure for the message is assumed,
if the message fails to follow the structure a
warning is sent to stderr and the message is
ignored.
New keys should be added to the keys-to-parsers
dict, self.parser.
Returns the received message.
"""
message = ''
if self.port.inWaiting() > 0:
message = self.port.readline()
if len(message) > 1:
key = message[0:2]
try:
self.parser[key](message[2:])
except KeyError:
sys.stderr.write("read(): unknown key:\n" + key + "\n")
return message
def read_background(self):
"""Function meant to be called in a separate
        thread to continuously check for incoming
messages. The frequency at which new
messages are checked could require some
        tweaking depending on the hardware limitations
        and the number of robots broadcasting.
"""
while self.broadcasting:
self.read()
            sleep(self.period / 15.)  # assuming this is enough to get all messages
self.awake.wait()
return
class XBeeExpirationNetwork(XBeeNetwork):
"""Extension of XBeeNetwork where the data
received is ignored after *expiration_time*
number of seconds since it was first sent
(according to the sender).
"""
def __init__(self, expiration_time, window_start, window_end,
period=1, ID=None, lock=None):
self.expiration_time = expiration_time
self.expirations = {}
XBeeNetwork.__init__(self, window_start, window_end, period, ID, lock)
return
def parse_state(self, message):
"""Parse a message containing x, y, theta, time, ID.
Store the expiration date of the message in self.expirations."""
try:
x, y, theta, time, ID = message.rstrip('\n').split()
self.poses[ID] = (float(x), float(y), float(theta))
self.expirations[ID] = float(time) + self.expiration_time
except:
sys.stderr.write("parse_state(): Bad data:\n" + message + "\n")
return
def get_agents_state(self):
"""Build a new dictionary that only contains the entries from
self.poses that have not yet reached their expiration time.
"""
t = time()
ids = [ID for ID, exp_time in self.expirations.items() if t < exp_time]
g = {ID: self.poses[ID] for ID in ids}
return g
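# Hedged usage sketch (not part of the original module): one agent using a
# 1 s period with a [0.0, 0.3) send slot, as the constructor describes.
# Requires an actual XBee on /dev/ttyUSB*; the slot bounds here are illustrative.
if __name__ == "__main__":
    with XBeeNetwork(window_start=0.0, window_end=0.3, period=1.0, ID="A") as net:
        net.send_state((0., 0.), 0.)
        sleep(2)  # let at least one send window elapse
        print(net.get_agents_state())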
| 15,757 | 35.476852 | 79 |
py
|
marabunta
|
marabunta-master/marabunta/models/HeadingConsensusRobot.py
|
from marabunta import BaseRobot
from math import sin,cos,pi
class HeadingConsensusRobot(BaseRobot):
"""Robot model for heading consensus.
By iteratively calling the update() method,
this robot will communicate with the rest
of the swarm and align its heading to the
swarm's mean heading.
Obstacle avoidance (implemented in BaseRobot)
    will take precedence over consensus reaching.
"""
#def __init__(self, body, network):
# BaseRobot.__init__(self, body, network)
# return
def heading_target(self):
"""Get the other agent's state and
compute the mean heading. Note that
for periodic quantities such as the
heading, the mean is defined as
        < x > = atan2( sum_i sin(x_i), sum_i cos(x_i) )
Returns a vector pointing to the
mean heading. If no agents are
detected, returns None.
"""
neis = self.get_agents().values()
if neis:
sint = sum( [sin(nei[2]) for nei in neis])
cost = sum( [cos(nei[2]) for nei in neis])
target = [cost, sint]
else:
target = None
return target
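    # Worked check of the circular mean above (hedged, not original code):
    # for headings 3.04 and -3.04 (both near pi) the arithmetic mean is 0,
    # pointing the wrong way, while the vector sum [cos+cos, sin+sin]
    # ~= [-1.99, 0.0] correctly points toward pi.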
def move_to_target(self, target, deltat, v):
"""Align the robot to *target* and
move forward for *deltat* at a speed *v*.
"""
self.align(target)
self.move_forward(deltat, v)
return
def update(self, deltat, v=None):
"""Perform one step of the consensus
protocol. This is the main "behavior"
of the robot. It consists of 4 steps:
1. Broadcast its state.
2. Perform swarming. In practice,
this means computing the desired
target direction of motion.
(in this case, perform heading
consensus)
3. Correct the desired target
in order to avoid obstacles.
4. Move in the desired target direction.
"""
self.broadcast_state()
# Perform swarming
target = self.heading_target()
if not target:
            h = self.body.get_heading()
            target = [cos(h), sin(h)]
# Avoid obstacles
target = self.correct_target(target)
self.move_to_target(target, deltat, v)
return
| 2,310 | 31.549296 | 59 |
py
|
marabunta
|
marabunta-master/marabunta/models/PerimeterDefenseRobot.py
|
from marabunta import BaseRobot
from math import *
class PerimeterDefenseRobot(BaseRobot):
"""Robot model for perimeter defense.
By iteratively calling the update() method,
this robot will communicate with the rest
of the swarm and move away from the others
as far as possible. Takes a *threshold*
parameter to determine when it has gone
far enough and reached consensus. Can be
set to 0.
Obstacle avoidance (implemented in BaseRobot)
    will take precedence over consensus reaching.
"""
def __init__(self, body, network, threshold):
BaseRobot.__init__(self, body, network)
self.threshold = threshold
self.rendezvous_point = None
self.path = []
self.known_lights = []
self.num_lights = 0
return
def set_path(self, path):
self.path = path[:]
return self.path
def spread_target(self):
"""Get the other agent's state and
compute the direction of motion that
will maximize distance with them.
This is computed as a linear combination
of the positions of each neighbor
relative to the agent, where each
position is weighted by the inverse
of the distance**2 to that robot,
t_i = sum_j (r_j - r_i)/|r_j - r_i|^2 ,
so that higher priority is given to
move away from the closer agents, but
still taking all into account and
allowing for neighbors to "cancel each
other out."
        Returns the spread direction vector, or
        None if its squared norm is below the
        threshold or no agents are detected.
"""
neis = self.get_agents().values()
pos = self.body.get_position()
if neis:
target = [0.,0.]
for nei in neis:
d2 = (nei[0]-pos[0])**2 + (nei[1]-pos[1])**2
if d2>0:
target[0] += (pos[0] - nei[0])/d2
target[1] += (pos[1] - nei[1])/d2
norm2 = target[0]*target[0] + target[1]*target[1]
if norm2 < self.threshold:
target = None
else:
target = None
return target
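    # Worked check of the weighting above (hedged, not original code): with
    # neighbors at relative positions (1, 0) and (0, 2), the target is
    # (-1, 0)/1 + (0, -2)/4 = (-1.0, -0.5), fleeing the closer agent harder.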
def rendezvous_target(self):
"""Compute the target direction of movement
that allows the robot to reach the rendezvous point
(stored in self.rendezvous_point).
When the robot is close enough to the point this
sets self.rendezvous_point to None and also returns
None as the target.
"""
if self.rendezvous_point:
pos = self.body.get_position()
target = [ self.rendezvous_point[0]-pos[0] , self.rendezvous_point[1]-pos[1] ]
distance = sqrt(target[0]*target[0]+target[1]*target[1])
if distance < 0.10: # rendezvous point reached
try:
self.rendezvous_point = self.path.pop(0)
target = self.rendezvous_target()
except:
target = [0., 0.]
self.rendezvous_point = None
else:
try:
self.rendezvous_point = self.path.pop(0)
target = self.rendezvous_target()
except:
target = None
self.rendezvous_point = None
return target
def move_to_target(self, target, deltat, v, block=False):
"""If the norm2 of *target* is is larger
than *threshold*, align the robot to
*target* and move forward for *deltat*
at a speed *v*.
Else, stop for *deltat*.
"""
if target[0]**2 + target[1]**2 > self.threshold*self.threshold:
# Some robots allow for a block argument in
# the align method.
try:
self.body.align(target, block)
except (TypeError,AttributeError):
self.align(target)
self.move_forward(deltat, v)
else:
self.move_forward(deltat, 0)
return
def light_detected(self):
"""If light is detected and is a
        new light, broadcast its position
and add it to the list of known
light sources.
"""
try:
light = self.body.light_detected()
except AttributeError:
light = False
if light:
x, y = self.body.get_position()
self.add_light(x,y)
return light
def process_messages(self):
messages = self.network.get_messages()
for message in messages:
if len(message)>3:
mesdata = message.split()
if mesdata[0]=="stop":
raise Exception("Stop!")
elif mesdata[0]=="goto":
try:
self.rendezvous_point = (float(mesdata[1]), float(mesdata[2]))
except:
print("#PerimenterDefenseRobot: Strange message received: ",message)
elif mesdata[0]=="light":
try:
x, y = float(mesdata[1]), float(mesdata[2])
except:
x, y = None, None
print("#PerimenterDefenseRobot: Strange message received: ",message)
self.add_light(x,y)
return messages
def add_light(self, x, y):
"""Only add light to the list of known lights if
        this new one is at least 0.8 m from any other
previously known light.
"""
if all( (x-light[0])**2 + (y-light[1])**2 > 0.8 * 0.8 for light in self.known_lights):
self.known_lights.append( (x,y) )
self.num_lights += 1
self.network.send_message("light\t%.2f\t%.2f\n"%(x,y))
return
def update(self, deltat, v=None):
"""Perform one step of the consensus
protocol. This is the main "behavior"
of the robot. It consists of 4 steps:
1. Broadcast its state.
2. Perform swarming. In practice,
this means computing the desired
target direction of motion.
(in this case, perform perimeter
defense)
3. Correct the desired target
in order to avoid obstacles.
4. Move in the desired target direction.
"""
self.broadcast_state()
self.process_messages()
# If goto message received, go there
target = self.rendezvous_target()
# check if rendezvous point has been reached
if target and target[0]==0 and target[1]==0:
return False, True # STOP HERE!
if not target:
# Perform swarming
target = self.spread_target()
if not target:
                h = self.body.get_heading()
                target = [10.*sqrt(self.threshold)*cos(h), 10.*sqrt(self.threshold)*sin(h)]
# Avoid obstacles
target = self.correct_target(target)
obstacle = self.obstacle_near()
if obstacle and v:
v *= 0.6
self.move_to_target(target, deltat, v, obstacle)
light = self.light_detected()
return light, False
| 7,197 | 35.170854 | 94 |
py
|
marabunta
|
marabunta-master/marabunta/models/AreaCoverageRobot.py
|
from math import *
from marabunta.models import PerimeterDefenseRobot
class AreaCoverageRobot(PerimeterDefenseRobot):
"""Robot model for perimeter defense.
By iteratively calling the update() method,
this robot will communicate with the rest
of the swarm and move away from the others
as far as possible. Takes a *threshold*
parameter to determine when it has gone
far enough and reached consensus. Can be
set to 0.
Obstacle avoidance (implemented in BaseRobot)
    will take precedence over consensus reaching.
"""
def spread_target(self):
"""Get the other agent's state and
compute the direction of motion that
will maximize distance with them.
This is computed as a linear combination
of the positions of each neighbor
relative to the agent, where each
position is weighted by the inverse
of the distance**2 to that robot,
t_i = sum_j (r_j - r_i)/|r_j - r_i|^2 ,
so that higher priority is given to
move away from the closer agents, but
still taking all into account and
allowing for neighbors to "cancel each
other out."
        Returns the spread direction vector.
        If no agents or obstacles are
        detected, returns None.
"""
neis = self.get_agents().values()
pos = self.body.get_position()
# Get both neighbors and obstacles, in relative coordinates
points = [ [nei[0]-pos[0], nei[1]-pos[1]] for nei in neis] + self.body.obstacle_coordinates()
if points:
            target = [0., 0.]
            for p in points:
                d2 = p[0]**2 + p[1]**2
                if d2 > 0:
                    weight = (1.0 / d2)**1.5
                    target[0] -= p[0]*weight
                    target[1] -= p[1]*weight
        else:
            target = None
return target
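    # Hedged note (not original code): with weight = (1/d2)**1.5 each point's
    # contribution magnitude scales as |p|**-2, a steeper fall-off than the
    # |p|**-1 of PerimeterDefenseRobot, so nearby points dominate more.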
def update(self, deltat, v=None):
"""Perform one step of the consensus
protocol. This is the main "behavior"
of the robot. It consists of 4 steps:
1. Broadcast its state.
2. Perform swarming. In practice,
this means computing the desired
target direction of motion.
           (in this case, perform area
           coverage)
3. Correct the desired target
in order to avoid obstacles.
4. Move in the desired target direction.
"""
self.broadcast_state()
self.process_messages()
# Perform swarming
target = self.spread_target()
if not target:
            h = self.body.get_heading()
            target = [10.*cos(h), 10.*sin(h)]
# Avoid obstacles
target = self.correct_target(target)
self.move_to_target(target, deltat, v)
light = self.light_detected()
return light
| 2,879 | 35 | 101 |
py
|
marabunta
|
marabunta-master/marabunta/models/MarchingRobot.py
|
from marabunta import BaseRobot
from math import *
class MarchingRobot(BaseRobot):
"""Robot model for marching algorithm.
By iteratively calling the update() method,
this robot will communicate with the rest
of the swarm and move in a way that
    simultaneously tries to
    [Spread] stay away from the closest neighbor,
    [Heading] achieve heading consensus, and
[Group] stay close to the group of neighbors.
The importance of each of these three aspects can
be set with the variables S, H and G.
Typical values are given by default but the optimal
parameters can drastically change depending on the
properties of the agents and the desired outcome.
Takes a *threshold* parameter to determine when it
has reached a "good enough" state. Can be set to 0.
Obstacle avoidance (implemented in BaseRobot)
    will take precedence over consensus reaching.
"""
def __init__(self, body, network, threshold=0.5, w_spread=2., w_heading=1., w_group=0.2):
BaseRobot.__init__(self, body, network)
self.threshold = threshold
self.S = w_spread
self.H = w_heading
self.G = w_group
return
def march_target(self):
"""Compute the target direction of motion.
This is computed as a linear combination
of three vectors:
1. The *spread* vector is computed as the
direction that will maximize the distance
between agents.
(same as in PerimeterDefenseRobot)
2. The *heading* vector is computed as the
mean of the swarm alignment.
(same as in HeadingConsensusRobot)
3. The *group* vector is computed as the
distance to the center of mass of the
swarm, normalized with the standard
deviation of the positions. This means
that the concept of being near or far
from the swarm is relative to its size
and spread.
Returns a vector pointing to the target
direction. If no agents are detected,
returns None.
"""
neis = self.get_agents().values()
if not neis:
return None
n_neis = len(neis)
pos = self.body.get_position()
        s2, group_x, group_y, spread_x, spread_y, heading_x, heading_y = 0, 0, 0, 0, 0, 0, 0
R0 = 0.35
for nei in neis:
dx = nei[0]-pos[0]
dy = nei[1]-pos[1]
d2 = dx*dx + dy*dy
s2 += d2
group_x += dx
group_y += dy
spread_x -= dx * R0 / d2
spread_y -= dy * R0 / d2
heading_x += cos(nei[2])
heading_y += sin(nei[2])
heading_x /= n_neis
heading_y /= n_neis
if n_neis > 1:
# Only consider the COM if two or more
# neighbors are detected. (otherwise s2=0)
s2 = sqrt( s2/n_neis - (group_x/n_neis)**2 - (group_y/n_neis)**2 )
group_x /= s2
group_y /= s2
else:
group_x = 0.0
group_y = 0.0
        return [self.S*spread_x + self.H*heading_x + self.G*group_x,
                self.S*spread_y + self.H*heading_y + self.G*group_y]
def move_to_target(self, target, deltat, v):
"""If the norm2 of *target* is is larger
than *threshold*, align the robot to
*target* and move forward for *deltat*
at a speed *v*.
Else, stop for *deltat*.
"""
d2 = target[0]*target[0] + target[1]*target[1]
if d2 > self.threshold:
self.align(target)
self.move_forward(deltat, v)
else:
self.move_forward(deltat, 0.)
return
def update(self, deltat, v=None):
"""Perform one step of the consensus
protocol. This is the main "behavior"
of the robot. It consists of 4 steps:
1. Broadcast its state.
2. Perform swarming. In practice,
this means computing the desired
target direction of motion.
(in this case, march in formation)
3. Correct the desired target
in order to avoid obstacles.
4. Move in the desired target direction.
"""
self.broadcast_state()
# Perform swarming
target = self.march_target()
if not target:
            h = self.body.get_heading()
            target = [1.5*sqrt(self.threshold)*cos(h), 1.5*sqrt(self.threshold)*sin(h)]
# Avoid obstacles
target = self.correct_target(target)
self.move_to_target(target, deltat, v)
return
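# Hedged usage sketch (not part of the original module): a few update steps
# with the mock body and network from the main package. Assumes BaseRobot's
# constructor and broadcast/correct methods work with the mocks, as the other
# models suggest.
if __name__ == "__main__":
    from marabunta import MockBody, MockNetwork
    body = MockBody([0., 0.], 0.)
    body.load_obstacles([[2., 2.], [-2., -2.], [2., -2.], [-2., 2.]])
    network = MockNetwork("M1")
    network.start_broadcasting()
    robot = MarchingRobot(body, network)
    for _ in range(10):
        robot.update(deltat=0.1, v=0.1)
    print(body.get_position())
    network.stop_broadcasting()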
| 4,687 | 36.206349 | 96 |
py
|
marabunta
|
marabunta-master/marabunta/models/__init__.py
|
from HeadingConsensusRobot import HeadingConsensusRobot
from PerimeterDefenseRobot import PerimeterDefenseRobot
from AreaCoverageRobot import AreaCoverageRobot
from MarchingRobot import MarchingRobot
__all__ = ['HeadingConsensusRobot', 'PerimeterDefenseRobot',
'AreaCoverageRobot', 'MarchingRobot']
| 315 | 38.5 | 60 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_theta/extend_with_constants.py
|
import pandas as pd
HP_DEFAULT_VALUES = {
"hepnos_num_threads": 31,
"hepnos_num_databases": 1,
"busy_spin": False,
"loader_progress_thread": False,
"loader_batch_size": 1024,
"enable_pep": False,
"pep_num_threads": 31,
"pep_ibatch_size": 32,
"pep_obatch_size": 32,
"pep_use_preloading": False,
"pep_pes_per_node": 16,
"pep_cores_per_pe": 4
}
def transform_with_constants(source_csv: str, output_csv: str, hp_list: list, hp_default: dict):
source_df = pd.read_csv(source_csv)
for hp in hp_list:
if not(hp in source_df.columns):
source_df[hp] = hp_default[hp] # create new column with default value
source_df.to_csv(output_csv)
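def _demo_transform(tmp_csv="demo_results.csv"):
    """Hedged round-trip check (not part of the original script); the helper
    name and file name are hypothetical."""
    # A frame missing most hyperparameters gains them as constant columns.
    pd.DataFrame({"hepnos_num_threads": [8, 16]}).to_csv(tmp_csv, index=False)
    transform_with_constants(tmp_csv, tmp_csv,
                             list(HP_DEFAULT_VALUES), HP_DEFAULT_VALUES)
    print(pd.read_csv(tmp_csv).columns.tolist())  # defaults added as columns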
if __name__ == "__main__":
source_csv = "exp-1/results.csv"
output_csv = "exp/results.csv"
from problem import Problem
hp_list = Problem.space.get_hyperparameter_names()
transform_with_constants(source_csv, output_csv, hp_list, HP_DEFAULT_VALUES)
| 984 | 24.25641 | 96 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_theta/run_exp.py
|
"""
python -m hepnos_theta.run_exp -w exp/exp-test -q debug-cache-quad -t 60 -A radix-io -n 8 --nodes-per-task 4 -as ./SetUpEnv.sh --run hepnos_theta.run.run --problem hepnos_theta.problem.Problem --fit-search-space exp/
"""
import os
import argparse
import pathlib
import stat
from jinja2 import Template
HERE = os.path.dirname(os.path.abspath(__file__))
JOB_TEMPLATE = os.path.join(HERE, "job.qsub.tmpl")
def run(w, q, A, t, n, step, nodes_per_task, activation_script, run,
problem, fit_surrogate, fit_search_space, transfer_learning_strategy, transfer_learning_epsilon):
w = w.encode("ascii").decode("ascii")
num_dh_workers = n // nodes_per_task # N_T
num_cpus_driver = 4 # N_R
num_cpus_per_task = num_cpus_driver / num_dh_workers # N_{R/T}
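    # e.g. the docstring example (-n 8 --nodes-per-task 4) gives
    # num_dh_workers = 2 and num_cpus_per_task = 4/2 = 2.0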
print(f"Detected {num_dh_workers} DeepHyper parallel evaluations with {n} nodes for the total allocation and {nodes_per_task} nodes per evaluation.")
print(f" num_cpus_driver: {num_cpus_driver}")
print(f" num_cpus_per_task: {num_cpus_per_task}")
step = int(step)
# for transfer learning
if fit_surrogate:
fit_surrogate = os.path.abspath(fit_surrogate)
if fit_search_space:
fit_search_space = os.path.abspath(fit_search_space)
# create exp directory
exp_dir = os.path.abspath(w)
pathlib.Path(exp_dir).mkdir(parents=True, exist_ok=False)
activation_script = os.path.abspath(activation_script)
# load template
with open(JOB_TEMPLATE, "r") as f:
job_template = Template(f.read())
submission_path = os.path.join(w, "job.qsub")
with open(submission_path, "w") as fp:
fp.write(
job_template.render(q=q,
A=A,
t=t,
n=n,
hepnos_exp_step=step,
nodes_per_task=nodes_per_task,
num_cpus_driver=num_cpus_driver,
num_cpus_per_task=num_cpus_per_task,
activation_script=activation_script,
exp_dir=exp_dir,
run=run,
problem=problem,
fit_surrogate=fit_surrogate,
fit_search_space=fit_search_space,
transfer_learning_strategy=transfer_learning_strategy,
transfer_learning_epsilon=transfer_learning_epsilon))
# add executable rights
st = os.stat(submission_path)
os.chmod(submission_path, st.st_mode | stat.S_IEXEC)
# Job submission
os.chdir(exp_dir)
print("Performing job submission...")
cmd = f"qsub job.qsub"
os.system(cmd)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='HEPnOS experiment')
parser.add_argument('-w',
required=True,
type=str,
help="Name of the experiment.")
parser.add_argument('-q', required=True, type=str, help="Queue name.")
parser.add_argument('-A',
default="radix-io",
type=str,
help="Project name.")
parser.add_argument('-t',
required=True,
type=str,
help="Duration of the experiment.")
parser.add_argument('-n',
required=True,
type=int,
help="Number of nodes for the total allocation.")
parser.add_argument('--step',
type=int,
default=1,
help='HEPnOS experiment step.')
parser.add_argument('--nodes-per-task',
type=int,
default=None,
help='Number of nodes to use per task.')
parser.add_argument(
'-as',
'--activation-script',
required=True,
type=str,
help="Path to the script activation the conda environment.")
parser.add_argument('--run', required=True, type=str)
parser.add_argument('--problem', required=True, type=str)
parser.add_argument('--fit-surrogate', required=False, type=str, default="")
parser.add_argument('--fit-search-space', required=False, type=str, default="")
parser.add_argument('--transfer-learning-strategy', required=False, type=str, default="best", choices=["best", "epsilon"])
parser.add_argument('--transfer-learning-epsilon', required=False, type=float, default=1.0)
args = parser.parse_args()
run(**vars(args))
| 4,718 | 39.333333 | 216 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_theta/problem.py
|
import os
from deephyper.problem import HpProblem
Problem = HpProblem(seed=2021)
# the following is the old definition of the Problem
"""
# 1. step
Problem.add_hyperparameter((0, 31), "hepnos_num_threads")
Problem.add_hyperparameter((1, 10), "hepnos_num_databases")
Problem.add_hyperparameter([True, False], "busy_spin")
Problem.add_hyperparameter([True, False], "loader_progress_thread")
Problem.add_hyperparameter((1, 2048, "log-uniform"), "loader_batch_size")
# when "enable_step == True"
# 2. step:
enable_pep = bool(int(os.environ.get("DH_HEPNOS_ENABLE_PEP", 0)))
if enable_pep:
Problem.add_hyperparameter((1, 31), "pep_num_threads")
Problem.add_hyperparameter((8, 1024, "log-uniform"), "pep_ibatch_size")
Problem.add_hyperparameter((8, 1024, "log-uniform"), "pep_obatch_size")
Problem.add_hyperparameter([True, False], "pep_use_preloading")
# 3. step:
# Problem.add_hyperparameter((1, 64), "pep_pes_per_node")
# Problem.add_hyperparameter((1, 64), "pep_cores_per_pe")
"""
def add_parameter(problem, name, domain, description=""):
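    """Thin wrapper around HpProblem.add_hyperparameter; *description* is
    kept for documentation purposes only and is not passed along."""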
problem.add_hyperparameter(domain, name)
step = int(os.environ.get("DH_HEPNOS_EXP_STEP", 1))
# Step 1: Data-loader only, at small scale
add_parameter(Problem, "busy_spin", [True, False],
"Whether Mercury should busy-spin instead of block")
add_parameter(Problem, "hepnos_progress_thread", [True, False],
"Whether to use a dedicated progress thread in HEPnOS")
add_parameter(Problem, "hepnos_num_threads", (0, 63),
"Number of threads used for serving RPC requests")
add_parameter(Problem, "hepnos_num_event_databases", (1, 16),
"Number of databases per process used to store events")
add_parameter(Problem, "hepnos_num_product_databases", (1, 16),
"Number of databases per process used to store products")
add_parameter(Problem, "hepnos_num_providers", (1, 32),
"Number of database providers per process")
add_parameter(Problem, "hepnos_pool_type", ['fifo','fifo_wait','prio_wait'],
"Thread-scheduling policity used by Argobots pools")
add_parameter(Problem, "hepnos_pes_per_node", [1, 2, 4, 8, 16, 32],
"Number of HEPnOS processes per node")
add_parameter(Problem, "loader_progress_thread", [True, False],
"Whether to use a dedicated progress thread in the Dataloader")
add_parameter(Problem, "loader_batch_size", (1, 2048, "log-uniform"),
"Size of the batches of events sent by the Dataloader to HEPnOS")
add_parameter(Problem, "loader_pes_per_node", [1, 2, 4, 8, 16],
"Number of processes per node for the Dataloader")
# Step 2: We add the PEP step, still at small scale
if step >= 2:
add_parameter(Problem, "pep_progress_thread", [True, False],
"Whether to use a dedicated progress thread in the PEP step")
add_parameter(Problem, "pep_num_threads", (1, 31),
"Number of threads used for processing in the PEP step")
add_parameter(Problem, "pep_ibatch_size", (8, 1024, "log-uniform"),
"Batch size used when PEP processes are loading events from HEPnOS")
add_parameter(Problem, "pep_obatch_size", (8, 1024, "log-uniform"),
"Batch size used when PEP processes are exchanging events among themselves")
add_parameter(Problem, "pep_pes_per_node", [1, 2, 4, 8, 16, 32],
"Number of processes per node for the PEP step")
# Step 3: We add some new parameters
if step >= 3:
add_parameter(Problem, "loader_async", [True, False],
"Whether to use the HEPnOS AsyncEngine in the Dataloader")
add_parameter(Problem, "loader_async_threads", (1, 63, "log-uniform"),
"Number of threads for the AsyncEngine to use")
add_parameter(Problem, "pep_use_preloading", [True, False],
"Whether the PEP step should use product-preloading")
# Step 4: We scale to larger experiments (no new processes)
# Note: in the above, if
# (X_progress_thread + 1 + X_num_threads) * X_pes_per_node > 64,
# then we oversubscribe the nodes with more threads than we should.
# If this leads to performance degradation, DeepHyper should detect
# it and avoid the corresponding regions of the parameter space.
# However it would be nice for the paper to be able to impose constraints
# like that.
#
# Note: in step 3, if loader_async is False, then the value of
# loader_async_threads is irrelevant, so it would be nice to be able
# to not sample it when loader_async is False (say that "loader_async_threads"
# is a child parameter of "loader_async").
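# A minimal sketch (not wired into the Problem above) of how such a child
# parameter could be expressed with ConfigSpace conditions; the names below
# mirror the step-3 hyperparameters.
def _conditional_space_sketch():
    import ConfigSpace as CS
    space = CS.ConfigurationSpace()
    loader_async = CS.CategoricalHyperparameter("loader_async", [True, False])
    loader_async_threads = CS.UniformIntegerHyperparameter(
        "loader_async_threads", 1, 63, log=True)
    space.add_hyperparameters([loader_async, loader_async_threads])
    # Only sample loader_async_threads when loader_async is True.
    space.add_condition(
        CS.EqualsCondition(loader_async_threads, loader_async, True))
    return space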
if __name__ == "__main__":
print(Problem)
| 4,467 | 44.131313 | 84 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_theta/run_test.py
|
import re
import subprocess
def __make_node_list(nodes):
if nodes is None:
return None
result = []
for n in nodes:
m = re.search('([0-9]+)', n)
        result.append(str(int(m.group(0))))
return result
def run(config, nodes=None):
val = config["loader_batch_size"]
num_nodes = len(nodes)
node_list = ",".join(__make_node_list(nodes))
cmd_temp = "aprun -n {num_nodes} -L {node_list} -N 1 echo 'val({val})'"
cmd = cmd_temp.format(num_nodes=num_nodes, node_list=node_list, val=val).split(" ")
proc = subprocess.run(cmd, capture_output=True, text=True)
output = proc.stdout
error = proc.stderr
print(f"nodes{nodes}\n command: {' '.join(cmd)}\n -- outpout --\n{output}\n\n -- error --\n{error}")
    m = re.search(r'val\([0-9]+\)', output)
return int(m.group(0)[4:-1])
| 842 | 32.72 | 104 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_theta/run.py
|
import os, uuid
import copy
import json
import re
from shutil import copyfile
def __setup_directory(id_=None):
    if id_ is None:
id_ = uuid.uuid4()
exp_dir = 'exp-' + str(id_)
os.mkdir(exp_dir)
cwd = os.getcwd()
return cwd + '/' + exp_dir
def __make_node_list(nodes):
if nodes is None:
return None
result = []
for n in nodes:
m = re.search('([0-9]+)', n)
        result.append(str(int(m.group(0))))
return result
def __create_settings(exp_dir, hepnos_pes_per_node, loader_batch_size,
loader_async, loader_async_threads, loader_pes_per_node,
enable_pep, pep_num_threads, pep_ibatch_size,
pep_obatch_size, pep_use_preloading, pep_pes_per_node,
nodes):
settings_sh_in = os.path.dirname(
os.path.abspath(__file__)) + '/scripts/settings.sh.in'
settings_sh = exp_dir + '/settings.sh'
copyfile(settings_sh_in, settings_sh)
with open(settings_sh, 'a+') as f:
f.write('\n')
f.write('HEPNOS_PES_PER_NODE=%d\n' % hepnos_pes_per_node)
if loader_async:
f.write('HEPNOS_LOADER_ASYNC=-a\n')
f.write('HEPNOS_LOADER_ASYNC_THREADS=%d\n' % loader_async_threads)
else:
f.write('HEPNOS_LOADER_ASYNC=\n')
f.write('HEPNOS_LOADER_ASYNC_THREADS=0\n')
f.write('HEPNOS_LOADER_BATCH_SIZE=%d\n' % loader_batch_size)
f.write('HEPNOS_LOADER_PES_PER_NODE=%d\n' % loader_pes_per_node)
if enable_pep:
f.write('HEPNOS_ENABLE_PEP=1\n')
f.write('HEPNOS_PEP_THREADS=%d\n' % pep_num_threads)
f.write('HEPNOS_PEP_IBATCH_SIZE=%d\n' % pep_ibatch_size)
f.write('HEPNOS_PEP_OBATCH_SIZE=%d\n' % pep_obatch_size)
f.write('HEPNOS_PEP_PES_PER_NODE=%d\n' % pep_pes_per_node)
if pep_use_preloading:
f.write('HEPNOS_PEP_PRELOAD=--preload\n')
else:
f.write('HEPNOS_PEP_PRELOAD=\n')
else:
f.write('HEPNOS_ENABLE_PEP=0\n')
if nodes is not None:
f.write('HEPNOS_NODELIST=(%s)\n' % ' '.join(nodes))
def __generate_dataloader_config_file(exp_dir='.',
filename='dataloader.json',
busy_spin=False,
use_progress_thread=False):
dataloader_json_in = os.path.dirname(
os.path.abspath(__file__)) + '/scripts/dataloader.json.in'
dataloader_json = exp_dir + '/' + filename
with open(dataloader_json_in) as f:
config = json.loads(f.read())
config['mercury']['na_no_block'] = bool(busy_spin)
config['use_progress_thread'] = bool(use_progress_thread)
with open(dataloader_json, 'w+') as f:
f.write(json.dumps(config, indent=4))
def __generate_pep_config_file(exp_dir='.',
filename='pep.json',
busy_spin=False,
use_progress_thread=False):
pep_json_in = os.path.dirname(
os.path.abspath(__file__)) + '/scripts/pep.json.in'
pep_json = exp_dir + '/' + filename
with open(pep_json_in) as f:
config = json.loads(f.read())
config['mercury']['na_no_block'] = bool(busy_spin)
config['use_progress_thread'] = bool(use_progress_thread)
with open(pep_json, 'w+') as f:
f.write(json.dumps(config, indent=4))
def __generate_hepnos_config_file(exp_dir='.',
filename='hepnos.json',
busy_spin=False,
use_progress_thread=False,
num_threads=0,
num_providers=1,
num_event_dbs=1,
num_product_dbs=1,
pool_type='fifo_wait'):
hepnos_json_in = os.path.dirname(
os.path.abspath(__file__)) + '/scripts/hepnos.json.in'
hepnos_json = exp_dir + '/' + filename
with open(hepnos_json_in) as f:
config = json.loads(f.read())
config['margo']['mercury']['na_no_block'] = bool(busy_spin)
config['margo']['argobots']['pools'][0]['type'] = pool_type
if use_progress_thread:
config['margo']['argobots']['pools'].append({
'name': '__progress__',
'type': pool_type,
'access': 'mpmc'
})
config['margo']['argobots']['xstreams'].append({
'name': '__progress__',
'scheduler': {
'type': 'basic_wait',
'pools': ['__progress__']
}
})
config['margo']['progress_pool'] = '__progress__'
else:
config['margo']['progress_pool'] = '__primary__'
rpc_pools = []
for i in range(0, num_providers):
config['margo']['argobots']['pools'].append({
'name': ('__rpc_%d__' % i),
'type': pool_type,
'access': 'mpmc'
})
rpc_pools.append('__rpc_%d__' % i)
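    # Map RPC pools to execution streams: with num_threads == 0 all RPC
    # pools are served by the primary xstream; otherwise up to
    # min(num_threads, num_providers) dedicated xstreams are created and
    # the pools are distributed over them round-robin.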
if num_threads == 0:
config['margo']['argobots']['xstreams'][0]['scheduler'][
'pools'].extend(rpc_pools)
else:
es = []
for i in range(0, min(num_threads, num_providers)):
config['margo']['argobots']['xstreams'].append({
'name': ('rpc_es_%d' % i),
'scheduler': {
'type': 'basic_wait',
'pools': []
}
})
es.append(config['margo']['argobots']['xstreams'][-1])
for i in range(0, len(rpc_pools)):
es[i % len(es)]['scheduler']['pools'].append(rpc_pools[i])
ssg_group = None
for g in config['ssg']:
if g['name'] == 'hepnos':
ssg_group = g
break
ssg_group['group_file'] = exp_dir + '/hepnos.ssg'
event_db_model = {
"type": "map",
"comparator": "hepnos_compare_item_descriptors",
"no_overwrite": True
}
product_db_model = {"type": "map", "no_overwrite": True}
for i in range(0, num_providers):
p = {
"name": "hepnos_data_%d" % (i + 1),
"type": "sdskv",
"pool": rpc_pools[i % len(rpc_pools)],
"provider_id": i + 1,
"config": {
"comparators": [{
"name": "hepnos_compare_item_descriptors",
"library": "libhepnos-service.so"
}],
"databases": []
}
}
config['providers'].append(p)
p = 0
for i in range(0, num_event_dbs):
event_db_name = 'hepnos-events-' + str(i)
event_db = copy.deepcopy(event_db_model)
event_db['name'] = event_db_name
provider = config['providers'][1 + (p %
(len(config['providers']) - 1))]
provider['config']['databases'].append(event_db)
p += 1
for i in range(0, num_product_dbs):
product_db_name = 'hepnos-products-' + str(i)
product_db = copy.deepcopy(product_db_model)
product_db['name'] = product_db_name
provider = config['providers'][1 + (p %
(len(config['providers']) - 1))]
provider['config']['databases'].append(product_db)
p += 1
with open(hepnos_json, 'w+') as f:
f.write(json.dumps(config, indent=4))
def __parse_result(exp_dir):
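    # The large sentinel values act as a penalty when an output file is
    # missing or could not be parsed.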
dataloader_time = 99999999
pep_time = 0
if os.path.isfile(exp_dir + '/dataloader-output.txt'):
for line in open(exp_dir + '/dataloader-output.txt'):
if 'ESTIMATED' in line:
dataloader_time = int(float(line.split()[-1]))
break
if 'RUNTIME' in line:
dataloader_time = int(float(line.split()[-1]))
break
if os.path.isfile(exp_dir + '/pep-output.txt'):
pep_time = 99999999
for line in open(exp_dir + '/pep-output.txt'):
if 'TIME:' in line:
pep_time = int(line.split()[1])
return (dataloader_time, pep_time)
def run(config, nodes=None):
enable_pep = config.get('enable_pep',
int(os.environ.get("DH_HEPNOS_EXP_STEP", 1)) >= 2)
hepnos_pes_per_node = config.get("hepnos_pes_per_node", 2)
hepnos_progress_thread = config.get("hepnos_progress_thread", False)
hepnos_num_threads = config.get("hepnos_num_threads", 31)
hepnos_num_event_databases = config.get("hepnos_num_event_databases", 1)
hepnos_num_product_databases = config.get("hepnos_num_product_databases",
1)
hepnos_pool_type = config.get("hepnos_pool_type", "fifo_wait")
hepnos_num_providers = config.get("hepnos_num_providers", 1)
busy_spin = config.get("busy_spin", False)
loader_progress_thread = config.get("loader_progress_thread", False)
loader_async = config.get("loader_async", False)
loader_async_threads = config.get("loader_async_threads", 1)
loader_batch_size = config.get("loader_batch_size", 1024)
loader_pes_per_node = config.get("loader_pes_per_node", 1)
pep_progress_thread = config.get("pep_progress_thread", False)
pep_num_threads = config.get("pep_num_threads", 31)
pep_ibatch_size = config.get("pep_ibatch_size", 32)
pep_obatch_size = config.get("pep_obatch_size", 32)
pep_use_preloading = config.get("pep_use_preloading", False)
pep_pes_per_node = config.get("pep_pes_per_node", 16)
nodes = __make_node_list(nodes)
print('Setting up experiment\'s directory')
exp_dir = __setup_directory(config.get("id"))
print('Creating settings.sh')
__create_settings(exp_dir, hepnos_pes_per_node, loader_batch_size,
loader_async, loader_async_threads, loader_pes_per_node,
enable_pep, pep_num_threads, pep_ibatch_size,
pep_obatch_size, pep_use_preloading, pep_pes_per_node,
nodes)
print('Creating hepnos.json')
__generate_hepnos_config_file(exp_dir,
busy_spin=busy_spin,
use_progress_thread=hepnos_progress_thread,
num_threads=hepnos_num_threads,
num_providers=hepnos_num_providers,
num_event_dbs=hepnos_num_event_databases,
num_product_dbs=hepnos_num_product_databases,
pool_type=hepnos_pool_type)
print('Creating dataloader.json')
__generate_dataloader_config_file(
exp_dir,
busy_spin=busy_spin,
use_progress_thread=loader_progress_thread)
if enable_pep:
print('Creating pep.json')
__generate_pep_config_file(exp_dir,
busy_spin=busy_spin,
use_progress_thread=pep_progress_thread)
print('Submitting job')
submit_sh = os.path.dirname(
os.path.abspath(__file__)) + '/scripts/submit.sh'
os.system(submit_sh + ' ' + exp_dir)
print('Parsing result')
t = __parse_result(exp_dir)
print('Done (loading time = %f, processing time = %f)' % (t[0], t[1]))
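    # DeepHyper maximizes the objective, so return the negated total time.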
return -(t[0] + t[1])
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='HEPnOS experiment')
parser.add_argument('--hepnos-pes-per-node',
type=int,
default=2,
help='number of PE per node for HEPnOS')
parser.add_argument('--hepnos-progress-thread',
action='store_true',
default=False,
help='whether to use a progress thread in HEPnOS')
parser.add_argument(
'--hepnos-num-threads',
type=int,
default=31,
help='number of RPC handling threads per process for HEPnOS')
parser.add_argument(
'--hepnos-num-providers',
type=int,
default=1,
help='number of providers managing databases in HEPnOS')
parser.add_argument(
'--hepnos-num-event-databases',
type=int,
default=1,
help='number of databases per process for events in HEPnOS')
parser.add_argument(
'--hepnos-num-product-databases',
type=int,
default=1,
help='number of databases per process for products in HEPnOS')
# pool type can be "fifo", "fifo_wait", or "prio_wait"
parser.add_argument('--hepnos-pool-type',
type=str,
default='fifo_wait',
help='type of Argobots pools to use in HEPnOS')
parser.add_argument('--busy-spin',
action='store_true',
default=False,
help='whether to use busy spinning or not')
parser.add_argument(
'--loader-progress-thread',
action='store_true',
default=False,
help='whether to use a progress thread or not in dataloader clients')
parser.add_argument(
'--loader-async',
action='store_true',
default=False,
help='whether to use async progress in dataloader clients')
parser.add_argument(
'--loader-async-threads',
type=int,
default=1,
help='number of threads for async operation in clients')
parser.add_argument('--loader-batch-size',
type=int,
default=1024,
help='batch size for the dataloader')
parser.add_argument(
'--loader-pes-per-node',
type=int,
default=1,
help='number of PES per node (must be between 1 and 64) for loader')
parser.add_argument('--enable-pep',
action='store_true',
default=False,
help='enable PEP benchmark')
parser.add_argument('--pep-progress-thread',
action='store_true',
default=False,
help='whether to use a progress thread or not in PEP')
parser.add_argument(
'--pep-num-threads',
type=int,
default=31,
help='number of processing threads per benchmark process (must be > 0)'
)
parser.add_argument('--pep-ibatch-size',
type=int,
default=32,
help='batch size when loading from HEPnOS')
parser.add_argument('--pep-obatch-size',
type=int,
default=32,
help='batch size when loading from another rank')
parser.add_argument('--pep-use-preloading',
action='store_true',
default=False,
help='whether to use product-preloading')
parser.add_argument(
'--pep-pes-per-node',
type=int,
default=16,
help='number of PES per node (must be between 1 and 64)')
parser.add_argument(
'--pep-cores-per-pe',
type=int,
default=-1,
help='number of cores per PE (must be between 1 and 64)')
parser.add_argument('--nodes', type=str, default=None, help='nodes to use')
    # The product of the last two parameters should not exceed 64.
    # Additionally, the number of processing threads should be
    # the number of cores per PE minus 2 (so effectively the number
    # of cores per PE must be at least 3).
ns = parser.parse_args()
if ns.nodes is not None:
ns.nodes = ns.nodes.split(',')
run(vars(ns))
| 15,785 | 37.881773 | 79 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_theta/test_corr.py
|
import os
import ray
import pandas as pd
from hepnos_theta.run import run
from hepnos_theta.problem import Problem
settings = {
"num_cpus": 0.25,
"num_samples": 50
}
# settings = {
# "num_cpus": 1,
# "num_samples": 5
# } # debug
ray.init(address="auto")
run_func = ray.remote(num_cpus=settings["num_cpus"])(run)
configs = Problem.space.sample_configuration(settings["num_samples"])
configs = [config.get_dictionary() for config in configs]
os.environ["DH_HEPNOS_ENABLE_PEP"] = "0"
solutions = ray.get([run_func.remote(c) for c in configs])
results = []
for conf, sol in zip(configs, solutions):
conf = conf.copy()
conf["objective"] = sol
results.append(conf)
df1 = pd.DataFrame(results)
df1.to_csv("results-no-pep.csv")
os.environ["DH_HEPNOS_ENABLE_PEP"] = "1"
solutions = ray.get([run_func.remote(c) for c in configs])
results = []
for conf, sol in zip(configs, solutions):
conf = conf.copy()
conf["objective"] = sol
results.append(conf)
df2 = pd.DataFrame(results)
df2.to_csv("results-pep.csv")
| 1,044 | 23.302326 | 69 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_theta/__init__.py
| 0 | 0 | 0 |
py
|
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_theta/scripts/old_scripts/latest/problem_v31_constraint.py
|
from deephyper.problem import HpProblem
import ConfigSpace as cs
import ConfigSpace.hyperparameters as CSH
Problem = HpProblem(seed=45)
#Problem.add_dim('units', (1, 100))
#Problem.add_dim('activation', ['NA', 'relu', 'sigmoid', 'tanh'])
#Problem.add_dim('lr', (0.0001, 1.))
# parameters
'''
Number of threads: the number of threads that a server can use, not including the progress thread. Since 2 servers are deployed on each node, this number can range from 0 to 31.
Number of databases: the number of databases per server for event data and for product data. For example if set to 16, each server will create 16 databases for event data and 16 other databases for product data.
Busy spin: true or false, indicating whether Mercury should be set to busy-spin.
Async: true or false, indicating whether the client will execute store operation asynchronously, trying to overlap with file read operations.
Batch size: the batch size on clients (int).
'''
Problem.add_dim('num_threads', (0, 31)) # int x1
Problem.add_dim('num_databases', (1,10)) # int x2
Problem.add_dim('busy_spin', [True, False]) # True or False x3
Problem.add_dim('progress_thread', [True, False]) # True or False, Async x4
Problem.add_dim('batch_size', (1, 2048)) # int x5
# benchmark
num_threads_b_choice = [0.0]
a = 0
for _ in range(32):
a += 1/32.
num_threads_b_choice.append(a)
ord_hp_1 = CSH.OrdinalHyperparameter(name='num_threads_b', sequence=num_threads_b_choice, default_value=1.0)
Problem.add_hyperparameter(ord_hp_1)
# Problem.add_dim('num_threads_b', (0., 1.)) # int x6
Problem.add_dim('batch_size_in', (8, 1024)) # int x7
Problem.add_dim('batch_size_out', (8, 1024)) # int x8
Problem.add_dim('pep_pes_per_node', (1, 64)) # x9
pep_cores_per_pe_choice = [0.0]
a = 0
for _ in range(64):
a += 1/64.
pep_cores_per_pe_choice.append(a)
# Problem.add_dim('pep_cores_per_pe', (0., 1.)) # x10
ord_hp_2 = CSH.OrdinalHyperparameter(name='pep_cores_per_pe', sequence=pep_cores_per_pe_choice, default_value=1.0)
Problem.add_hyperparameter(ord_hp_2)
'''
Number of threads used by each benchmark process (should be between 1 and 31)
Size of the batches read from HEPnOS (I would suggest between 8 and 1024, to start with)
Size of the batches exchanged by benchmark processes (same range)
x6<=x10 and x9*x10<= 64
x6_0 = [0,1] # actual range [1,31] integer: the number of processing threads per benchmark process
x9 = [1,64] # actual range [1,64] integer: the number of PE per node for the benchmark
x10_0 = [0,1] # actual range [1,64] integer: the number of cores per PE for the benchmark
x10 = max(1, int(x10_0*64/x9)) where x10 ranges in [1,64] and x9*x10 <= 64.
x6 = min(31, int(1+(x6_0*0.5*(x10-1)))) where x6 ranges in [1,31] and x6 <= x10
'''
Problem.add_starting_point(
num_threads=31,
num_databases=1,
busy_spin=False,
progress_thread=False,
batch_size=1024,
num_threads_b=1.0,
batch_size_in=32,
batch_size_out=1024,
pep_pes_per_node = 2,
pep_cores_per_pe = 1.0
)
if __name__ == '__main__':
print(Problem)
| 3,074 | 37.4375 | 212 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_theta/scripts/old_scripts/latest/model_run_v3.py
|
import os, uuid
import yaml
from shutil import copyfile
import numpy as np
import time
def __setup_directory():
exp_dir = 'exp-' + str(uuid.uuid4())[0:8]
os.mkdir(exp_dir)
cwd = os.getcwd()
return cwd + '/' + exp_dir
def __create_settings(exp_dir, loader_batch_size, loader_progress_thread,
pep_num_threads, pep_ibatch_size, pep_obatch_size,
pep_pes_per_node, pep_cores_per_pe):
# settings_sh_in = os.path.dirname(os.path.abspath(__file__)) + '/scripts/settings.sh.in'
settings_sh_in = '/lus/theta-fs0/projects/OptADDN/hepnos/github/HEPnOS-Autotuning/theta/scripts/settings.sh.in'
settings_sh = exp_dir + '/settings.sh'
copyfile(settings_sh_in, settings_sh)
with open(settings_sh, 'a+') as f:
f.write('\n')
if loader_progress_thread:
f.write('HEPNOS_LOADER_CLIENT_USE_PROGRESS_THREAD=-a\n')
else:
f.write('HEPNOS_LOADER_CLIENT_USE_PROGRESS_THREAD=\n')
f.write('HEPNOS_LOADER_CLIENT_BATCH_SIZE=%d\n' % loader_batch_size)
if None not in [pep_num_threads, pep_ibatch_size, pep_obatch_size]:
f.write('HEPNOS_ENABLE_PEP=1\n')
f.write('HEPNOS_PEP_THREADS=%d\n' % pep_num_threads)
f.write('HEPNOS_PEP_IBATCH_SIZE=%d\n' % pep_ibatch_size)
f.write('HEPNOS_PEP_OBATCH_SIZE=%d\n' % pep_obatch_size)
f.write('HEPNOS_PEP_PES_PER_NODE=%d\n' % pep_pes_per_node)
f.write('HEPNOS_PEP_CORES_PER_PE=%d\n' % pep_cores_per_pe)
else:
f.write('HEPNOS_ENABLE_PEP=0\n')
def __generate_config_file(
exp_dir='.',
filename='config.yaml',
threads=0,
busy_spin=False,
targets=1):
config = dict()
config['address'] = 'ofi+gni://'
config['threads'] = int(threads)
config['busy-spin'] = bool(busy_spin)
config['databases'] = dict()
config['databases']['datasets'] = dict()
for k in ['datasets', 'runs', 'subruns', 'events', 'products']:
config['databases'][k] = dict()
d = config['databases'][k]
d['name'] = 'hepnos-%s.$RANK.$PROVIDER.$TARGET' % k
d['path'] = '/dev/shm/$RANK'
d['type'] = 'map'
d['targets'] = 1
d['providers'] = 1
config['databases']['events']['targets'] = int(targets)
config['databases']['products']['targets'] = int(targets)
with open(exp_dir+'/'+filename, 'w+') as f:
f.write(yaml.dump(config))
def __parse_result(exp_dir):
dataloader_time = 0
pep_time = 0
for line in open(exp_dir+'/dataloader-output.txt'):
if 'real' in line:
line = line.replace('s','')
x = line.split()[1]
m = int(x.split('m')[0])
s = float(x.split('m')[1])
dataloader_time = m*60 + s
if os.path.isfile(exp_dir+'/pep-output.txt'):
for line in open(exp_dir+'/pep-output.txt'):
if 'real' in line:
line = line.replace('s','')
x = line.split()[1]
m = int(x.split('m')[0])
s = float(x.split('m')[1])
pep_time = m*60 + s
return (dataloader_time, pep_time)
def run(args):
    # args is a dict, so fill in the defaults for the optional PEP-related
    # keys (the original list-style guard called extend() on a dict, which
    # would raise AttributeError).
    for key in ('num_threads_b', 'batch_size_in', 'batch_size_out'):
        args.setdefault(key, None)
    args.setdefault('pep_pes_per_node', 2)
    args.setdefault('pep_cores_per_pe', 32)
    if len(args) != 10:
        raise RuntimeError("Expected 5 or 10 configuration entries, found %d" % len(args))
hepnos_num_threads = args['num_threads'] # args[0] x1
hepnos_num_databases = args['num_databases'] # args[1] x2
busy_spin = args['busy_spin'] # args[2] x3
loader_progress_thread = args['progress_thread'] # args[3] x4
loader_batch_size = args['batch_size'] # args[4] x5
# pep_num_threads = args['num_threads_b'] #args[5] x6
pep_cores_per_pe = int(max(1,int(args['pep_cores_per_pe']*64/float(args['pep_pes_per_node'])))) #args[9] x10
pep_num_threads = min(31, int(1+(args['num_threads_b']*0.5*(pep_cores_per_pe-1)))) #args[5] x6 int(1+(x6_0*0.5*(x10-1))
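    # e.g. the starting point (pep_pes_per_node=2, pep_cores_per_pe=1.0,
    # num_threads_b=1.0) gives pep_cores_per_pe = int(1.0*64/2) = 32 and
    # pep_num_threads = min(31, int(1 + 1.0*0.5*(32-1))) = 16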
pep_ibatch_size = args['batch_size_in'] #args[6] x7
pep_obatch_size = args['batch_size_out'] #args[7] x8
pep_pes_per_node = args['pep_pes_per_node'] #args[8] x9
print('Setting up experiment\'s directory')
exp_dir = __setup_directory()
print('Creating settings.sh')
__create_settings(exp_dir,
loader_batch_size,
loader_progress_thread,
pep_num_threads,
pep_ibatch_size,
pep_obatch_size,
pep_pes_per_node,
pep_cores_per_pe)
print('Creating config.yaml')
__generate_config_file(
exp_dir,
threads=hepnos_num_threads,
busy_spin=busy_spin,
targets=hepnos_num_databases)
print('Submitting job')
# submit_sh = os.path.dirname(os.path.abspath(__file__)) + '/scripts/submit.sh'
submit_sh ='/lus/theta-fs0/projects/OptADDN/hepnos/github/HEPnOS-Autotuning/theta/scripts/submit.sh'
os.system(submit_sh + ' ' + exp_dir)
print('Parsing result')
t = __parse_result(exp_dir)
print('Done (loading time = %f, processing time = %f)' % (t[0], t[1]))
return 1 / (t[0]+t[1])
'''
Number of threads used by each benchmark process (should be between 1 and 31)
Size of the batches read from HEPnOS (I would suggest between 8 and 1024, to start with)
Size of the batches exchanged by benchmark processes (same range)
x6<=x10 and x9*x10<= 64
x6_0 = [0,1] # actual range [1,31] integer: the number of processing threads per benchmark process
x9 = [1,64] # actual range [1,64] integer: the number of PE per node for the benchmark
x10_0 = [0,1] # actual range [1,64] integer: the number of cores per PE for the benchmark
x10 = max(1, int(x10_0*64/x9)) where x10 ranges in [1,64] and x9*x10 <= 64.
x6 = min(31, int(1+(x6_0*0.5*(x10-1)))) where x6 ranges in [1,31] and x6 <= x10
'''
if __name__ == '__main__':
# run([ 31, 16, False, False, 1024 ])
config = {
'num_threads': 31,
'num_databases': 1,
'busy_spin': False,
'progress_thread': False,
'batch_size': 1024,
'num_threads_b': 1.0,
'batch_size_in': 32,
'batch_size_out': 1024,
'pep_pes_per_node': 2,
'pep_cores_per_pe': 1.0
}
objective = run(config)
print('objective: ', objective)
print('real run time: ', 1 / objective)
# import matplotlib.pyplot as plt
# plt.plot(HISTORY['val_r2'])
# plt.xlabel('Epochs')
# plt.ylabel('Objective: $R^2$')
# plt.grid()
# plt.show()
| 6,642 | 39.754601 | 123 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_theta/scripts/old_scripts/latest/problem_v3.py
|
from deephyper.problem import HpProblem
Problem = HpProblem()
#Problem.add_dim('units', (1, 100))
#Problem.add_dim('activation', ['NA', 'relu', 'sigmoid', 'tanh'])
#Problem.add_dim('lr', (0.0001, 1.))
# parameters
'''
Number of threads: the number of threads that a server can use, not including the progress thread. Since 2 servers are deployed on each node, this number can range from 0 to 31.
Number of databases: the number of databases per server for event data and for product data. For example if set to 16, each server will create 16 databases for event data and 16 other databases for product data.
Busy spin: true or false, indicating whether Mercury should be set to busy-spin.
Async: true or false, indicating whether the client will execute store operation asynchronously, trying to overlap with file read operations.
Batch size: the batch size on clients (int).
'''
Problem.add_dim('num_threads', (0, 31)) # int x1
Problem.add_dim('num_databases', (1,10)) # int x2
Problem.add_dim('busy_spin', [True, False]) # True or False x3
Problem.add_dim('progress_thread', [True, False]) # True or False, Async x4
Problem.add_dim('batch_size', (1, 2048)) # int x5
# benchmark
Problem.add_dim('num_threads_b', (0., 1.)) # int x6
Problem.add_dim('batch_size_in', (8, 1024)) # int x7
Problem.add_dim('batch_size_out', (8, 1024)) # int x8
Problem.add_dim('pep_pes_per_node', (1, 64)) # x9
Problem.add_dim('pep_cores_per_pe', (0., 1.)) # x10
'''
Number of threads used by each benchmark process (should be between 1 and 31)
Size of the batches read from HEPnOS (I would suggest between 8 and 1024, to start with)
Size of the batches exchanged by benchmark processes (same range)
x6<=x10 and x9*x10<= 64
x6_0 = [0,1] # actual range [1,31] integer: the number of processing threads per benchmark process
x9 = [1,64] # actual range [1,64] integer: the number of PE per node for the benchmark
x10_0 = [0,1] # actual range [1,64] integer: the number of cores per PE for the benchmark
x10 = max(1, int(x10_0*64/x9)) where x10 ranges in [1,64] and x9*x10 <= 64.
x6 = min(31, int(1+(x6_0*0.5*(x10-1)))) where x6 ranges in [1,31] and x6 <= x10
'''
Problem.add_starting_point(
num_threads=31,
num_databases=1,
busy_spin=False,
progress_thread=False,
batch_size=1024,
num_threads_b=1.0,
batch_size_in=32,
batch_size_out=1024,
pep_pes_per_node = 2,
pep_cores_per_pe = 1.0
)
if __name__ == '__main__':
print(Problem)
| 2,473 | 39.557377 | 212 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_theta/scripts/old_scripts/working_ray_not_setup/problem.py
|
from deephyper.problem import HpProblem
Problem = HpProblem()
# parameters
'''
Number of threads: the number of threads that a server can use, not including the progress thread. Since 2 servers are deployed on each node, this number can range from 0 to 31.
Number of databases: the number of databases per server for event data and for product data. For example if set to 16, each server will create 16 databases for event data and 16 other databases for product data.
Busy spin: true or false, indicating whether Mercury should be set to busy-spin.
Async: true or false, indicating whether the client will execute store operation asynchronously, trying to overlap with file read operations.
Batch size: the batch size on clients (int).
Number of threads used by each benchmark process (should be between 1 and 31)
Size of the batches read from HEPnOS (I would suggest between 8 and 1024, to start with)
Size of the batches exchanged by benchmark processes (same range)
'''
Problem.add_dim('num_threads', (0, 31)) # int
Problem.add_dim('num_databases', (1,10)) # int
Problem.add_dim('busy_spin', [True, False]) # True or False
Problem.add_dim('progress_thread', [True, False]) # True or False, Async
Problem.add_dim('batch_size', (1, 2048)) # int
# benchmark
Problem.add_dim('num_threads_b', (1, 31)) # int
Problem.add_dim('batch_size_in', (8, 1024)) # int
Problem.add_dim('batch_size_out', (8, 1024)) # int
Problem.add_starting_point(
num_threads=31,
num_databases=1,
busy_spin=False,
progress_thread=False,
batch_size=1024,
num_threads_b=31,
batch_size_in=32,
batch_size_out=1024
)
if __name__ == '__main__':
print(Problem)
| 1,681 | 41.05 | 212 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_theta/scripts/old_scripts/working_ray_not_setup/model_run.py
|
import os, uuid
import yaml
from shutil import copyfile
import numpy as np
import time
def __setup_directory():
exp_dir = 'exp-' + str(uuid.uuid4())[0:8]
os.mkdir(exp_dir)
cwd = os.getcwd()
return cwd + '/' + exp_dir
def __create_settings(exp_dir, loader_batch_size, loader_progress_thread,
pep_num_threads, pep_ibatch_size, pep_obatch_size):
# settings_sh_in = os.path.dirname(os.path.abspath(__file__)) + '/scripts/settings.sh.in'
settings_sh_in = '/lus/theta-fs0/projects/OptADDN/hepnos/github/HEPnOS-Autotuning/theta/scripts/settings.sh.in'
settings_sh = exp_dir + '/settings.sh'
copyfile(settings_sh_in, settings_sh)
with open(settings_sh, 'a+') as f:
f.write('\n')
if loader_progress_thread:
f.write('HEPNOS_LOADER_CLIENT_USE_PROGRESS_THREAD=-a\n')
else:
f.write('HEPNOS_LOADER_CLIENT_USE_PROGRESS_THREAD=\n')
f.write('HEPNOS_LOADER_CLIENT_BATCH_SIZE=%d\n' % loader_batch_size)
if None not in [pep_num_threads, pep_ibatch_size, pep_obatch_size]:
f.write('HEPNOS_ENABLE_PEP=1\n')
f.write('HEPNOS_PEP_THREADS=%d\n' % pep_num_threads)
f.write('HEPNOS_PEP_IBATCH_SIZE=%d\n' % pep_ibatch_size)
f.write('HEPNOS_PEP_OBATCH_SIZE=%d\n' % pep_obatch_size)
else:
f.write('HEPNOS_ENABLE_PEP=0\n')
def __generate_config_file(
exp_dir='.',
filename='config.yaml',
threads=0,
busy_spin=False,
targets=1):
config = dict()
config['address'] = 'ofi+gni://'
config['threads'] = int(threads)
config['busy-spin'] = bool(busy_spin)
config['databases'] = dict()
config['databases']['datasets'] = dict()
for k in ['datasets', 'runs', 'subruns', 'events', 'products']:
config['databases'][k] = dict()
d = config['databases'][k]
d['name'] = 'hepnos-%s.$RANK.$PROVIDER.$TARGET' % k
d['path'] = '/dev/shm/$RANK'
d['type'] = 'map'
d['targets'] = 1
d['providers'] = 1
config['databases']['events']['targets'] = int(targets)
config['databases']['products']['targets'] = int(targets)
with open(exp_dir+'/'+filename, 'w+') as f:
f.write(yaml.dump(config))
def __parse_result(exp_dir):
dataloader_time = 0
pep_time = 0
for line in open(exp_dir+'/dataloader-output.txt'):
if 'real' in line:
line = line.replace('s','')
x = line.split()[1]
m = int(x.split('m')[0])
s = float(x.split('m')[1])
dataloader_time = m*60 + s
if os.path.isfile(exp_dir+'/pep-output.txt'):
for line in open(exp_dir+'/pep-output.txt'):
if 'real' in line:
line = line.replace('s','')
x = line.split()[1]
m = int(x.split('m')[0])
s = float(x.split('m')[1])
pep_time = m*60 + s
return (dataloader_time, pep_time)
def run(args):
    # args is a dict, so fill in None for the optional PEP-related keys
    # (the original list-style guard called extend() on a dict, which would
    # raise AttributeError).
    for key in ('num_threads_b', 'batch_size_in', 'batch_size_out'):
        args.setdefault(key, None)
    if len(args) != 8:
        raise RuntimeError("Expected 5 or 8 configuration entries, found %d" % len(args))
hepnos_num_threads = args['num_threads'] # args[0]
hepnos_num_databases = args['num_databases'] # args[1]
busy_spin = args['busy_spin'] # args[2]
loader_progress_thread = args['progress_thread'] # args[3]
loader_batch_size = args['batch_size'] # args[4]
pep_num_threads = args['num_threads_b'] #args[5]
pep_ibatch_size = args['batch_size_in'] #args[6]
pep_obatch_size = args['batch_size_out'] #args[7]
print('Setting up experiment\'s directory')
exp_dir = __setup_directory()
print('Creating settings.sh')
__create_settings(exp_dir,
loader_batch_size,
loader_progress_thread,
pep_num_threads,
pep_ibatch_size,
pep_obatch_size)
print('Creating config.yaml')
__generate_config_file(
exp_dir,
threads=hepnos_num_threads,
busy_spin=busy_spin,
targets=hepnos_num_databases)
print('Submitting job')
# submit_sh = os.path.dirname(os.path.abspath(__file__)) + '/scripts/submit.sh'
submit_sh ='/lus/theta-fs0/projects/OptADDN/hepnos/github/HEPnOS-Autotuning/theta/scripts/submit.sh'
os.system(submit_sh + ' ' + exp_dir)
print('Parsing result')
t = __parse_result(exp_dir)
print('Done (loading time = %f, processing time = %f)' % (t[0], t[1]))
return 1 / (t[0]+t[1])
if __name__ == '__main__':
# run([ 31, 16, False, False, 1024 ])
config = {
'num_threads': 31,
'num_databases': 1,
'busy_spin': False,
'progress_thread': False,
'batch_size': 1024,
'num_threads_b': 31,
'batch_size_in': 32,
'batch_size_out': 1024
}
objective = run(config)
print('objective: ', objective)
print('real run time: ', 1 / objective)
# import matplotlib.pyplot as plt
# plt.plot(HISTORY['val_r2'])
# plt.xlabel('Epochs')
# plt.ylabel('Objective: $R^2$')
# plt.grid()
# plt.show()
| 5,212 | 36.775362 | 115 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/plots/generate_plot.py
|
import os
import pathlib
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import yaml
import pandas as pd
import inspect
from scipy import stats
import numpy as np
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
width = 8
height = width / 1.618
matplotlib.rcParams.update({
'font.size': 21,
'figure.figsize': (width, height),
'figure.facecolor': 'white',
'savefig.dpi': 72,
'figure.subplot.bottom': 0.125,
'figure.edgecolor': 'white',
'xtick.labelsize': 21,
'ytick.labelsize': 21
})
HERE = os.path.dirname(os.path.abspath(__file__))
FILE_EXTENSION = "png"
def yaml_load(path):
with open(path, "r") as f:
yaml_data = yaml.load(f, Loader=Loader)
return yaml_data
def load_results(exp_root: str, exp_config: dict) -> dict:
data = {}
for exp_folder in exp_config["data"]:
if "rep" in exp_config["data"][exp_folder]:
dfs = []
for rep in exp_config["data"][exp_folder].get("rep"):
exp_results_path = os.path.join(exp_root,
f"{exp_folder}-rep{rep}",
"results.csv")
df = pd.read_csv(exp_results_path)
dfs.append(df)
data[exp_folder] = dfs
else:
exp_results_path = os.path.join(exp_root, exp_folder,
"results.csv")
df = pd.read_csv(exp_results_path)
data[exp_folder] = df
return data
@ticker.FuncFormatter
def hour_major_formatter(x, pos):
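    """Format an elapsed-seconds tick as hours with at most one decimal."""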
x = float(f"{x/3600:.1f}")
if x % 1 == 0:
x = str(int(x))
else:
x = f"{x:.1f}"
return x
def plot_scatter_multi(df, exp_config, output_dir):
output_file_name = f"{inspect.stack()[0][3]}.{FILE_EXTENSION}"
output_path = os.path.join(output_dir, output_file_name)
plt.figure()
for exp_name, exp_df in df.items():
if "rep" in exp_config["data"][exp_name]:
exp_dfs = exp_df
for i, exp_df in enumerate(exp_dfs):
x, y = exp_df.elapsed_sec.to_numpy(
), -exp_df.objective.to_numpy()
plt_kwargs = dict(color=exp_config["data"][exp_name]["color"],
s=10,
alpha=0.5)
if i == 0:
plt_kwargs["label"] = exp_config["data"][exp_name]["label"]
plt.scatter(x, y, **plt_kwargs)
else:
x, y = exp_df.elapsed_sec.to_numpy(), -exp_df.objective.to_numpy()
plt.scatter(x,
y,
color=exp_config["data"][exp_name]["color"],
label=exp_config["data"][exp_name]["label"],
s=10)
ax = plt.gca()
ax.xaxis.set_major_locator(ticker.MultipleLocator(900))
ax.xaxis.set_major_formatter(hour_major_formatter)
if exp_config.get("title"):
plt.title(exp_config.get("title"))
plt.legend()
plt.ylabel("Instance run time (sec)")
plt.xlabel("Search time (hour)")
if exp_config.get("ylim"):
plt.ylim(*exp_config.get("ylim"))
plt.xlim(0, 3600)
plt.grid()
plt.tight_layout()
plt.savefig(output_path)
plt.show()
def only_min(values):
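    """Return the running minimum: the best (lowest) value seen so far."""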
res = [values[0]]
for value in values[1:]:
res.append(min(res[-1], value))
return np.array(res)
def plot_objective_multi(df, exp_config, output_dir):
output_file_name = f"{inspect.stack()[0][3]}.{FILE_EXTENSION}"
output_path = os.path.join(output_dir, output_file_name)
plt.figure()
for exp_name, exp_df in df.items():
if "rep" in exp_config["data"][exp_name]:
exp_dfs = exp_df
times = np.unique(
np.concatenate([df.elapsed_sec.to_numpy() for df in exp_dfs],
axis=0))
times = np.concatenate([[0], times, [3600]])
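            # Align every repetition on the union of all time stamps so the
            # running minima can be aggregated point-wise; gaps are filled
            # from each run's last known best value.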
series = []
for exp_df in exp_dfs:
exp_df = exp_df.sort_values("elapsed_sec")
x, y = exp_df.elapsed_sec.to_numpy(
), -exp_df.objective.to_numpy()
y = only_min(y)
s = pd.Series(data=y, index=x)
s = s.reindex(times).fillna(method="ffill").fillna(method="bfill")
series.append(s)
array = np.array([s.to_numpy() for s in series])
loc = np.nanmean(array, axis=0)
# scale = np.nanstd(array, axis=0)
loc_max = np.nanmax(array, axis=0)
loc_min = np.nanmin(array, axis=0)
plt.plot(
times,
loc,
label=exp_config["data"][exp_name]["label"],
color=exp_config["data"][exp_name]["color"],
linestyle=exp_config["data"][exp_name].get("linestyle", "-"),
)
plt.fill_between(times,
loc_min,
loc_max,
facecolor=exp_config["data"][exp_name]["color"],
alpha=0.3)
else:
exp_df = exp_df.sort_values("elapsed_sec")
x, y = exp_df.elapsed_sec.to_numpy(), -exp_df.objective.to_numpy()
y = only_min(y)
plt.plot(x,
y,
label=exp_config["data"][exp_name]["label"],
color=exp_config["data"][exp_name]["color"],
linestyle=exp_config["data"][exp_name].get(
"linestyle", "-"))
ax = plt.gca()
ax.xaxis.set_major_locator(ticker.MultipleLocator(900))
ax.xaxis.set_major_formatter(hour_major_formatter)
if exp_config.get("title"):
plt.title(exp_config.get("title"))
plt.legend()
plt.ylabel("Instance run time (sec)")
plt.xlabel("Search time (hour)")
if exp_config.get("ylim"):
plt.ylim(*exp_config.get("ylim"))
plt.xlim(0, 3600)
plt.grid()
plt.tight_layout()
plt.savefig(output_path)
plt.show()
def plot_objective_multi_iter(df, exp_config, output_dir):
output_file_name = f"{inspect.stack()[0][3]}.{FILE_EXTENSION}"
output_path = os.path.join(output_dir, output_file_name)
plt.figure()
for exp_name, exp_df in df.items():
if "rep" in exp_config["data"][exp_name]:
exp_dfs = exp_df
for i, exp_df in enumerate(exp_dfs):
exp_df = exp_df.sort_values("elapsed_sec")
x, y = list(range(1,
len(exp_df.elapsed_sec.to_list()) +
1)), (-exp_df.objective).to_list()
y = only_min(y)
plt_kwargs = dict(color=exp_config["data"][exp_name]["color"],
linestyle=exp_config["data"][exp_name].get(
"linestyle", "-"))
if i == 0:
plt_kwargs["label"] = label = exp_config["data"][exp_name][
"label"]
plt.plot(x, y, **plt_kwargs)
else:
exp_df = exp_df.sort_values("elapsed_sec")
x, y = list(range(1,
len(exp_df.elapsed_sec.to_list()) +
1)), (-exp_df.objective).to_list()
y = only_min(y)
plt.plot(x,
y,
label=exp_config["data"][exp_name]["label"],
color=exp_config["data"][exp_name]["color"],
linestyle=exp_config["data"][exp_name].get(
"linestyle", "-"))
ax = plt.gca()
ax.xaxis.set_major_locator(ticker.MultipleLocator(50))
if exp_config.get("title"):
plt.title(exp_config.get("title"))
plt.legend()
plt.ylabel("Experiment Duration (sec.)")
plt.xlabel("#Evaluation")
if exp_config.get("ylim"):
plt.ylim(*exp_config.get("ylim"))
plt.grid()
plt.tight_layout()
plt.savefig(output_path)
plt.show()
def generate_figures(config):
exp_root = config["root"]
figures_dir = os.path.join(HERE, "figures")
for exp_num, exp_config in config["experiments"].items():
exp_dirname = str(exp_num)
output_dir = os.path.join(figures_dir, exp_dirname)
pathlib.Path(output_dir).mkdir(parents=False, exist_ok=True)
df = load_results(exp_root, exp_config)
plot_scatter_multi(df, exp_config, output_dir)
plot_objective_multi(df, exp_config, output_dir)
plot_objective_multi_iter(df, exp_config, output_dir)
if __name__ == "__main__":
yaml_path = os.path.join(HERE, "plot.yaml")
config = yaml_load(yaml_path)
generate_figures(config)
print("Done!")
| 8,900 | 29.27551 | 82 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_bebop/extend_with_constants.py
|
import pandas as pd
HP_DEFAULT_VALUES = {
"hepnos_num_threads": 31,
"hepnos_num_databases": 1,
"busy_spin": False,
"loader_progress_thread": False,
"loader_batch_size": 1024,
"enable_pep": False,
"pep_num_threads": 31,
"pep_ibatch_size": 32,
"pep_obatch_size": 32,
"pep_use_preloading": False,
"pep_pes_per_node": 16,
"pep_cores_per_pe": 4
}
def transform_with_constants(source_csv: str, output_csv: str, hp_list: list, hp_default: dict):
source_df = pd.read_csv(source_csv)
for hp in hp_list:
        if hp not in source_df.columns:
source_df[hp] = hp_default[hp] # create new column with default value
source_df.to_csv(output_csv)
if __name__ == "__main__":
source_csv = "exp-1/results.csv"
output_csv = "exp/results.csv"
from problem import Problem
hp_list = Problem.space.get_hyperparameter_names()
transform_with_constants(source_csv, output_csv, hp_list, HP_DEFAULT_VALUES)
| 984 | 24.25641 | 96 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_bebop/run_exp.py
|
"""
python -m hepnos_bebop.run_exp -w exp/exp-test -q bdw -t 60 -A radix-io -n 8 --nodes-per-task 4 -as ./SetUpEnv.sh --run hepnos_bebop.run.run --problem hepnos_bebop.problem.Problem --fit-search-space exp/
"""
import os
import argparse
import pathlib
import stat
from jinja2 import Template
HERE = os.path.dirname(os.path.abspath(__file__))
JOB_TEMPLATE = os.path.join(HERE, "job.sbatch.tmpl")
def run(w, q, A, t, n, step, nodes_per_task, activation_script, run,
problem, fit_surrogate, fit_search_space, transfer_learning_strategy, transfer_learning_epsilon):
w = w.encode("ascii").decode("ascii")
num_dh_workers = n // nodes_per_task # N_T
num_cpus_driver = 4 # N_R
num_cpus_per_task = num_cpus_driver / num_dh_workers # N_{R/T}
print(f"Detected {num_dh_workers} DeepHyper parallel evaluations with {n} nodes for the total allocation and {nodes_per_task} nodes per evaluation.")
print(f" num_cpus_driver: {num_cpus_driver}")
print(f" num_cpus_per_task: {num_cpus_per_task}")
step = int(step)
# for transfer learning
if fit_surrogate:
fit_surrogate = os.path.abspath(fit_surrogate)
if fit_search_space:
fit_search_space = os.path.abspath(fit_search_space)
# create exp directory
exp_dir = os.path.abspath(w)
pathlib.Path(exp_dir).mkdir(parents=True, exist_ok=False)
activation_script = os.path.abspath(activation_script)
# load template
with open(JOB_TEMPLATE, "r") as f:
job_template = Template(f.read())
submission_path = os.path.join(w, "job.sbatch")
with open(submission_path, "w") as fp:
fp.write(
job_template.render(q=q,
A=A,
t=t,
n=n,
hepnos_exp_step=step,
nodes_per_task=nodes_per_task,
num_cpus_driver=num_cpus_driver,
num_cpus_per_task=num_cpus_per_task,
activation_script=activation_script,
exp_dir=exp_dir,
run=run,
problem=problem,
fit_surrogate=fit_surrogate,
fit_search_space=fit_search_space,
transfer_learning_strategy=transfer_learning_strategy,
transfer_learning_epsilon=transfer_learning_epsilon))
# add executable rights
st = os.stat(submission_path)
os.chmod(submission_path, st.st_mode | stat.S_IEXEC)
# Job submission
os.chdir(exp_dir)
print("Performing job submission...")
cmd = f"sbatch job.sbatch"
os.system(cmd)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='HEPnOS experiment')
parser.add_argument('-w',
required=True,
type=str,
help="Name of the experiment.")
parser.add_argument('-q', required=True, type=str, help="Queue name.")
parser.add_argument('-A',
default="radix-io",
type=str,
help="Project name.")
parser.add_argument('-t',
required=True,
type=str,
help="Duration of the experiment.")
parser.add_argument('-n',
required=True,
type=int,
help="Number of nodes for the total allocation.")
parser.add_argument('--step',
type=int,
default=1,
help='HEPnOS experiment step.')
parser.add_argument('--nodes-per-task',
type=int,
default=None,
help='Number of nodes to use per task.')
parser.add_argument(
'-as',
'--activation-script',
required=True,
type=str,
help="Path to the script activation the conda environment.")
parser.add_argument('--run', required=True, type=str)
parser.add_argument('--problem', required=True, type=str)
parser.add_argument('--fit-surrogate', required=False, type=str, default="")
parser.add_argument('--fit-search-space', required=False, type=str, default="")
parser.add_argument('--transfer-learning-strategy', required=False, type=str, default="best", choices=["best", "epsilon"])
parser.add_argument('--transfer-learning-epsilon', required=False, type=float, default=1.0)
args = parser.parse_args()
run(**vars(args))
| 4,714 | 38.957627 | 203 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_bebop/problem.py
|
import os
from deephyper.problem import HpProblem
Problem = HpProblem(seed=2021)
# the following is the old definition of the Problem
"""
# 1. step
Problem.add_hyperparameter((0, 31), "hepnos_num_threads")
Problem.add_hyperparameter((1, 10), "hepnos_num_databases")
Problem.add_hyperparameter([True, False], "busy_spin")
Problem.add_hyperparameter([True, False], "loader_progress_thread")
Problem.add_hyperparameter((1, 2048, "log-uniform"), "loader_batch_size")
# when "enable_step == True"
# 2. step:
enable_pep = bool(int(os.environ.get("DH_HEPNOS_ENABLE_PEP", 0)))
if enable_pep:
Problem.add_hyperparameter((1, 31), "pep_num_threads")
Problem.add_hyperparameter((8, 1024, "log-uniform"), "pep_ibatch_size")
Problem.add_hyperparameter((8, 1024, "log-uniform"), "pep_obatch_size")
Problem.add_hyperparameter([True, False], "pep_use_preloading")
# 3. step:
# Problem.add_hyperparameter((1, 64), "pep_pes_per_node")
# Problem.add_hyperparameter((1, 64), "pep_cores_per_pe")
"""
def add_parameter(problem, name, domain, description=""):
problem.add_hyperparameter(domain, name)
step = int(os.environ.get("DH_HEPNOS_EXP_STEP", 1))
# Step 1: Data-loader only, at small scale
add_parameter(Problem, "busy_spin", [True, False],
"Whether Mercury should busy-spin instead of block")
add_parameter(Problem, "hepnos_progress_thread", [True, False],
"Whether to use a dedicated progress thread in HEPnOS")
add_parameter(Problem, "hepnos_num_threads", (0, 63),
"Number of threads used for serving RPC requests")
add_parameter(Problem, "hepnos_num_event_databases", (1, 16),
"Number of databases per process used to store events")
add_parameter(Problem, "hepnos_num_product_databases", (1, 16),
"Number of databases per process used to store products")
add_parameter(Problem, "hepnos_num_providers", (1, 32),
"Number of database providers per process")
add_parameter(Problem, "hepnos_pool_type", ['fifo','fifo_wait','prio_wait'],
"Thread-scheduling policity used by Argobots pools")
add_parameter(Problem, "hepnos_pes_per_node", [1, 2, 4, 8, 16, 32],
"Number of HEPnOS processes per node")
add_parameter(Problem, "loader_progress_thread", [True, False],
"Whether to use a dedicated progress thread in the Dataloader")
add_parameter(Problem, "loader_batch_size", (1, 2048, "log-uniform"),
"Size of the batches of events sent by the Dataloader to HEPnOS")
add_parameter(Problem, "loader_pes_per_node", [1, 2, 4, 8, 16],
"Number of processes per node for the Dataloader")
# Step 2: We add the PEP step, still at small scale
if step >= 2:
add_parameter(Problem, "pep_progress_thread", [True, False],
"Whether to use a dedicated progress thread in the PEP step")
add_parameter(Problem, "pep_num_threads", (1, 31),
"Number of threads used for processing in the PEP step")
add_parameter(Problem, "pep_ibatch_size", (8, 1024, "log-uniform"),
"Batch size used when PEP processes are loading events from HEPnOS")
add_parameter(Problem, "pep_obatch_size", (8, 1024, "log-uniform"),
"Batch size used when PEP processes are exchanging events among themselves")
add_parameter(Problem, "pep_pes_per_node", [1, 2, 4, 8, 16, 32],
"Number of processes per node for the PEP step")
# Step 3: We add some new parameters
if step >= 3:
add_parameter(Problem, "loader_async", [True, False],
"Whether to use the HEPnOS AsyncEngine in the Dataloader")
add_parameter(Problem, "loader_async_threads", (1, 63, "log-uniform"),
"Number of threads for the AsyncEngine to use")
add_parameter(Problem, "pep_use_preloading", [True, False],
"Whether the PEP step should use product-preloading")
# Step 4: We scale to larger experiments (no new processes)
# Note: in the above, if
# (X_progress_thread + 1 + X_num_threads) * X_pes_per_node > 64,
# then we oversubscribe the nodes with more threads than we should.
# If this leads to performance degradation, DeepHyper should detect
# it and avoid the corresponding regions of the parameter space.
# However it would be nice for the paper to be able to impose constraints
# like that.
#
# Note: in step 3, if loader_async is False, then the value of
# loader_async_threads is irrelevant, so it would be nice to be able
# to not sample it when loader_async is False (say that "loader_async_threads"
# is a child parameter of "loader_async").
if __name__ == "__main__":
print(Problem)
| 4,467 | 44.131313 | 84 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_bebop/run.py
|
import os, uuid
import copy
import json
import re
from shutil import copyfile
def __setup_directory(id_=None):
    if id_ is None:
id_ = uuid.uuid4()
exp_dir = 'exp-' + str(id_)
os.mkdir(exp_dir)
cwd = os.getcwd()
return cwd + '/' + exp_dir
def __make_node_list(nodes):
if nodes is None:
return None
result = []
for n in nodes:
m = re.search('([0-9]+)', n)
        result.append(str(int(m.group(0))))
return result
def __create_settings(exp_dir, hepnos_pes_per_node, loader_batch_size,
loader_async, loader_async_threads, loader_pes_per_node,
enable_pep, pep_num_threads, pep_ibatch_size,
pep_obatch_size, pep_use_preloading, pep_pes_per_node,
nodes):
settings_sh_in = os.path.dirname(
os.path.abspath(__file__)) + '/scripts/settings.sh.in'
settings_sh = exp_dir + '/settings.sh'
copyfile(settings_sh_in, settings_sh)
with open(settings_sh, 'a+') as f:
f.write('\n')
f.write('HEPNOS_PES_PER_NODE=%d\n' % hepnos_pes_per_node)
if loader_async:
f.write('HEPNOS_LOADER_ASYNC=-a\n')
f.write('HEPNOS_LOADER_ASYNC_THREADS=%d\n' % loader_async_threads)
else:
f.write('HEPNOS_LOADER_ASYNC=\n')
f.write('HEPNOS_LOADER_ASYNC_THREADS=0\n')
f.write('HEPNOS_LOADER_BATCH_SIZE=%d\n' % loader_batch_size)
f.write('HEPNOS_LOADER_PES_PER_NODE=%d\n' % loader_pes_per_node)
if enable_pep:
f.write('HEPNOS_ENABLE_PEP=1\n')
f.write('HEPNOS_PEP_THREADS=%d\n' % pep_num_threads)
f.write('HEPNOS_PEP_IBATCH_SIZE=%d\n' % pep_ibatch_size)
f.write('HEPNOS_PEP_OBATCH_SIZE=%d\n' % pep_obatch_size)
f.write('HEPNOS_PEP_PES_PER_NODE=%d\n' % pep_pes_per_node)
if pep_use_preloading:
f.write('HEPNOS_PEP_PRELOAD=--preload\n')
else:
f.write('HEPNOS_PEP_PRELOAD=\n')
else:
f.write('HEPNOS_ENABLE_PEP=0\n')
if nodes is not None:
f.write('HEPNOS_NODELIST=(%s)\n' % ' '.join(nodes))
def __generate_dataloader_config_file(exp_dir='.',
filename='dataloader.json',
busy_spin=False,
use_progress_thread=False):
dataloader_json_in = os.path.dirname(
os.path.abspath(__file__)) + '/scripts/dataloader.json.in'
dataloader_json = exp_dir + '/' + filename
with open(dataloader_json_in) as f:
config = json.loads(f.read())
config['mercury']['na_no_block'] = bool(busy_spin)
config['use_progress_thread'] = bool(use_progress_thread)
with open(dataloader_json, 'w+') as f:
f.write(json.dumps(config, indent=4))
def __generate_pep_config_file(exp_dir='.',
filename='pep.json',
busy_spin=False,
use_progress_thread=False):
pep_json_in = os.path.dirname(
os.path.abspath(__file__)) + '/scripts/pep.json.in'
pep_json = exp_dir + '/' + filename
with open(pep_json_in) as f:
config = json.loads(f.read())
config['mercury']['na_no_block'] = bool(busy_spin)
config['use_progress_thread'] = bool(use_progress_thread)
with open(pep_json, 'w+') as f:
f.write(json.dumps(config, indent=4))
def __generate_hepnos_config_file(exp_dir='.',
filename='hepnos.json',
busy_spin=False,
use_progress_thread=False,
num_threads=0,
num_providers=1,
num_event_dbs=1,
num_product_dbs=1,
pool_type='fifo_wait'):
hepnos_json_in = os.path.dirname(
os.path.abspath(__file__)) + '/scripts/hepnos.json.in'
hepnos_json = exp_dir + '/' + filename
with open(hepnos_json_in) as f:
config = json.loads(f.read())
config['margo']['mercury']['na_no_block'] = bool(busy_spin)
config['margo']['argobots']['pools'][0]['type'] = pool_type
if use_progress_thread:
config['margo']['argobots']['pools'].append({
'name': '__progress__',
'type': pool_type,
'access': 'mpmc'
})
config['margo']['argobots']['xstreams'].append({
'name': '__progress__',
'scheduler': {
'type': 'basic_wait',
'pools': ['__progress__']
}
})
config['margo']['progress_pool'] = '__progress__'
else:
config['margo']['progress_pool'] = '__primary__'
rpc_pools = []
for i in range(0, num_providers):
config['margo']['argobots']['pools'].append({
'name': ('__rpc_%d__' % i),
'type': pool_type,
'access': 'mpmc'
})
rpc_pools.append('__rpc_%d__' % i)
if num_threads == 0:
config['margo']['argobots']['xstreams'][0]['scheduler'][
'pools'].extend(rpc_pools)
else:
es = []
for i in range(0, min(num_threads, num_providers)):
config['margo']['argobots']['xstreams'].append({
'name': ('rpc_es_%d' % i),
'scheduler': {
'type': 'basic_wait',
'pools': []
}
})
es.append(config['margo']['argobots']['xstreams'][-1])
for i in range(0, len(rpc_pools)):
es[i % len(es)]['scheduler']['pools'].append(rpc_pools[i])
ssg_group = None
for g in config['ssg']:
if g['name'] == 'hepnos':
ssg_group = g
break
ssg_group['group_file'] = exp_dir + '/hepnos.ssg'
event_db_model = {
"type": "map",
"comparator": "hepnos_compare_item_descriptors",
"no_overwrite": True
}
product_db_model = {"type": "map", "no_overwrite": True}
for i in range(0, num_providers):
p = {
"name": "hepnos_data_%d" % (i + 1),
"type": "sdskv",
"pool": rpc_pools[i % len(rpc_pools)],
"provider_id": i + 1,
"config": {
"comparators": [{
"name": "hepnos_compare_item_descriptors",
"library": "libhepnos-service.so"
}],
"databases": []
}
}
config['providers'].append(p)
p = 0
for i in range(0, num_event_dbs):
event_db_name = 'hepnos-events-' + str(i)
event_db = copy.deepcopy(event_db_model)
event_db['name'] = event_db_name
provider = config['providers'][1 + (p %
(len(config['providers']) - 1))]
provider['config']['databases'].append(event_db)
p += 1
for i in range(0, num_product_dbs):
product_db_name = 'hepnos-products-' + str(i)
product_db = copy.deepcopy(product_db_model)
product_db['name'] = product_db_name
provider = config['providers'][1 + (p %
(len(config['providers']) - 1))]
provider['config']['databases'].append(product_db)
p += 1
with open(hepnos_json, 'w+') as f:
f.write(json.dumps(config, indent=4))
def __parse_result(exp_dir):
dataloader_time = 99999999
pep_time = 0
if os.path.isfile(exp_dir + '/dataloader-output.txt'):
for line in open(exp_dir + '/dataloader-output.txt'):
if 'ESTIMATED' in line:
dataloader_time = int(float(line.split()[-1]))
break
if 'RUNTIME' in line:
dataloader_time = int(float(line.split()[-1]))
break
if os.path.isfile(exp_dir + '/pep-output.txt'):
pep_time = 99999999
for line in open(exp_dir + '/pep-output.txt'):
if 'Benchmark completed' in line:
pep_time = int(line.split()[-2].split('.')[0])
break
if 'TIME:' in line:
pep_time = int(line.split()[1])
break
return (dataloader_time, pep_time)
def run(config, nodes=None):
enable_pep = config.get('enable_pep',
int(os.environ.get("DH_HEPNOS_EXP_STEP", 1)) >= 2)
hepnos_pes_per_node = config.get("hepnos_pes_per_node", 2)
hepnos_progress_thread = config.get("hepnos_progress_thread", False)
hepnos_num_threads = config.get("hepnos_num_threads", 31)
hepnos_num_event_databases = config.get("hepnos_num_event_databases", 1)
hepnos_num_product_databases = config.get("hepnos_num_product_databases",
1)
hepnos_pool_type = config.get("hepnos_pool_type", "fifo_wait")
hepnos_num_providers = config.get("hepnos_num_providers", 1)
busy_spin = config.get("busy_spin", False)
loader_progress_thread = config.get("loader_progress_thread", False)
loader_async = config.get("loader_async", False)
loader_async_threads = config.get("loader_async_threads", 1)
loader_batch_size = config.get("loader_batch_size", 1024)
loader_pes_per_node = config.get("loader_pes_per_node", 1)
pep_progress_thread = config.get("pep_progress_thread", False)
pep_num_threads = config.get("pep_num_threads", 31)
pep_ibatch_size = config.get("pep_ibatch_size", 32)
pep_obatch_size = config.get("pep_obatch_size", 32)
pep_use_preloading = config.get("pep_use_preloading", False)
pep_pes_per_node = config.get("pep_pes_per_node", 16)
# print('Using nodes '+str(nodes))
# nodes = __make_node_list(nodes)
# print('Using nodes '+str(nodes))
print('Setting up experiment\'s directory')
exp_dir = __setup_directory(config.get("id"))
print('Creating settings.sh')
__create_settings(exp_dir, hepnos_pes_per_node, loader_batch_size,
loader_async, loader_async_threads, loader_pes_per_node,
enable_pep, pep_num_threads, pep_ibatch_size,
pep_obatch_size, pep_use_preloading, pep_pes_per_node,
nodes)
print('Creating hepnos.json')
__generate_hepnos_config_file(exp_dir,
busy_spin=busy_spin,
use_progress_thread=hepnos_progress_thread,
num_threads=hepnos_num_threads,
num_providers=hepnos_num_providers,
num_event_dbs=hepnos_num_event_databases,
num_product_dbs=hepnos_num_product_databases,
pool_type=hepnos_pool_type)
print('Creating dataloader.json')
__generate_dataloader_config_file(
exp_dir,
busy_spin=busy_spin,
use_progress_thread=loader_progress_thread)
if enable_pep:
print('Creating pep.json')
__generate_pep_config_file(exp_dir,
busy_spin=busy_spin,
use_progress_thread=pep_progress_thread)
print('Submitting job')
submit_sh = os.path.dirname(
os.path.abspath(__file__)) + '/scripts/submit.sh'
os.system(submit_sh + ' ' + exp_dir)
print('Parsing result')
t = __parse_result(exp_dir)
print('Done (loading time = %f, processing time = %f)' % (t[0], t[1]))
return -(t[0] + t[1])
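# Note: run() returns the *negated* total time because the search framework
# maximizes its objective, so shorter runtimes score higher. An illustrative
# direct call (hypothetical configuration values):
#
#     score = run({'enable_pep': False, 'loader_batch_size': 512})
#     total_seconds = -score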
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='HEPnOS experiment')
parser.add_argument('--hepnos-pes-per-node',
type=int,
default=2,
help='number of PE per node for HEPnOS')
parser.add_argument('--hepnos-progress-thread',
action='store_true',
default=False,
help='whether to use a progress thread in HEPnOS')
parser.add_argument(
'--hepnos-num-threads',
type=int,
default=31,
help='number of RPC handling threads per process for HEPnOS')
parser.add_argument(
'--hepnos-num-providers',
type=int,
default=1,
help='number of providers managing databases in HEPnOS')
parser.add_argument(
'--hepnos-num-event-databases',
type=int,
default=1,
help='number of databases per process for events in HEPnOS')
parser.add_argument(
'--hepnos-num-product-databases',
type=int,
default=1,
help='number of databases per process for products in HEPnOS')
# pool type can be "fifo", "fifo_wait", or "prio_wait"
parser.add_argument('--hepnos-pool-type',
type=str,
default='fifo_wait',
help='type of Argobots pools to use in HEPnOS')
parser.add_argument('--busy-spin',
action='store_true',
default=False,
help='whether to use busy spinning or not')
parser.add_argument(
'--loader-progress-thread',
action='store_true',
default=False,
help='whether to use a progress thread or not in dataloader clients')
parser.add_argument(
'--loader-async',
action='store_true',
default=False,
help='whether to use async progress in dataloader clients')
parser.add_argument(
'--loader-async-threads',
type=int,
default=1,
help='number of threads for async operation in clients')
parser.add_argument('--loader-batch-size',
type=int,
default=1024,
help='batch size for the dataloader')
parser.add_argument(
'--loader-pes-per-node',
type=int,
default=1,
help='number of PES per node (must be between 1 and 64) for loader')
parser.add_argument('--enable-pep',
action='store_true',
default=False,
help='enable PEP benchmark')
parser.add_argument('--pep-progress-thread',
action='store_true',
default=False,
help='whether to use a progress thread or not in PEP')
parser.add_argument(
'--pep-num-threads',
type=int,
default=31,
help='number of processing threads per benchmark process (must be > 0)'
)
parser.add_argument('--pep-ibatch-size',
type=int,
default=32,
help='batch size when loading from HEPnOS')
parser.add_argument('--pep-obatch-size',
type=int,
default=32,
help='batch size when loading from another rank')
parser.add_argument('--pep-use-preloading',
action='store_true',
default=False,
help='whether to use product-preloading')
parser.add_argument(
'--pep-pes-per-node',
type=int,
default=16,
help='number of PES per node (must be between 1 and 64)')
parser.add_argument(
'--pep-cores-per-pe',
type=int,
default=-1,
help='number of cores per PE (must be between 1 and 64)')
parser.add_argument('--nodes', type=str, default=None, help='nodes to use')
    # The product of the last two parameters should not exceed 64.
    # Additionally, the number of processing threads should be
    # the number of cores per PE minus 2 (so effectively the number
    # of cores per PE must be at least 3).
ns = parser.parse_args()
if ns.nodes is not None:
ns.nodes = ns.nodes.split(',')
run(vars(ns), ns.nodes)
| 16,025 | 37.898058 | 79 |
py
|
HEPnOS-Autotuning
|
HEPnOS-Autotuning-main/hepnos_bebop/__init__.py
| 0 | 0 | 0 |
py
|
|
StatisticalClearSky
|
StatisticalClearSky-master/setup.py
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from pathlib import Path
import subprocess
from setuptools import setup, find_packages
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import
from io import open
here = Path()
# Get the long description from the README file
with open((here / "README.md"), encoding="utf-8") as f:
long_description = f.read()
# get all the git tags from the cmd line that follow our versioning pattern
git_tags = subprocess.Popen(['git', 'tag', '--list', 'v*[0-9]', '--sort=version:refname'], stdout=subprocess.PIPE)
# get the most recent tag after it's been sorted 👆
latest_git_tag = subprocess.Popen(['tail', '-1'], stdin=git_tags.stdout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
git_tags.stdout.close()
latest_version = latest_git_tag.communicate()[0]
# PEP 440 won't accept the v in front, so here we remove it, strip the new line and decode the byte stream
VERSION_FROM_GIT_TAG = latest_version[1:].strip().decode("utf-8")
with open((here / "requirements.txt"), encoding="utf-8") as f:
install_requires = f.read().splitlines()
# removes comments in the requirements file
dependencies = [dependency for dependency in install_requires if (dependency[0] != "#")]
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='statistical-clear-sky', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=VERSION_FROM_GIT_TAG,
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='Statistical estimation of a clear sky signal from PV system power data', # Optional
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# Denotes that our long_description is in Markdown; valid values are
# text/plain, text/x-rst, and text/markdown
#
# Optional if long_description is written in reStructuredText (rst) but
# required for plain-text or Markdown; if unspecified, "applications should
# attempt to render [the long_description] as text/x-rst; charset=UTF-8 and
# fall back to text/plain if it is not valid rst" (see link below)
#
# This field corresponds to the "Description-Content-Type" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
long_description_content_type='text/markdown', # Optional (see note above)
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/bmeyers/StatisticalClearSky', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='SLAC National Accelerator Laboratory - Bennet Meyers', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='[email protected]', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
# Pick your license as you wish
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# These classifiers are *not* checked by 'pip install'. See instead
# 'python_requires' below.
#'Programming Language :: Python :: 2',
#'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
#'Programming Language :: Python :: 3.4',
#'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='solar pv photovoltaic', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(exclude=['tests', 'contrib', 'docs', 'clearsky', 'dataviewer', 'notebooks']), # Required
# Specify which Python versions you support. In contrast to the
# 'Programming Language' classifiers above, 'pip install' will check this
# and refuse to install the project if the version does not match. If you
# do not support Python 2, you can simplify this to '>=3.5' or similar, see
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
#python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',
python_requires='>=3.6, <4',
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=dependencies,
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
extras_require={ # Optional
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
#package_data={ # Optional
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
#entry_points={ # Optional
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
entry_points={
'console_scripts': [
'statistical_clear_sky=statistical_clear_sky.command_line:main',
],
},
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
project_urls={ # Optional
'Bug Reports': 'https://github.com/bmeyers/StatisticalClearSky/issues',
},
)
| 9,720 | 42.397321 | 122 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/dataviewer.py
|
# -*- coding: utf-8 -*-
"""
This module contains a data viewer class for data set investigation.
"""
from statistical_clear_sky.utilities.data_loading import load_results
from statistical_clear_sky.utilities.data_loading import load_sys
from statistical_clear_sky.algorithm.iterative_fitting import IterativeFitting
from statistical_clear_sky.configuration import CONFIG1
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.widgets import TextBox, Button
from matplotlib.gridspec import GridSpec
import seaborn as sns
sns.set(context='paper', style='darkgrid', palette='colorblind')
import s3fs
import logging, warnings, time, os
logging.basicConfig(filename='data_viewer.log', level=logging.INFO)
TZ_LOOKUP = {
'America/Anchorage': 9,
'America/Chicago': 6,
'America/Denver': 7,
'America/Los_Angeles': 8,
'America/New_York': 5,
'America/Phoenix': 7,
'Pacific/Honolulu': 10
}
class PointBrowser(object):
"""
See "Event Handling" example from matplotlib documentation:
https://matplotlib.org/examples/event_handling/data_browser.html
Click on a point to select and highlight it -- the data that
    generated the point will be shown in the lower axes. Use the 'a'
    and 's' keys to browse through the previous and next points along the
    x-axis (ordered by the RdTools estimate).
"""
def __init__(self, data, xlim=None, ylim=None, prcntl=95):
logging.info('NEW SESSION')
warnings.filterwarnings("ignore")
self.scsf_cache_dir = './local_cache/'
if not os.path.exists(self.scsf_cache_dir):
os.makedirs(self.scsf_cache_dir)
ordering = np.argsort(data['rd']).values
self.data = data.iloc[ordering]
self.xs = self.data['rd'].values
self.ys = self.data['deg'].values
gs = GridSpec(4, 3)
fig = plt.figure('DataViewer', figsize=(8, 16))
ax = [plt.subplot(gs[0, :2])] # Main scatter plot
with sns.axes_style('white'):
ax.append(plt.subplot(gs[0, -1])) # Record viewing panel
ax[-1].set_axis_off()
ax.append(plt.subplot(gs[1, :])) # Timeseries heatmap view
ax.append(plt.subplot(gs[2, :])) # ClearSky heatmap view
ax.append(plt.subplot(gs[3, :])) # Daily Energy view
self.fig = fig
self.ax = ax
self.ax[0].set_title('click on point to view record')
self.ax[0].set_xlabel('RdTools Estimate YoY deg (%)')
self.ax[0].set_ylabel('SCSF Estimate YoY deg (%)')
self.ax[2].set_title('Measured power')
self.ax[2].set_xlabel('Day number')
self.ax[2].set_yticks([])
self.ax[2].set_ylabel('(sunset) Time of day (sunrise)')
self.line, = self.ax[0].plot(self.xs, self.ys, '.', picker=5) # 5 points tolerance
m = np.logical_and(
np.logical_and(
self.data['res-median'] < np.percentile(self.data['res-median'], prcntl),
self.data['res-var'] < np.percentile(self.data['res-var'], prcntl)
),
self.data['res-L0norm'] < np.percentile(self.data['res-L0norm'], prcntl)
)
m = np.logical_not(m.values)
self.ax[0].plot(self.xs[m], self.ys[m], '.')
if xlim is None:
xlim = self.ax[0].get_xlim()
if ylim is None:
ylim = self.ax[0].get_ylim()
pts = (
min(xlim[0], ylim[0]),
max(xlim[1], ylim[1])
)
self.ax[0].plot(pts, pts, ls='--', color='red')
self.ax[0].set_xlim(xlim)
self.ax[0].set_ylim(ylim)
self.text = self.ax[0].text(0.05, 0.95, 'system ID: none',
transform=self.ax[0].transAxes, va='top')
self.selected, = self.ax[0].plot([self.xs[0]], [self.ys[0]], 'o', ms=6, alpha=0.4,
color='yellow', visible=False)
with sns.axes_style('white'):
ax.append(plt.axes([.77, .5 * (1 + .57), .2, .05 / 2])) # Text box entry
ax.append(plt.axes([.82, .5 * (1 + .5), .1, .05 / 2])) # run SCSF button
self.text_box = TextBox(self.ax[-2], 'ID Number')
self.button = Button(self.ax[-1], 'run SCSF', color='red')
self.lastind = None
self._power_signals_d = None
self._iterative_fitting = None
self.cb = None
self.cb2 = None
        self.local_cache = {}
self.prcntl = prcntl
plt.tight_layout()
self.fig.canvas.mpl_connect('pick_event', self.onpick)
self.fig.canvas.mpl_connect('key_press_event', self.onpress)
self.text_box.on_submit(self.submit)
self.button.on_clicked(self.clicked)
plt.show()
def submit(self, text):
logging.info('submit: ' + str(text))
asrt = np.argsort(np.abs(self.data.index - float(text)))
sysid = self.data.index[asrt[0]]
bool_list = self.data.index == sysid
# bool_list = self.data.index == int(text)
index_lookup = np.arange(self.data.shape[0])
self.lastind = int(index_lookup[bool_list])
logging.info('selected index: ' + str(self.lastind))
self.update()
def clicked(self, event):
if self.lastind is None:
logging.info('button click: nothing selected!')
return
sysid = self.data.iloc[self.lastind].name
logging.info('button click: current ID: {}'.format(sysid))
self.ax[3].cla()
self.ax[3].text(0.05, 0.95, 'initializing algorithm...', transform=self.ax[3].transAxes,
va='top', fontname='monospace')
self.ax[3].set_xlabel('Day number')
self.ax[3].set_yticks([])
self.ax[3].set_ylabel('(sunset) Time of day (sunrise)')
plt.tight_layout()
self.fig.canvas.draw()
self.ax[4].cla()
power_signals_d = self._power_signals_d
cached_files = os.listdir(self.scsf_cache_dir)
fn = 'pvo_' + str(sysid) + '.scsf'
if fn in cached_files:
iterative_fitting = IterativeFitting.load_instance(
self.scsf_cache_dir + fn)
self._iterative_fitting = iterative_fitting
self.ax[4].plot(np.sum(iterative_fitting.power_signals_d, axis=0)
* 24 / iterative_fitting.power_signals_d.shape[0],
linewidth=1, label='raw data')
use_day = iterative_fitting.weights > 1e-1
days = np.arange(iterative_fitting.power_signals_d.shape[1])
self.ax[4].scatter(days[use_day],
np.sum(iterative_fitting.power_signals_d, axis=0)[use_day]
* 24 / iterative_fitting.power_signals_d.shape[0],
color='orange', alpha=0.7, label='days selected')
self.ax[4].legend()
self.ax[4].set_title('Daily Energy')
self.ax[4].set_xlabel('Day Number')
self.ax[4].set_ylabel('kWh')
self.ax[3].cla()
self.ax[3].text(0.05, 0.95, 'loading cached results...',
transform=self.ax[3].transAxes,
va='top', fontname='monospace')
self.ax[3].set_xlabel('Day number')
self.ax[3].set_yticks([])
self.ax[3].set_ylabel('(sunset) Time of day (sunrise)')
self.show_ticks(self.ax[2])
plt.tight_layout()
self.fig.canvas.draw()
else:
            iterative_fitting = IterativeFitting(power_signals_d)
self._iterative_fitting = iterative_fitting
self.ax[4].plot(np.sum(iterative_fitting.power_signals_d, axis=0) * 24 / iterative_fitting.power_signals_d.shape[0], linewidth=1, label='raw data')
use_day = iterative_fitting.weights > 1e-1
days = np.arange(iterative_fitting.power_signals_d.shape[1])
self.ax[4].scatter(days[use_day], np.sum(iterative_fitting.power_signals_d, axis=0)[use_day] * 24 / iterative_fitting.power_signals_d.shape[0],
color='orange', alpha=0.7, label='days selected')
self.ax[4].legend()
self.ax[4].set_title('Daily Energy')
self.ax[4].set_xlabel('Day Number')
self.ax[4].set_ylabel('kWh')
self.ax[3].cla()
self.ax[3].text(0.05, 0.95, 'running algorithm...', transform=self.ax[3].transAxes,
va='top', fontname='monospace')
self.ax[3].set_xlabel('Day number')
self.ax[3].set_yticks([])
self.ax[3].set_ylabel(
'(sunset) Time of day (sunrise)')
self.show_ticks(self.ax[2])
plt.tight_layout()
self.fig.canvas.draw()
logging.info('starting algorithm')
config_l = CONFIG1.copy()
config_l['max_iteration'] = 1
obj_vals = iterative_fitting.calculate_objective_with_result(False)
old_obj = np.sum(obj_vals)
ti = time.time()
for cntr in range(CONFIG1['max_iteration']):
iterative_fitting.execute(**config_l)
logging.info('min iteration {} complete'.format(cntr + 1))
obj_vals = iterative_fitting.calculate_objective_with_result(
False)
new_obj = np.sum(obj_vals)
improvement = (old_obj - new_obj) * 1. / old_obj
self.ax[3].cla()
self.ax[3].set_xlabel('Day number')
self.ax[3].set_yticks([])
self.ax[3].set_ylabel('(sunset) Time of day (sunrise)')
s1 = 'Iteration {} complete: obj = {:.2f}, f1 = {:.2f}'.format(cntr + 1, new_obj, obj_vals[0])
s2 = 'Improvement: {:.2f}%'.format(100 * improvement)
tf = time.time()
s3 = 'Time elapsed: {:.2f} minutes'.format((tf - ti) / 60.)
textout = '\n'.join([s1, s2, s3])
logging.info(textout)
self.ax[3].text(0.05, 0.95, textout, transform=self.ax[3].transAxes,
va='top', fontname='monospace')
plt.tight_layout()
self.fig.canvas.draw()
old_obj = new_obj
if improvement <= CONFIG1['eps']:
break
iterative_fitting.save_instance(self.scsf_cache_dir + fn)
logging.info('algorithm complete')
self.ax[4].plot((iterative_fitting.r_cs_value[0] *
np.sum(iterative_fitting.l_cs_value[:, 0])) * 24 /
iterative_fitting.power_signals_d.shape[0],
linewidth=1, label='clear sky estimate')
self.ax[4].legend()
logging.info('first plot complete')
with sns.axes_style('white'):
self.ax[3].cla()
bar = self.ax[3].imshow(
iterative_fitting.clear_sky_signals(), cmap='hot',
vmin=0, vmax=np.max(iterative_fitting.power_signals_d),
interpolation='none', aspect='auto')
if self.cb2 is not None:
self.cb2.remove()
self.cb2 = plt.colorbar(bar, ax=self.ax[3], label='kW')
self.show_ticks(self.ax[3])
self.ax[3].set_title('Estimated clear sky power')
self.ax[3].set_xlabel('Day number')
self.ax[3].set_yticks([])
self.ax[3].set_ylabel('(sunset) Time of day (sunrise)')
logging.info('second plot complete')
plt.tight_layout()
self.fig.canvas.draw()
return
def show_ticks(self, ax):
xlim = ax.get_xlim()
ylim = ax.get_ylim()
use_day = self._iterative_fitting.weights > 1e-1
days = np.arange(self._iterative_fitting.power_signals_d.shape[1])
y1 = np.ones_like(days[use_day]) * self._power_signals_d.shape[0] * .99
ax.scatter(days[use_day], y1, marker='|', color='yellow', s=2)
ax.scatter(days[use_day], .995 * y1, marker='|', color='yellow', s=2)
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
return
def onpress(self, event):
if self.lastind is None:
return
logging.info('press event: ' + str(event.key))
if event.key == 'a':
inc = -1
self.lastind += inc
self.lastind = np.clip(self.lastind, 0, len(self.xs) - 1)
elif event.key == 's':
inc = 1
self.lastind += inc
self.lastind = np.clip(self.lastind, 0, len(self.xs) - 1)
else:
return
self.update()
def onpick(self, event):
if event.artist != self.line:
return True
N = len(event.ind)
if not N:
return True
# the click locations
x = event.mouseevent.xdata
y = event.mouseevent.ydata
logging.info('pick: ' + str(x) + ', ' + str(y))
distances = np.hypot(x - self.xs[event.ind], y - self.ys[event.ind])
indmin = distances.argmin()
dataind = event.ind[indmin]
self.lastind = dataind
self.update()
def update(self):
if self.lastind is None:
return
dataind = self.lastind
prcntl = self.prcntl
logging.info('updating, ID = {}'.format(self.data.iloc[dataind].name))
self.selected.set_visible(True)
self.selected.set_data(self.xs[dataind], self.ys[dataind])
out1 = 'system ID: {:d}'.format(self.data.iloc[dataind].name)
out2 = str(self.data.iloc[dataind])
# self.text_box.set_val('')
idxs = np.arange(len(self.data.columns))
if self.data.iloc[dataind]['res-median'] > np.percentile(self.data['res-median'], prcntl):
l1 = out2.split('\n')
i = idxs[self.data.columns == 'res-median'][0]
l1[i] = '*' + l1[i][:-2] + '*'
out2 = '\n'.join(l1)
if self.data.iloc[dataind]['res-var'] > np.percentile(self.data['res-var'], prcntl):
l1 = out2.split('\n')
i = idxs[self.data.columns == 'res-var'][0]
l1[i] = '*' + l1[i][:-2] + '*'
out2 = '\n'.join(l1)
if self.data.iloc[dataind]['res-L0norm'] > np.percentile(self.data['res-L0norm'], prcntl):
l1 = out2.split('\n')
i = idxs[self.data.columns == 'res-L0norm'][0]
l1[i] = '*' + l1[i][:-2] + '*'
out2 = '\n'.join(l1)
self.text.set_text(out1)
self.ax[1].cla()
self.ax[1].text(0.00, 0.95, out2, transform=self.ax[1].transAxes, va='top', fontname='monospace')
self.ax[1].set_axis_off()
self.ax[2].cla()
self.ax[2].text(0.05, 0.95, 'data loading...', transform=self.ax[2].transAxes, va='top', fontname='monospace')
self.ax[2].set_xlabel('Day number')
self.ax[2].set_yticks([])
self.ax[2].set_ylabel('(sunset) Time of day (sunrise)')
self.ax[3].cla()
self.ax[4].cla()
self._iterative_fitting = None
plt.tight_layout()
self.fig.canvas.draw()
with sns.axes_style('white'):
idnum = self.data.iloc[dataind].name
            if idnum in self.local_cache.keys():
                df = self.local_cache[idnum]
            else:
                df = load_sys(idnum=idnum, local=False)
                self.local_cache[idnum] = df
days = df.resample('D').max().index[1:-1]
start = days[0]
end = days[-1]
power_signals_d = df.loc[start:end].iloc[:-1].values.reshape(
288, -1, order='F')
self._power_signals_d = power_signals_d
self.ax[2].cla()
foo = self.ax[2].imshow(power_signals_d, cmap='hot', interpolation='none', aspect='auto')
if self.cb is not None:
self.cb.remove()
self.cb = plt.colorbar(foo, ax=self.ax[2], label='kW')
self.ax[2].set_xlabel('Day number')
self.ax[2].set_yticks([])
self.ax[2].set_ylabel('(sunset) Time of day (sunrise)')
self.ax[2].set_title('Measured power')
self.text_box.set_val('')
self.fig.canvas.draw()
@property
def iterative_fitting(self):
return self._iterative_fitting
def view_ts(pb, clear_day_start=None, day_start=None):
if pb.iterative_fitting is not None:
clear_days = np.arange(
len(pb.iterative_fitting.weights))[
pb.iterative_fitting.weights >= 1e-3]
fig = pb.iterative_fitting.ts_plot_with_weights(
num_days=len(pb.iterative_fitting.weights), figsize=(9, 6),
fig_title='System ID: {}'.format(pb.data.iloc[pb.lastind].name))
if clear_day_start is not None:
N = clear_day_start
plt.xlim(clear_days[N] - 2, clear_days[N] - 2 + 5)
elif day_start is not None:
plt.xlim(day_start, day_start+5)
else:
plt.xlim(0, 5)
plt.show()
| 17,012 | 40.800983 | 159 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/configuration.py
|
CONFIG1 = {
'mu_l': 5e3,
'mu_r': 1e3,
'tau': 0.9,
'exit_criterion_epsilon': 5e-3,
'max_iteration': 10,
'min_degradation': None,
'max_degradation': None
}
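# These keys mirror keyword arguments of IterativeFitting.execute(): mu_l and
# mu_r weight the smoothness penalties on the left and right matrices, tau is
# the quantile-loss parameter, and the remaining keys bound the degradation
# rate and control the stopping criterion.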
| 182 | 17.3 | 35 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/__init__.py
|
from statistical_clear_sky.algorithm.iterative_fitting import IterativeFitting
from statistical_clear_sky.algorithm.iterative_fitting import IterativeFitting as SCSF
| 165 | 82 | 86 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/utilities/data_loading.py
|
import numpy as np
import pandas as pd
TZ_LOOKUP = {
'America/Anchorage': 9,
'America/Chicago': 6,
'America/Denver': 7,
'America/Los_Angeles': 8,
'America/New_York': 5,
'America/Phoenix': 7,
'Pacific/Honolulu': 10
}
def load_results():
base = 's3://pvinsight.nrel/output/'
nrel_data = pd.read_csv(base + 'pvo_results.csv')
slac_data = pd.read_csv(base + 'scsf-unified-results.csv')
slac_data['all-pass'] = np.logical_and(
        np.all(np.logical_not(slac_data[['solver-error', 'f1-increase', 'obj-increase']]), axis=1),
np.isfinite(slac_data['deg'])
)
cols = ['ID', 'rd', 'deg', 'rd_low', 'rd_high', 'all-pass',
'fix-ts', 'num-days', 'num-days-used', 'use-frac',
'res-median', 'res-var', 'res-L0norm']
df = pd.merge(nrel_data, slac_data, how='left', left_on='datastream', right_on='ID')
df = df[cols]
df.set_index('ID', inplace=True)
df = df[df['all-pass'] == True]
df['deg'] = df['deg'] * 100
df['difference'] = df['rd'] - df['deg']
df['rd_range'] = df['rd_high'] - df['rd_low']
cols = ['rd', 'deg', 'difference', 'rd_range',
'res-median', 'res-var', 'res-L0norm', 'rd_low', 'rd_high', 'all-pass',
'fix-ts', 'num-days', 'num-days-used', 'use-frac']
df = df[cols]
return df
def load_sys(n=None, idnum=None, local=True, meta=None):
if local:
base = '../data/PVO/'
    else:
base = 's3://pvinsight.nrel/PVO/'
if meta is None:
meta = pd.read_csv('s3://pvinsight.nrel/PVO/sys_meta.csv')
if n is not None:
idnum = meta['ID'][n]
elif idnum is not None:
n = meta[meta['ID'] == idnum].index[0]
else:
print('must provide index or ID')
return
df = pd.read_csv(base+'PVOutput/{}.csv'.format(idnum), index_col=0,
parse_dates=[0], usecols=[1, 3])
tz = meta['TimeZone'][n]
df.index = df.index.tz_localize(tz).tz_convert('Etc/GMT+{}'.format(TZ_LOOKUP[tz])) # fix daylight savings
start = df.index[0]
end = df.index[-1]
time_index = pd.date_range(start=start, end=end, freq='5min')
df = df.reindex(index=time_index, fill_value=0)
print(n, idnum)
return df
def resample_index(length=365*5):
indices = np.arange(length)
resampled = np.random.choice(indices, size=length, replace=True)
weights = np.zeros(length)
for index in resampled:
weights[index] += 1
return weights
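# Example (illustrative): resample_index(4) draws a bootstrap resample of the
# indices 0..3 with replacement and returns per-index multiplicities, e.g.
# array([2., 0., 1., 1.]); the returned weights always sum to `length`.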
| 2,483 | 34.485714 | 111 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/utilities/progress.py
|
import sys
def progress(count, total, status='', bar_length=60):
"""
    Python command line progress bar in less than 10 lines of code. Adapted
    from: https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
    :param count: the current count, int
    :param total: the total count, int
:param status: a message to display
:return:
"""
bar_len = bar_length
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', status))
sys.stdout.flush()
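# Example usage (illustrative): report progress over a loop of n steps.
#
#     for i in range(n):
#         do_work(i)  # hypothetical work function
#         progress(i + 1, n, status='working')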
| 648 | 31.45 | 76 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/utilities/data_conversion.py
|
import pandas as pd
def make_time_series(df, return_keys=True, localize_time=-8, filter_length=200):
'''
Accepts a Pandas data frame extracted from the Cassandra database. Returns a data frame with a single timestamp
index and the data from different systems split into columns.
:param df: A Pandas data from generated from a CQL query to the VADER Cassandra database
:param return_keys: If true, return the mapping from data column names to site and system ID
:param localize_time: If non-zero, localize the time stamps. Default is PST or UTC-8
:param filter_length: The number of non-null data values a single system must have to be included in the output
:return: A time-series data frame
'''
df.sort_values('ts', inplace=True)
start = df.iloc[0]['ts']
end = df.iloc[-1]['ts']
time_index = pd.date_range(start=start, end=end, freq='5min')
output = pd.DataFrame(index=time_index)
site_keys = []
site_keys_a = site_keys.append
grouped = df.groupby(['site', 'sensor'])
keys = grouped.groups.keys()
counter = 1
for key in keys:
df_view = df.loc[grouped.groups[key]]
############## data cleaning ####################################
df_view = df_view[pd.notnull(df_view['meas_val_f'])] # Drop records with nulls
df_view.set_index('ts', inplace=True) # Make the timestamp column the index
df_view.sort_index(inplace=True) # Sort on time
df_view = df_view[~df_view.index.duplicated(keep='first')] # Drop duplicate times
        df_view = df_view.reindex(index=time_index).interpolate()  # Match the master index, interpolate missing
#################################################################
        meas_name = str(df_view['meas_name'].iloc[0])
col_name = meas_name + '_{:02}'.format(counter)
output[col_name] = df_view['meas_val_f']
if output[col_name].count() > filter_length: # final filter on low data count relative to time index
site_keys_a((key, col_name))
counter += 1
else:
del output[col_name]
if localize_time:
output.index = output.index + pd.Timedelta(hours=localize_time) # Localize time
if return_keys:
return output, site_keys
else:
return output
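# Example usage (illustrative), assuming `df` came from a CQL query and has
# columns ['ts', 'site', 'sensor', 'meas_name', 'meas_val_f']:
#
#     output, site_keys = make_time_series(df)
#     output = make_time_series(df, return_keys=False, localize_time=0)  # keep original time zone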
| 2,379 | 49.638298 | 115 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/utilities/__init__.py
| 0 | 0 | 0 |
py
|
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/utilities/filters.py
|
import numpy as np
import matplotlib.pyplot as plt
import cvxpy as cvx
def lowpass_2d(data, r=25):
fs = np.fft.fft2(data)
    fltr = np.zeros_like(data, dtype=float)
m, n = data.shape
c = (m // 2, n // 2)
if m % 2 == 0:
di = 0
else:
di = 1
if n % 2 == 0:
dj = 0
else:
dj = 1
y, x = np.ogrid[-c[0]:c[0] + di, -c[1]:c[1] + dj]
mask = x ** 2 + y ** 2 <= r ** 2
fltr[mask] = 1
fs_filtered = np.fft.fftshift(np.multiply(np.fft.fftshift(fs), fltr))
data_filtered = np.abs(np.fft.ifft2(fs_filtered))
return data_filtered
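# Example usage (illustrative): smooth a noisy 2-D power matrix by keeping
# only the spatial frequencies within radius r of the (shifted) DC component.
#
#     smoothed = lowpass_2d(power_signals_d, r=25)  # power_signals_d: 2-D ndarray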
def edge_find_1d(s1, tol=5e-2, ixs=None, ix0=0, w=30, mu=3, debug=False):
# Returns the indices of edges in a 1-D array. This algorithm recursively segments the input array until all edges
# have been found.
if ixs is None:
ixs = []
x = cvx.Variable(len(s1))
mu = cvx.Constant(mu)
obj = cvx.Minimize(cvx.norm(s1[np.isfinite(s1)] - x[np.isfinite(s1)]) + mu * cvx.norm1(x[:-1] - x[1:]))
prob = cvx.Problem(obj)
prob.solve(solver='MOSEK')
if debug:
plt.plot(x.value)
plt.show()
s2 = np.abs(x.value[:-1] - x.value[1:])
if debug:
print(s2.max() - s2.min())
if s2.max() - s2.min() < tol:
# There are no step shifts in this data segment
return ixs
else:
# There is a step shift in this data segment
ix = np.argsort(-s2)[0]
vr_best = -np.inf
for j in range(ix - w, ix + w):
jx = max(0, j)
jx = min(jx, len(s1))
sa = s1[:jx][np.isfinite(s1)[:jx]]
sb = s1[jx:][np.isfinite(s1)[jx:]]
vr = (np.std(s1[np.isfinite(s1)]) ** 2
- (len(sa) / len(s1[np.isfinite(s1)])) * np.std(sa)
- (len(sb) / len(s1[np.isfinite(s1)])) * np.std(sb))
if vr > vr_best:
vr_best = vr
ix_best = jx
ixs.append(ix_best + ix0)
ixs1 = edge_find_1d(s1[:ix_best], tol=tol, ixs=ixs, ix0=ix0)
ixs2 = edge_find_1d(s1[ix_best:], tol=tol, ixs=ixs1, ix0=ix0+ix_best)
ixs2.sort()
return ixs2
| 2,156 | 32.703125 | 118 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/iterative_fitting.py
|
"""
This module defines "Statistical Clear Sky Fitting" algorithm.
"""
from time import time
import numpy as np
from numpy.linalg import norm
import cvxpy as cvx
from collections import defaultdict
from\
statistical_clear_sky.algorithm.initialization.singular_value_decomposition\
import SingularValueDecomposition
from statistical_clear_sky.algorithm.initialization.linearization_helper\
import LinearizationHelper
from statistical_clear_sky.algorithm.initialization.weight_setting\
import WeightSetting
from statistical_clear_sky.algorithm.exception import ProblemStatusError
from statistical_clear_sky.algorithm.minimization.left_matrix\
import LeftMatrixMinimization
from statistical_clear_sky.algorithm.minimization.right_matrix\
import RightMatrixMinimization
from statistical_clear_sky.algorithm.serialization.state_data import StateData
from statistical_clear_sky.algorithm.serialization.serialization_mixin\
import SerializationMixin
from statistical_clear_sky.algorithm.plot.plot_mixin import PlotMixin
from statistical_clear_sky.utilities.data_loading import resample_index
from statistical_clear_sky.utilities.progress import progress
class IterativeFitting(SerializationMixin, PlotMixin):
"""
Implementation of "Statistical Clear Sky Fitting" algorithm.
"""
def __init__(self, data_matrix=None, data_handler_obj=None, rank_k=6,
solver_type='MOSEK', reserve_test_data=False):
"""
:param data_matrix:
:param data_handler_obj:
:param rank_k:
:param solver_type:
:param reserve_test_data:
"""
self._solver_type = solver_type
self._rank_k = rank_k
if data_handler_obj is None and data_matrix is None:
print('Please initialize class with a data set')
elif data_handler_obj is not None:
data_matrix = data_handler_obj.filled_data_matrix
self._power_signals_d = data_matrix
self._capacity = data_handler_obj.capacity_estimate
# Set the weighting now, to use the error flagging feature
weights = self._get_weight_setting().obtain_weights(data_matrix)
weights *= data_handler_obj.daily_flags.no_errors
else:
self._power_signals_d = data_matrix
self._decomposition = SingularValueDecomposition()
self._decomposition.decompose(data_matrix, rank_k=rank_k)
self._matrix_l0 = self._decomposition.matrix_l0
self._matrix_r0 = self._decomposition.matrix_r0
self._bootstrap_samples = None
self._set_testdays(data_matrix, reserve_test_data)
# Handle both DataHandler objects and reserving test data
if data_handler_obj is not None and self._test_days is not None:
weights[self._test_days] = 0
# Stores the current state of the object:
self._state_data = StateData()
self._store_initial_state_data()
if data_handler_obj is not None:
self._weights = weights
self._state_data.weights = weights
self._set_residuals()
def execute(self, mu_l=None, mu_r=None, tau=None,
exit_criterion_epsilon=1e-3,
max_iteration=10, is_degradation_calculated=True,
max_degradation=None, min_degradation=None,
non_neg_constraints=False, verbose=True, bootstraps=None):
mu_l, mu_r, tau = self._obtain_hyper_parameters(mu_l, mu_r, tau)
l_cs_value, r_cs_value, beta_value = self._obtain_initial_values()
weights = self._obtain_weights(verbose=verbose)
component_r0 = self._obtain_initial_component_r0(verbose=verbose)
self.__left_first = True
self._minimize_objective(l_cs_value, r_cs_value, beta_value,
component_r0, weights, mu_l=mu_l, mu_r=mu_r, tau=tau,
exit_criterion_epsilon=exit_criterion_epsilon,
max_iteration=max_iteration,
is_degradation_calculated=is_degradation_calculated,
max_degradation=max_degradation, min_degradation=min_degradation,
non_neg_constraints=non_neg_constraints, verbose=verbose,
bootstraps=bootstraps)
self._keep_supporting_parameters_as_properties(weights)
self._store_final_state_data(weights)
def calculate_objective_with_result(self, sum_components=True):
return self._calculate_objective(self._state_data.mu_l,
self._state_data.mu_r, self._state_data.tau, self._l_cs_value,
self._r_cs_value, self._beta_value, self._weights,
sum_components=sum_components)
@property
def measured_power_matrix(self):
return self._power_signals_d
@property
def estimated_power_matrix(self):
left = self._l_cs_value
right = self._r_cs_value
mat = left.dot(right)
return mat
@property
def estimated_clear_sky(self):
return self._l_cs_value @ self._r_cs_value
@property
def deg_rate(self):
return self._beta_value.item()
@property
def left_matrix(self):
return self._l_cs_value
@property
def right_matrix(self):
return self._r_cs_value
@property
def left_problem(self):
return self._l_problem
@property
def right_problem(self):
return self._r_problem
@property
def l_cs_value(self):
return self._l_cs_value
@property
def r_cs_value(self):
return self._r_cs_value
@property
def beta_value(self):
return self._beta_value
@property
def weights(self):
return self._weights
@property
def residuals_median(self):
return self._residuals_median
@property
def residuals_variance(self):
return self._residuals_variance
@property
def residual_l0_norm(self):
return self._residual_l0_norm
@property
def fixed_time_stamps(self):
return self._fixed_time_stamps
@property
def test_days(self):
return self._test_days
@property
def state_data(self):
return self._state_data
@property
def bootstrap_samples(self):
return self._bootstrap_samples
# Alias method for l_cs_value accessor (with property decorator):
def left_low_rank_matrix(self):
return self.l_cs_value
# Alias method for r_cs_value accessor (with property decorator):
def right_low_rank_matrix(self):
return self.r_cs_value
# Alias method for beta_value accessor (with property decorator):
def degradation_rate(self):
return self.beta_value
def clear_sky_signals(self):
return self._l_cs_value.dot(self._r_cs_value)
def _minimize_objective(self, l_cs_value, r_cs_value, beta_value,
component_r0, weights,
mu_l=None, mu_r=None, tau=None,
exit_criterion_epsilon=1e-3, max_iteration=100,
is_degradation_calculated=True,
max_degradation=None, min_degradation=None,
non_neg_constraints=True, verbose=True,
bootstraps=None):
left_matrix_minimization = self._get_left_matrix_minimization(
weights, tau, mu_l, non_neg_constraints=non_neg_constraints)
right_matrix_minimization = self._get_right_matrix_minimization(
weights, tau, mu_r, non_neg_constraints=non_neg_constraints,
is_degradation_calculated=is_degradation_calculated,
max_degradation=max_degradation,
min_degradation=min_degradation)
self._l_problem = left_matrix_minimization
self._r_problem = right_matrix_minimization
ti = time()
objective_values = self._calculate_objective(mu_l, mu_r, tau,
l_cs_value, r_cs_value, beta_value, weights,
sum_components=False)
if verbose:
print('----------------------\nSCSF Problem Setup\n----------------------')
msg1 = 'Matrix Size: {} x {} = {} power measurements'.format(
self._power_signals_d.shape[0],
self._power_signals_d.shape[1],
self._power_signals_d.size
)
print(msg1)
reduced_mat = self._power_signals_d[:, self._weights > 0]
try:
real_meas = reduced_mat > 0.005 * self._capacity
        except Exception:  # no usable capacity estimate (e.g., initialized without a DataHandler)
real_meas = reduced_mat > 0.005 * np.nanquantile(self._power_signals_d, 0.95)
msg = 'Sparsity: {:.2f}%'.format(
100 * (1 - np.sum(real_meas) / self._power_signals_d.size)
)
print(msg)
msg = '{} non-zero measurements under clear conditions'.format(
np.sum(real_meas)
)
print(msg)
msg2 = 'Model size: {} x {} + {} x {} = {} parameters'.format(
l_cs_value.shape[0],
l_cs_value.shape[1],
r_cs_value.shape[0],
r_cs_value.shape[1],
np.sum([
l_cs_value.shape[0] * l_cs_value.shape[1],
r_cs_value.shape[0] * r_cs_value.shape[1]
])
)
print(msg2)
print('\n')
print('----------------------\nAlgorithm Iterations\n----------------------')
ps = 'Starting at Objective: {:.3e}, f1: {:.3e}, f2: {:.3e},'
ps += ' f3: {:.3e}, f4: {:.3e}'
print(ps.format(
np.sum(objective_values), objective_values[0],
objective_values[1], objective_values[2],
objective_values[3]
))
improvement = np.inf
old_objective_value = np.sum(objective_values)
iteration = 0
f1_last = objective_values[0]
tol_schedule = [] #np.logspace(-4, -8, 6)
while improvement >= exit_criterion_epsilon:
try:
tol = tol_schedule[iteration]
except IndexError:
tol = 1e-8
self._store_minimization_state_data(mu_l, mu_r, tau,
l_cs_value, r_cs_value, beta_value, component_r0)
try:
if self.__left_first:
if verbose:
print(' Minimizing left matrix')
l_cs_value, r_cs_value, beta_value\
= left_matrix_minimization.minimize(
l_cs_value, r_cs_value, beta_value, component_r0, tol=tol)
if verbose:
print(' Minimizing right matrix')
l_cs_value, r_cs_value, beta_value\
= right_matrix_minimization.minimize(
l_cs_value, r_cs_value, beta_value, component_r0, tol=tol)
else:
if verbose:
print(' Minimizing right matrix')
l_cs_value, r_cs_value, beta_value\
= right_matrix_minimization.minimize(
l_cs_value, r_cs_value, beta_value, component_r0, tol=tol)
if verbose:
print(' Minimizing left matrix')
l_cs_value, r_cs_value, beta_value\
= left_matrix_minimization.minimize(
l_cs_value, r_cs_value, beta_value, component_r0, tol=tol)
except cvx.SolverError:
if self.__left_first:
if verbose:
print('Solver failed! Starting over and reversing minimization order.')
self.__left_first = False
iteration = 0
l_cs_value = self._decomposition.matrix_l0
r_cs_value = self._decomposition.matrix_r0
component_r0 = self._obtain_initial_component_r0(
verbose=verbose)
continue
else:
if verbose:
print('Solver failing again! Exiting...')
self._state_data.is_solver_error = True
break
except ProblemStatusError as e:
if verbose:
print(e)
if self.__left_first:
if verbose:
print('Starting over and reversing minimization order.')
self.__left_first = False
iteration = 0
l_cs_value = self._decomposition.matrix_l0
r_cs_value = self._decomposition.matrix_r0
component_r0 = self._obtain_initial_component_r0(
verbose=verbose)
continue
else:
if verbose:
print('Exiting...')
self._state_data.is_problem_status_error = True
break
component_r0 = r_cs_value[0, :]
objective_values = self._calculate_objective(mu_l, mu_r, tau,
l_cs_value, r_cs_value, beta_value, weights,
sum_components=False)
new_objective_value = np.sum(objective_values)
improvement = ((old_objective_value - new_objective_value)
* 1. / old_objective_value)
old_objective_value = new_objective_value
iteration += 1
if verbose:
ps = '{} - Objective: {:.3e}, f1: {:.3e}, f2: {:.3e},'
ps += ' f3: {:.3e}, f4: {:.3e}'
print(ps.format(
iteration, new_objective_value, objective_values[0],
objective_values[1], objective_values[2],
objective_values[3]
))
if objective_values[0] > f1_last:
self._state_data.f1_increase = True
if verbose:
print('Caution: residuals increased')
if improvement < 0:
if verbose:
print('Caution: objective increased.')
self._state_data.obj_increase = True
improvement *= -1
if objective_values[3] > 1e2:
if self.__left_first:
if verbose:
print('Bad trajectory detected. Starting over and reversing minimization order.')
self.__left_first = False
iteration = 0
l_cs_value = self._decomposition.matrix_l0
r_cs_value = self._decomposition.matrix_r0
component_r0 = self._obtain_initial_component_r0(verbose=verbose)
else:
if verbose:
print('Algorithm Failed!')
improvement = 0
if iteration >= max_iteration:
if verbose:
print('Reached iteration limit. Previous improvement: {:.2f}%'.format(improvement * 100))
improvement = 0.
self._store_minimization_state_data(mu_l, mu_r, tau,
l_cs_value, r_cs_value, beta_value, component_r0)
# except cvx.SolverError:
# if self.__left_first:
# if verbose:
# print('solver failed! Starting over and reversing minimization order.')
#
# self._state_data.is_solver_error = True
# except ProblemStatusError as e:
# if verbose:
# print(e)
# self._state_data.is_problem_status_error = True
tf = time()
if verbose:
print('Minimization complete in {:.2f} minutes'.format(
(tf - ti) / 60.))
self._analyze_residuals(l_cs_value, r_cs_value, weights)
self._keep_result_variables_as_properties(l_cs_value, r_cs_value,
beta_value)
if bootstraps is not None:
if verbose:
print('Running bootstrap analysis...')
ti = time()
self._bootstrap_samples = defaultdict(dict)
for ix in range(bootstraps):
# resample the days with non-zero weights only
bootstrap_weights = resample_index(length=np.sum(weights > 1e-1))
new_weights = np.zeros_like(weights)
new_weights[weights > 1e-1] = bootstrap_weights
new_weights = np.multiply(weights, new_weights)
left_matrix_minimization.update_weights(new_weights)
right_matrix_minimization.update_weights(new_weights)
l_cs_value = self._l_cs_value
r_cs_value = self._r_cs_value
beta_value = self._beta_value
# ti = time()
objective_values = self._calculate_objective(mu_l, mu_r, tau,
l_cs_value,
r_cs_value,
beta_value,
new_weights,
sum_components=False)
if verbose:
progress(ix, bootstraps, status=' {:.2f} minutes'.format(
(time() - ti) / 60
))
# ps = 'Bootstrap Sample {}\n'.format(ix)
# ps += 'Starting at Objective: {:.3e}, f1: {:.3e}, f2: {:.3e},'
# ps += ' f3: {:.3e}, f4: {:.3e}'
# print(ps.format(
# np.sum(objective_values), objective_values[0],
# objective_values[1], objective_values[2],
# objective_values[3]
# ))
improvement = np.inf
old_objective_value = np.sum(objective_values)
iteration = 0
f1_last = objective_values[0]
tol_schedule = [] # np.logspace(-4, -8, 6)
while improvement >= exit_criterion_epsilon:
try:
tol = tol_schedule[iteration]
except IndexError:
tol = 1e-8
# self._store_minimization_state_data(mu_l, mu_r, tau,
# l_cs_value, r_cs_value,
# beta_value,
# component_r0)
try:
if self.__left_first:
# if verbose:
# print(' Minimizing left matrix')
l_cs_value, r_cs_value, beta_value \
= left_matrix_minimization.minimize(
l_cs_value, r_cs_value, beta_value,
component_r0, tol=tol)
# if verbose:
# print(' Minimizing right matrix')
l_cs_value, r_cs_value, beta_value \
= right_matrix_minimization.minimize(
l_cs_value, r_cs_value, beta_value,
component_r0, tol=tol)
else:
# if verbose:
# print(' Minimizing right matrix')
l_cs_value, r_cs_value, beta_value \
= right_matrix_minimization.minimize(
l_cs_value, r_cs_value, beta_value,
component_r0, tol=tol)
# if verbose:
# print(' Minimizing left matrix')
l_cs_value, r_cs_value, beta_value \
= left_matrix_minimization.minimize(
l_cs_value, r_cs_value, beta_value,
component_r0, tol=tol)
except cvx.SolverError:
if self.__left_first:
if verbose:
print(
'Solver failed! Starting over and reversing minimization order.')
self.__left_first = False
iteration = 0
l_cs_value = self._decomposition.matrix_l0
r_cs_value = self._decomposition.matrix_r0
component_r0 = self._obtain_initial_component_r0(
verbose=verbose)
continue
else:
if verbose:
print('Solver failing again! Exiting...')
self._state_data.is_solver_error = True
break
except ProblemStatusError as e:
if verbose:
print(e)
if self.__left_first:
if verbose:
print(
'Starting over and reversing minimization order.')
self.__left_first = False
iteration = 0
l_cs_value = self._decomposition.matrix_l0
r_cs_value = self._decomposition.matrix_r0
component_r0 = self._obtain_initial_component_r0(
verbose=verbose)
continue
else:
if verbose:
print('Exiting...')
self._state_data.is_problem_status_error = True
break
component_r0 = r_cs_value[0, :]
objective_values = self._calculate_objective(mu_l, mu_r,
tau,
l_cs_value,
r_cs_value,
beta_value,
new_weights,
sum_components=False)
new_objective_value = np.sum(objective_values)
improvement = ((old_objective_value - new_objective_value)
* 1. / old_objective_value)
old_objective_value = new_objective_value
iteration += 1
# if verbose:
# ps = '{} - Objective: {:.3e}, f1: {:.3e}, f2: {:.3e},'
# ps += ' f3: {:.3e}, f4: {:.3e}'
# print(ps.format(
# iteration, new_objective_value,
# objective_values[0],
# objective_values[1], objective_values[2],
# objective_values[3]
# ))
if objective_values[0] > f1_last:
self._state_data.f1_increase = True
if verbose:
print('Caution: residuals increased')
if improvement < 0:
if verbose:
print('Caution: objective increased.')
self._state_data.obj_increase = True
improvement *= -1
if objective_values[3] > 1e2:
if self.__left_first:
if verbose:
print(
'Bad trajectory detected. Starting over and reversing minimization order.')
self.__left_first = False
iteration = 0
l_cs_value = self._decomposition.matrix_l0
r_cs_value = self._decomposition.matrix_r0
component_r0 = self._obtain_initial_component_r0(
verbose=verbose)
else:
if verbose:
print('Algorithm Failed!')
improvement = 0
if iteration >= max_iteration:
if verbose:
print(
'Reached iteration limit. Previous improvement: {:.2f}%'.format(
improvement * 100))
improvement = 0.
# tf = time()
# if verbose:
# print('Bootstrap {} complete in {:.2f} minutes'.format(
# ix, (tf - ti) / 60.))
self._bootstrap_samples[ix]['L'] = l_cs_value
self._bootstrap_samples[ix]['R'] = r_cs_value
self._bootstrap_samples[ix]['beta'] = beta_value
if verbose:
progress(bootstraps, bootstraps, status=' {:.2f} minutes'.format(
(time() - ti) / 60
))
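    # A minimal sketch (not part of the original class) of the stopping rule
    # driving the loop above: alternate the two matrix minimizations until
    # the relative decrease of the objective falls below a tolerance.
    # All names below are illustrative.
    #
    #     def run_until_converged(step, objective, eps=0.01, max_iter=100):
    #         old = objective()
    #         for _ in range(max_iter):
    #             step()                  # one L-then-R (or R-then-L) pass
    #             new = objective()
    #             if (old - new) / old < eps:
    #                 break
    #             old = new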
def _calculate_objective(self, mu_l, mu_r, tau, l_cs_value, r_cs_value,
beta_value, weights, sum_components=True):
weights_w1 = np.diag(weights)
# Note: Not using cvx.sum and cvx.abs as in following caused
# an error at * weights_w1:
# ValueError: operands could not be broadcast together with shapes
# (288,1300) (1300,1300)
# term_f1 = sum((0.5 * abs(
# self._power_signals_d - l_cs_value.dot(r_cs_value))
# + (tau - 0.5)
# * (self._power_signals_d - l_cs_value.dot(r_cs_value)))
# * weights_w1)
term_f1 = (cvx.sum((0.5 * cvx.abs(
self._power_signals_d - l_cs_value.dot(r_cs_value))
+ (tau - 0.5) * (self._power_signals_d - l_cs_value.dot(
r_cs_value))) @ weights_w1) ).value
weights_w2 = np.eye(self._rank_k)
term_f2 = mu_l * norm((l_cs_value[:-2, :] - 2 * l_cs_value[1:-1, :] +
l_cs_value[2:, :]).dot(weights_w2), 'fro')
term_f3 = mu_r * norm(r_cs_value[:, :-2] - 2 * r_cs_value[:, 1:-1] +
r_cs_value[:, 2:], 'fro')
if r_cs_value.shape[1] < 365 + 2:
term_f4 = 0
else:
# Note: it was cvx.norm. Check if this modification makes a
# difference:
# term_f4 = (mu_r * norm(
# r_cs_value[1:, :-365] - r_cs_value[1:, 365:], 'fro'))
term_f4 = ((mu_r * cvx.norm(
r_cs_value[1:, :-365] - r_cs_value[1:, 365:], 'fro'))).value
components = [term_f1, term_f2, term_f3, term_f4]
objective = sum(components)
if sum_components:
return objective
else:
return components
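    # Hedged NumPy sketch (not in the original module) of the quantile
    # ("pinball") loss evaluated by term_f1; `y`, `y_hat`, and `tau` are
    # illustrative names:
    #
    #     import numpy as np
    #     def pinball_loss(y, y_hat, tau):
    #         resid = y - y_hat
    #         # 0.5*|r| + (tau - 0.5)*r equals tau*r for r >= 0 and
    #         # (tau - 1)*r for r < 0: the standard quantile loss.
    #         return np.sum(0.5 * np.abs(resid) + (tau - 0.5) * resid)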
def _obtain_hyper_parameters(self, mu_l, mu_r, tau):
if mu_l is None and self._state_data.mu_l is not None:
mu_l = self._state_data.mu_l
if mu_r is None and self._state_data.mu_r is not None:
mu_r = self._state_data.mu_r
if tau is None and self._state_data.tau is not None:
tau = self._state_data.tau
return mu_l, mu_r, tau
def _obtain_initial_values(self):
if self._state_data.l_value.size > 0:
l_cs_value = self._state_data.l_value
else:
l_cs_value = self._decomposition.matrix_l0
if self._state_data.r_value.size > 0:
r_cs_value = self._state_data.r_value
else:
r_cs_value = self._decomposition.matrix_r0
        # Equivalent to the original nonzero check: when the stored beta is
        # 0.0 (the unset default), the result is 0.0 either way.
        beta_value = self._state_data.beta_value
self._keep_result_variables_as_properties(l_cs_value, r_cs_value,
beta_value)
return l_cs_value, r_cs_value, beta_value
def _obtain_initial_component_r0(self, verbose=True):
        # if verbose:
        #     print('obtaining initial value of component r0')
if self._state_data.component_r0.size > 0:
# component_r0 = self._state_data.component_r0
component_r0 = np.ones(self._decomposition.matrix_r0.shape[1])
else:
# component_r0 = self._get_linearization_helper().obtain_component_r0(
# self._decomposition.matrix_r0, index_set=self.weights > 1e-3)
component_r0 = np.ones(self._decomposition.matrix_r0.shape[1])
return component_r0
def _obtain_weights(self, verbose=True):
        # if verbose:
        #     print('obtaining weights')
if self._state_data.weights.size > 0:
weights = self._state_data.weights
else:
weights = self._get_weight_setting().obtain_weights(
self._power_signals_d)
if self._test_days is not None:
weights[self._test_days] = 0
self._weights = weights
return weights
def _set_testdays(self, power_signals_d, reserve_test_data):
if reserve_test_data:
m, n = power_signals_d.shape
day_indices = np.arange(n)
num = int(n * reserve_test_data)
self._test_days = np.sort(np.random.choice(day_indices, num,
replace=False))
else:
self._test_days = None
    def _set_residuals(self):
        # Each stored residual statistic is None when unset, so direct
        # assignment is equivalent to the original None checks.
        self._residuals_median = self._state_data.residuals_median
        self._residuals_variance = self._state_data.residuals_variance
        self._residual_l0_norm = self._state_data.residual_l0_norm
def _analyze_residuals(self, l_cs_value, r_cs_value, weights):
# Residual analysis
weights_w1 = np.diag(weights)
wres = np.dot(l_cs_value.dot(
r_cs_value) - self._power_signals_d, weights_w1)
use_days = np.logical_not(np.isclose(np.sum(wres, axis=0), 0))
scaled_wres = wres[:, use_days] / np.average(
self._power_signals_d[:, use_days])
final_metric = scaled_wres[
self._power_signals_d[:, use_days] > 1e-3]
self._residuals_median = np.median(final_metric)
self._residuals_variance = np.power(np.std(final_metric), 2)
self._residual_l0_norm = np.linalg.norm(
self._matrix_l0[:, 0] - l_cs_value[:, 0])
def _get_linearization_helper(self):
"""
For dependency injection for testing, i.e. for injecting mock.
"""
if ((not hasattr(self, '_linearization_helper')) or
(self._linearization_helper is None)):
return LinearizationHelper(solver_type=self._solver_type)
else: # This must be mock object inject from test
return self._linearization_helper
def set_linearization_helper(self, value):
"""
For dependency injection for testing, i.e. for injecting mock.
"""
self._linearization_helper = value
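    # Hedged sketch of how these setters enable dependency injection in
    # tests; `fitting` stands for an instance of this class and the mock
    # return value is illustrative:
    #
    #     from unittest import mock
    #     import numpy as np
    #     helper = mock.Mock()
    #     helper.obtain_component_r0.return_value = np.ones(365)
    #     fitting.set_linearization_helper(helper)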
def _get_weight_setting(self):
"""
For dependency injection for testing, i.e. for injecting mock.
"""
if ((not hasattr(self, '_weight_setting')) or
(self._weight_setting is None)):
return WeightSetting(solver_type=self._solver_type)
else: # This must be mock object inject from test
return self._weight_setting
def set_weight_setting(self, value):
"""
For dependency injection for testing, i.e. for injecting mock.
"""
self._weight_setting = value
def _get_left_matrix_minimization(self, weights, tau, mu_l,
non_neg_constraints=True):
"""
For dependency injection for testing, i.e. for injecting mock.
"""
if ((not hasattr(self, '_left_matrix_minimization')) or
(self._left_matrix_minimization is None)):
return LeftMatrixMinimization(
self._power_signals_d, self._rank_k, weights, tau, mu_l,
non_neg_constraints=non_neg_constraints, solver_type=self._solver_type)
else: # This must be mock object inject from test
return self._left_matrix_minimization
def set_left_matrix_minimization(self, value):
"""
For dependency injection for testing, i.e. for injecting mock.
"""
self._left_matrix_minimization = value
def _get_right_matrix_minimization(self, weights, tau, mu_r,
non_neg_constraints=True, is_degradation_calculated=True,
max_degradation=None, min_degradation=None):
"""
For dependency injection for testing, i.e. for injecting mock.
"""
if ((not hasattr(self, '_right_matrix_minimization')) or
(self._right_matrix_minimization is None)):
return RightMatrixMinimization(
self._power_signals_d, self._rank_k, weights, tau, mu_r,
non_neg_constraints=non_neg_constraints,
is_degradation_calculated=is_degradation_calculated,
max_degradation=max_degradation,
min_degradation=min_degradation,
solver_type=self._solver_type)
else: # This must be mock object inject from test
return self._right_matrix_minimization
def set_right_matrix_minimization(self, value):
"""
For dependency injection for testing, i.e. for injecting mock.
"""
self._right_matrix_minimization = value
def _keep_result_variables_as_properties(self, l_cs_value, r_cs_value,
beta_value):
self._l_cs_value = l_cs_value
self._r_cs_value = r_cs_value
self._beta_value = beta_value
def _keep_supporting_parameters_as_properties(self, weights):
self._weights = weights
def _store_initial_state_data(self):
self._state_data.power_signals_d = self._power_signals_d
self._state_data.rank_k = self._rank_k
self._state_data.matrix_l0 = self._matrix_l0
self._state_data.matrix_r0 = self._matrix_r0
self._state_data.mu_l = 5e2
self._state_data.mu_r = 1e3
self._state_data.tau = 0.85
def _store_minimization_state_data(self, mu_l, mu_r, tau,
l_cs_value, r_cs_value, beta_value, component_r0):
self._state_data.mu_l = mu_l
self._state_data.mu_r = mu_r
self._state_data.tau = tau
self._state_data.l_value = l_cs_value
self._state_data.r_value = r_cs_value
self._state_data.beta_value = beta_value
self._state_data.component_r0 = component_r0
def _store_final_state_data(self, weights):
self._state_data.residuals_median = self._residuals_median
self._state_data.residuals_variance = self._residuals_variance
self._state_data.residual_l0_norm = self._residual_l0_norm
self._state_data.weights = weights
| 36,042 | 42.530193 | 111 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/exception.py
|
"""
Defines exceptions used in the context of this module "algorithm"
"""
class ProblemStatusError(Exception):
"""Error thrown when SCSF algorithm experiences something other than
an 'optimal' problem status during one of
the solve steps."""
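# Hedged usage sketch (not part of this module): callers typically catch
# this error alongside the solver's own exception type, as the fitting
# loop does. The `minimizer` name below is illustrative.
#
#     try:
#         minimizer.minimize(l0, r0, beta0, r0_component)
#     except ProblemStatusError as e:
#         print(e)    # e.g. restart with a different minimization order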
| 255 | 27.444444 | 72 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/__init__.py
| 0 | 0 | 0 |
py
|
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/minimization/right_matrix_no_constraints.py
|
"""
This module defines functionality unique to right matrix minimization.
"""
import cvxpy as cvx
from statistical_clear_sky.algorithm.minimization.abstract\
import AbstractMinimization
from statistical_clear_sky.algorithm.exception import ProblemStatusError
class RightMatrixModifiedMinimization(AbstractMinimization):
"""
    Uses the minimization method in the parent class with a fixed left
    matrix value, keeping the right matrix as a variable.
"""
def __init__(self, power_signals_d, rank_k, weights, tau, mu_r,
is_degradation_calculated=True,
max_degradation=0., min_degradation=-0.25,
non_neg_constraints=True, solver_type='ECOS'):
super().__init__(power_signals_d, rank_k, weights, tau,
non_neg_constraints=non_neg_constraints, solver_type=solver_type)
self._mu_r = mu_r
self._is_degradation_calculated = is_degradation_calculated
self._max_degradation = max_degradation
self._min_degradation = min_degradation
def _define_variables_and_parameters(self, l_cs_value, r_cs_value, beta_value, component_r0):
self.left_matrix = cvx.Parameter(shape=(self._power_signals_d.shape[0],
self._rank_k))
self.left_matrix.value = l_cs_value
self.right_matrix = cvx.Variable(shape=(self._rank_k,
self._power_signals_d.shape[1]))
self.right_matrix.value = r_cs_value
self.beta = cvx.Variable()
self.beta.value = beta_value
self.r0 = cvx.Parameter(len(component_r0))
self.r0.value = 1. / component_r0
return
def _update_parameters(self, l_cs_value, r_cs_value, beta_value, component_r0):
self.left_matrix.value = l_cs_value
self.beta.value = beta_value
self.r0.value = 1. / component_r0
def _term_f2(self, l_cs_param, r_cs_param):
'''
Apply smoothness constraint to all rows of right matrix
'''
r_tilde = self._obtain_r_tilde(r_cs_param)
term_f2 = self._mu_r * cvx.norm(r_tilde[:, :-2] - 2
* r_tilde[:, 1:-1] + r_tilde[:, 2:], 'fro')
return term_f2
def _term_f3(self, l_cs_param, r_cs_param, beta_param, component_r0):
'''
        Apply a periodicity penalty to the rows of the right matrix
        (excluding the first row when more than a year of data is present),
        plus a degradation term on the first row for multi-year data
'''
r_tilde = self._obtain_r_tilde(r_cs_param)
if self._power_signals_d.shape[1] > 365:
term_f3 = self._mu_r * cvx.norm(r_tilde[1:, :-365]
- r_tilde[1:, 365:], 'fro')
else:
term_f3 = self._mu_r * cvx.norm(r_tilde[:, :-365]
- r_tilde[:, 365:], 'fro')
if self._power_signals_d.shape[1] > 365:
r = r_cs_param[0, :].T
if self._is_degradation_calculated:
term_f3 += cvx.norm1(
cvx.multiply(component_r0[:-365], r[365:] - r[:-365])
- beta_param
)
else:
term_f3 += cvx.norm1(
cvx.multiply(component_r0[:-365], r[365:] - r[:-365])
)
return term_f3
def _constraints(self, l_cs_param, r_cs_param, beta_param, component_r0):
constraints = []
# if self._power_signals_d.shape[1] > 365:
# r = r_cs_param[0, :].T
# if self._is_degradation_calculated:
# constraints.extend([
# cvx.multiply(component_r0[:-365], r[365:] - r[:-365]) == beta_param
# ])
# if self._max_degradation is not None:
# constraints.append(
# beta_param <= self._max_degradation)
# if self._min_degradation is not None:
# constraints.append(
# beta_param >= self._min_degradation)
# else:
# constraints.append(cvx.multiply(component_r0[:-365],
# r[365:] - r[:-365]) == 0)
if self._non_neg_constraints:
constraints.extend([
l_cs_param @ r_cs_param >= 0,
r_cs_param[0] >= 0
])
return constraints
def _handle_exception(self, problem):
if problem.status != 'optimal':
raise ProblemStatusError('Minimize R status: ' + problem.status)
def _obtain_r_tilde(self, r_cs_param):
'''
This function handles the smoothness and periodicity constraints when
the data set is less than a year long. It operates by filling out the
rest of the year with blank variables, which are subsequently dropped
after the problem is solved.
:param r_cs_param: the right matrix CVX variable
:return: A cvx variable with second dimension at least 367
'''
if r_cs_param.shape[1] < 365 + 2:
n_tilde = 365 + 2 - r_cs_param.shape[1]
r_tilde = cvx.hstack([r_cs_param,
cvx.Variable(shape=(self._rank_k, n_tilde))])
else:
r_tilde = r_cs_param
return r_tilde
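# Hedged NumPy analogue (not in the original file) of the padding trick in
# _obtain_r_tilde: extend a short matrix to 367 columns so the smoothness
# and periodicity slices are well defined. Shapes are illustrative.
#
#     import numpy as np
#     r = np.zeros((4, 200))                  # under one year of days
#     pad = 365 + 2 - r.shape[1]              # 167 extra columns
#     r_tilde = np.hstack([r, np.zeros((4, pad))])
#     assert r_tilde.shape == (4, 367)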
| 5,241 | 40.936 | 97 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/minimization/right_matrix.py
|
"""
This module defines functionality unique to right matrix minimization.
"""
import cvxpy as cvx
from statistical_clear_sky.algorithm.minimization.abstract\
import AbstractMinimization
from statistical_clear_sky.algorithm.exception import ProblemStatusError
class RightMatrixMinimization(AbstractMinimization):
"""
    Uses the minimization method in the parent class with a fixed left
    matrix value, keeping the right matrix as a variable.
"""
def __init__(self, power_signals_d, rank_k, weights, tau, mu_r,
is_degradation_calculated=True,
max_degradation=0., min_degradation=-0.25,
non_neg_constraints=True, solver_type='ECOS'):
super().__init__(power_signals_d, rank_k, weights, tau,
non_neg_constraints=non_neg_constraints, solver_type=solver_type)
self._mu_r = mu_r
self._is_degradation_calculated = is_degradation_calculated
self._max_degradation = max_degradation
self._min_degradation = min_degradation
def _define_variables_and_parameters(self, l_cs_value, r_cs_value, beta_value, component_r0):
self.left_matrix = cvx.Parameter(shape=(self._power_signals_d.shape[0],
self._rank_k))
self.left_matrix.value = l_cs_value
self.right_matrix = cvx.Variable(shape=(self._rank_k,
self._power_signals_d.shape[1]))
self.right_matrix.value = r_cs_value
self.beta = cvx.Variable()
self.beta.value = beta_value
self.r0 = cvx.Parameter(len(component_r0))
self.r0.value = 1. / component_r0
return
def _update_parameters(self, l_cs_value, r_cs_value, beta_value, component_r0):
self.left_matrix.value = l_cs_value
self.beta.value = beta_value
self.r0.value = 1. / component_r0
def _term_f2(self, l_cs_param, r_cs_param):
'''
Apply smoothness constraint to all rows of right matrix
'''
r_tilde = self._obtain_r_tilde(r_cs_param)
term_f2 = self._mu_r * cvx.norm(r_tilde[:, :-2] - 2
* r_tilde[:, 1:-1] + r_tilde[:, 2:], 'fro')
return term_f2
def _term_f3(self, l_cs_param, r_cs_param):
'''
        Apply a periodicity penalty to the rows of the right matrix
        (excluding the first row when more than a year of data is present)
'''
r_tilde = self._obtain_r_tilde(r_cs_param)
if self._power_signals_d.shape[1] > 365:
term_f3 = self._mu_r * cvx.norm(r_tilde[1:, :-365]
- r_tilde[1:, 365:], 'fro')
else:
term_f3 = self._mu_r * cvx.norm(r_tilde[:, :-365]
- r_tilde[:, 365:], 'fro')
return term_f3
def _constraints(self, l_cs_param, r_cs_param, beta_param, component_r0):
constraints = []
if self._power_signals_d.shape[1] > 365:
r = r_cs_param[0, :].T
if self._is_degradation_calculated:
constraints.extend([
cvx.multiply(component_r0[:-365], r[365:] - r[:-365]) == beta_param
])
if self._max_degradation is not None:
constraints.append(
beta_param <= self._max_degradation)
if self._min_degradation is not None:
constraints.append(
beta_param >= self._min_degradation)
else:
constraints.append(cvx.multiply(component_r0[:-365],
r[365:] - r[:-365]) == 0)
if self._non_neg_constraints:
constraints.extend([
l_cs_param @ r_cs_param >= 0,
r_cs_param[0] >= 0
])
return constraints
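    # Note: the `component_r0` parameter received here holds the reciprocal
    # 1/r0 (it is set to 1. / component_r0 in
    # _define_variables_and_parameters), so the constraint expresses the
    # fractional year-over-year change of the first right-matrix row, which
    # is affine in r. Hedged numeric sketch (illustrative values):
    #
    #     r0, r_prev, r_next = 100., 100., 99.
    #     beta = (1. / r0) * (r_next - r_prev)    # -0.01, i.e. -1% per year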
def _handle_exception(self, problem):
if problem.status != 'optimal':
raise ProblemStatusError('Minimize R status: ' + problem.status)
def _obtain_r_tilde(self, r_cs_param):
'''
This function handles the smoothness and periodicity constraints when
the data set is less than a year long. It operates by filling out the
rest of the year with blank variables, which are subsequently dropped
after the problem is solved.
:param r_cs_param: the right matrix CVX variable
:return: A cvx variable with second dimension at least 367
'''
if r_cs_param.shape[1] < 365 + 2:
n_tilde = 365 + 2 - r_cs_param.shape[1]
r_tilde = cvx.hstack([r_cs_param,
cvx.Variable(shape=(self._rank_k, n_tilde))])
else:
r_tilde = r_cs_param
return r_tilde
| 4,734 | 40.535088 | 97 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/minimization/abstract.py
|
"""
This module defines common functionality of minimization problem solution
process.
Since there is common code for minimization of both L matrix and R matrix,
the common code is placed in the abstract base class.
"""
from abc import abstractmethod
import cvxpy as cvx
import numpy as np
class AbstractMinimization():
"""
Abstract class for minimization that uses the same equation but
the subclasses fix either L (left) matrix value or R (right) matrix
value.
"""
def __init__(self, power_signals_d, rank_k, weights, tau,
non_neg_constraints=True, solver_type='ECOS'):
self._power_signals_d = power_signals_d
self._rank_k = rank_k
self._weights = cvx.Parameter(shape=len(weights), value=weights,
nonneg=True)
self._tau = tau
self._non_neg_constraints = non_neg_constraints
self._solver_type = solver_type
self._problem = None
self.left_matrix = None
self.right_matrix = None
self.beta = None
self.r0 = None
def minimize(self, l_cs_value, r_cs_value, beta_value, component_r0, tol=1e-8):
if self._problem is None:
self._construct_problem(l_cs_value, r_cs_value, beta_value, component_r0)
else:
self._update_parameters(l_cs_value, r_cs_value, beta_value, component_r0)
self._problem.solve(solver=self._solver_type)
# self._problem.solve(solver='MOSEK', mosek_params={
# 'MSK_DPAR_INTPNT_CO_TOL_PFEAS': tol,
# 'MSK_DPAR_INTPNT_CO_TOL_DFEAS': tol,
# 'MSK_DPAR_INTPNT_CO_TOL_REL_GAP': tol,
# 'MSK_DPAR_INTPNT_CO_TOL_INFEAS': tol
# })
self._handle_exception(self._problem)
return self._result()
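    # The construct-once / update-parameters pattern used by minimize() is
    # the standard CVXPY warm-start idiom: only Parameter values change
    # between solves, so the problem is not rebuilt on every call.
    # Hedged sketch (illustrative, not part of the original file):
    #
    #     import cvxpy as cvx
    #     import numpy as np
    #     x = cvx.Variable(3)
    #     b = cvx.Parameter(3)
    #     prob = cvx.Problem(cvx.Minimize(cvx.sum_squares(x - b)))
    #     for val in (np.ones(3), 2 * np.ones(3)):
    #         b.value = val
    #         prob.solve()    # re-solves without rebuilding the problem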
@abstractmethod
    def _define_variables_and_parameters(self, l_cs_value, r_cs_value,
                                         beta_value, component_r0):
        pass
def update_weights(self, weights):
self._weights.value = weights
def _construct_problem(self, l_cs_value, r_cs_value, beta_value, component_r0):
self._define_variables_and_parameters(l_cs_value, r_cs_value, beta_value, component_r0)
objective = cvx.Minimize(self._term_f1(self.left_matrix, self.right_matrix)
+ self._term_f2(self.left_matrix, self.right_matrix)
+ self._term_f3(self.left_matrix, self.right_matrix))
constraints = self._constraints(self.left_matrix, self.right_matrix, self.beta,
self.r0)
problem = cvx.Problem(objective, constraints)
self._problem = problem
@abstractmethod
    def _update_parameters(self, l_cs_value, r_cs_value, beta_value,
                           component_r0):
        pass
def _term_f1(self, l_cs_param, r_cs_param):
"""
This method defines the generic from of the first term of objective
function, which calculates a quantile regression cost function,
element-wise, between the PV power matrix (`self._power_signals_d`)
and the low-rank model (`l_cs_param * r_cs_param`).
Subclass defines which of l_cs and r_cs value is fixed.
"""
weights_w1 = cvx.diag(self._weights)
return cvx.sum((0.5 * cvx.abs(self._power_signals_d
- l_cs_param @ r_cs_param)
+ (self._tau - 0.5) * (self._power_signals_d
- l_cs_param @ r_cs_param))
@ weights_w1)
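    # Hedged NumPy sketch (not in the original file) of what right-
    # multiplying by diag(weights) does in _term_f1: each day (column) of
    # the element-wise loss is scaled, so zero-weight days drop out.
    #
    #     import numpy as np
    #     loss = np.ones((3, 4))
    #     w = np.array([1., 0., 0.5, 0.])
    #     weighted = loss @ np.diag(w)        # columns scaled by w
    #     assert np.isclose(weighted.sum(), 3 * 1.5)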
@abstractmethod
def _term_f2(self, l_cs_param, r_cs_param):
pass
@abstractmethod
def _term_f3(self, l_cs_param, r_cs_param):
pass
@abstractmethod
def _constraints(self, l_cs_param, r_cs_param, beta_param, component_r0):
pass
@abstractmethod
def _handle_exception(self, problem):
pass
def _result(self):
return self.left_matrix.value, self.right_matrix.value, self.beta.value
| 3,873 | 36.25 | 95 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/minimization/__init__.py
| 0 | 0 | 0 |
py
|
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/minimization/left_matrix.py
|
"""
This module defines functionality unique to left matrix minimization.
"""
import cvxpy as cvx
import numpy as np
from statistical_clear_sky.algorithm.minimization.abstract\
import AbstractMinimization
from statistical_clear_sky.algorithm.exception import ProblemStatusError
class LeftMatrixMinimization(AbstractMinimization):
"""
    Uses the minimization method in the parent class with a fixed right
    matrix value, keeping the left matrix as a variable.
"""
def __init__(self, power_signals_d, rank_k, weights, tau, mu_l,
non_neg_constraints=True, solver_type='ECOS'):
super().__init__(power_signals_d, rank_k, weights, tau,
non_neg_constraints=non_neg_constraints, solver_type=solver_type)
self._mu_l = mu_l
def _define_variables_and_parameters(self, l_cs_value, r_cs_value, beta_value, component_r0):
self.left_matrix = cvx.Variable(shape=(self._power_signals_d.shape[0],
self._rank_k))
self.left_matrix.value = l_cs_value
self.right_matrix = cvx.Parameter(shape=(self._rank_k,
self._power_signals_d.shape[1]))
self.right_matrix.value = r_cs_value
self.beta = cvx.Variable()
self.beta.value = beta_value
self.r0 = cvx.Parameter(len(component_r0))
self.r0.value = 1. / component_r0
return
def _update_parameters(self, l_cs_value, r_cs_value, beta_value, component_r0):
self.right_matrix.value = r_cs_value
self.beta.value = beta_value
self.r0.value = 1. / component_r0
def _term_f2(self, l_cs_param, r_cs_param):
weights_w2 = np.eye(self._rank_k)
term_f2 = self._mu_l * cvx.norm((l_cs_param[:-2, :] - 2
* l_cs_param[1:-1, :] + l_cs_param[2:, :]) @ weights_w2, 'fro')
return term_f2
def _term_f3(self, l_cs_param, r_cs_param):
return 0
def _constraints(self, l_cs_param, r_cs_param, beta_param, component_r0):
constraints = [cvx.sum(l_cs_param[:, 1:], axis=0) == 0]
ixs = self._handle_bad_night_data()
if sum(ixs) > 0:
constraints.append(l_cs_param[ixs, :] == 0)
if self._non_neg_constraints:
constraints.extend([
l_cs_param @ r_cs_param >= 0,
l_cs_param[:, 0] >= 0
])
return constraints
def _handle_exception(self, problem):
if problem.status != 'optimal':
raise ProblemStatusError('Minimize L status: ' + problem.status)
def _handle_bad_night_data(self):
'''
Method for generating the "nighttime" index set
This method finds the (approximate) set of time stamps that correspond with
nighttime across all seasons in the given data.
Old method looked for timestamps with an average power across all days that is
smaller than 0.5% of the max power value in the data set.
New method still looks for timestamps with values below 0.5% of the max, but
then converts this to a sparsity by row, and returns the rows with a sparsity
of greater than 96%. This approach is more robust than the old method because
it is not sensitive to the magnitude of any spurious nighttime data values.
:return:
'''
data = self._power_signals_d
row_sparsity = 1 - np.sum(data > 0.005 * np.max(data), axis = 1) / data.shape[1]
threshold = 0.96
#ix_array = np.average(self._power_signals_d, axis=1) / np.max(
# np.average(self._power_signals_d, axis=1)) <= 0.005
ix_array = row_sparsity >= threshold
return ix_array
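# Hedged toy example (not in the original file) of the sparsity test in
# _handle_bad_night_data: a row that is almost always below 0.5% of the
# matrix maximum is flagged as nighttime.
#
#     import numpy as np
#     data = np.zeros((2, 100))
#     data[0, :] = 1.0                 # daytime row: never sparse
#     data[1, :2] = 0.2                # night row with two spurious values
#     sparsity = 1 - np.sum(data > 0.005 * np.max(data), axis=1) / 100
#     assert list(sparsity >= 0.96) == [False, True]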
| 3,700 | 40.58427 | 97 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/initialization/singular_value_decomposition.py
|
"""
This module defines the class for Singular Value Decomposition related
operations.
"""
import numpy as np
class SingularValueDecomposition:
"""
    Class to perform various calculations based on Singular Value Decomposition.
"""
def decompose(self, power_signals_d, rank_k=4):
"""
Arguments
---------
power_signals_d : numpy array
Representing a matrix with row for dates and column for time of day,
containing input power signals.
Keyword arguments
-----------------
rank_k : integer
Rank of the resulting low rank matrices.
"""
(left_singular_vectors_u, singular_values_sigma,
right_singular_vectors_v) = np.linalg.svd(power_signals_d)
left_singular_vectors_u, right_singular_vectors_v = \
self._adjust_singular_vectors(left_singular_vectors_u,
right_singular_vectors_v)
self._left_singular_vectors_u = left_singular_vectors_u
self._singular_values_sigma = singular_values_sigma
self._right_singular_vectors_v = right_singular_vectors_v
self._matrix_l0 = self._left_singular_vectors_u[:, :rank_k]
self._matrix_r0 = np.diag(self._singular_values_sigma[:rank_k]).dot(
right_singular_vectors_v[:rank_k, :])
def _adjust_singular_vectors(self, left_singular_vectors_u,
right_singular_vectors_v):
if np.sum(left_singular_vectors_u[:, 0]) < 0:
left_singular_vectors_u[:, 0] *= -1
right_singular_vectors_v[0] *= -1
return left_singular_vectors_u, right_singular_vectors_v
@property
def left_singular_vectors_u(self):
return self._left_singular_vectors_u
@property
def singular_values_sigma(self):
return self._singular_values_sigma
@property
def right_singular_vectors_v(self):
return self._right_singular_vectors_v
@property
def matrix_l0(self):
return self._matrix_l0
@property
def matrix_r0(self):
return self._matrix_r0
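# Hedged usage sketch (not part of the original module); the power matrix
# below is illustrative:
#
#     import numpy as np
#     power_signals_d = np.random.rand(288, 365)   # (times of day, days)
#     svd = SingularValueDecomposition()
#     svd.decompose(power_signals_d, rank_k=4)
#     approx = svd.matrix_l0 @ svd.matrix_r0       # rank-4 reconstruction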
| 2,137 | 30.910448 | 80 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/initialization/linearization_helper.py
|
"""
This module defines helper class for the purpose of linearization.
(Named as a helper instead of util since it doesn't directly do liniearization.)
"""
import numpy as np
import cvxpy as cvx
class LinearizationHelper(object):
"""
    Delegate class that takes care of obtaining a value used to make a
    constraint linear, so that the optimization problem becomes a convex
    optimization problem.
"""
def __init__(self, solver_type='ECOS'):
"""
Keyword arguments
-----------------
solver_type : SolverType Enum
Type of solver.
See statistical_clear_sky.solver_type.SolverType for valid solvers.
"""
self._solver_type = solver_type
def obtain_component_r0(self, initial_r_cs_value, index_set=None):
"""
        Obtains the initial r0 values that are used in place of the variable
        denominator of the degradation equation.
Removed duplicated code from the original implementation.
Arguments
-----------------
initial_r_cs_value : numpy array
Initial low dimension right matrix.
Returns
-------
numpy array
            The values used to make the degradation constraint linear.
"""
component_r0 = initial_r_cs_value[0]
if index_set is None:
index_set = component_r0 > 1e-3 * np.percentile(component_r0, 95)
x = cvx.Variable(initial_r_cs_value.shape[1])
objective = cvx.Minimize(
cvx.sum(0.5 * cvx.abs(component_r0[index_set] - x[index_set]) + (.9 - 0.5) *
(component_r0[index_set] - x[index_set])) + 1e3 * cvx.norm(cvx.diff(x, k=2)))
if initial_r_cs_value.shape[1] > 365:
constraints = [cvx.abs(x[365:] - x[:-365]) <= 1e-2 * np.percentile(component_r0, 95)]
else:
constraints = []
problem = cvx.Problem(objective, constraints)
problem.solve(solver=self._solver_type)
result_component_r0 = x.value
return result_component_r0
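# Hedged sketch (not in the original file) of why the smoothed r0 helps:
# the degradation ratio r[j+365] / r[j] is not convex in r, but with r0
# fixed, (r[j+365] - r[j]) / r0[j] is affine. A lag of 2 stands in for the
# 365-day lag below, purely for illustration:
#
#     import numpy as np
#     r0 = np.array([100., 100.])
#     r = np.array([100., 100., 99., 99.])    # two "years" of two days
#     rate = (r[2:] - r[:-2]) / r0            # approx -1% per "year"
#     assert np.allclose(rate, -0.01)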
| 2,108 | 34.15 | 97 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/initialization/weight_setting.py
|
"""
This module defines a class for Weight Setting Algorithm.
"""
import numpy as np
class WeightSetting(object):
"""
Delegate class.
Weight Setting Algorithm:
Two metrics are calculated and normalized to the interval [0, 1],
and then the geometric mean is taken.
Metric 1: daily smoothness
Metric 2: seasonally weighted daily energy
    After calculating the geometric mean of these two values, weights below
    a threshold are set to zero.
"""
def __init__(self, solver_type='ECOS'):
self._solver_type = solver_type
def obtain_weights(self, power_signals_d):
try:
from solardatatools.clear_day_detection import find_clear_days
except ImportError:
print('Weights not set!')
print('Please make sure you have solar-data-tools installed')
weights = np.ones(power_signals_d.shape[1])
else:
weights = find_clear_days(power_signals_d, boolean_out=False)
return weights
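# Hedged usage sketch (not part of the original module); the power matrix
# is illustrative:
#
#     import numpy as np
#     power_signals_d = np.random.rand(288, 365)
#     weights = WeightSetting().obtain_weights(power_signals_d)
#     assert weights.shape == (365,)           # one weight per day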
| 976 | 30.516129 | 75 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/initialization/__init__.py
| 0 | 0 | 0 |
py
|
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/plot/plot_mixin.py
|
"""
This module defines Mixin for plot for IterativeClearSky.
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from solardatatools import plot_2d
class PlotMixin(object):
def plot_lr(self, figsize=(14, 10), show_days=False):
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=figsize)
ax[0, 1].plot(self._r_cs_value[0])
ax[1, 1].plot(self._r_cs_value[1:].T)
ax[0, 0].plot(self._l_cs_value[:, 0])
ax[1, 0].plot(self._l_cs_value[:, 1:])
ax[0, 0].legend(['$\\ell_1$'])
ax[1, 0].legend(['$\\ell_{}$'.format(ix) for ix in range(2,
self._r_cs_value.shape[0] + 1)])
ax[0, 1].legend(['$r_{1}$'])
ax[1, 1].legend(['$r_{}$'.format(ix) for ix in range(2,
self._r_cs_value.shape[0] + 1)])
if show_days:
use_day = self._obtain_weights_for_plotting() > 1e-1
days = np.arange(self._power_signals_d.shape[1])
ax[0, 1].scatter(days[use_day], self._r_cs_value[0][use_day],
color='orange', alpha=0.7)
plt.tight_layout()
return fig
def plot_energy(self, figsize=(12, 6), show_days=True, show_clear=True,
scale_power=False):
if scale_power:
c = 1./ 1000
else:
c = 1.
plt.figure(figsize=figsize)
plt.plot(np.sum(self._power_signals_d, axis=0) * 24 * c / self._power_signals_d.shape[0],
linewidth=1, alpha=0.7, label='measured daily energy')
if show_clear:
plt.plot((self._r_cs_value[0] * np.sum(self._l_cs_value[:, 0])) *
24 * c / self._power_signals_d.shape[0], linewidth=1,
label='estimated clear sky model')
if show_days:
use_day = self._obtain_weights_for_plotting() > 1e-1
days = np.arange(self._power_signals_d.shape[1])
plt.scatter(days[use_day], np.sum(self._power_signals_d,
axis=0)[use_day] * 24 * c / self._power_signals_d.shape[0],
color='orange', alpha=0.7, label='detected clear days')
plt.legend()
plt.title('Daily Energy Signal')
fig = plt.gcf()
return fig
def plot_singular_vectors(self, k=4, figsize=(10, 4), show_days=False):
fig, ax = plt.subplots(nrows=k, ncols=2, figsize=(figsize[0], 2*figsize[1]))
for i in range(k):
ax[i][0].plot(self._matrix_l0.T[i], linewidth=1)
ax[i][0].set_xlim(0, self._power_signals_d.shape[0])
ax[i][0].set_ylabel('$\\ell_{}$'.format(i + 1))
ax[i][1].plot(self._matrix_r0[i], linewidth=1)
ax[i][1].set_xlim(0, self._power_signals_d.shape[1])
ax[i][1].set_ylabel('$r_{}$'.format(i + 1))
ax[-1][0].set_xlabel('$i \\in 1, \\ldots, m$ (5-minute periods in one day)')
ax[-1][1].set_xlabel('$j \\in 1, \\ldots, n$ (days)')
if show_days:
use_day = self._obtain_weights_for_plotting() > 1e-1
days = np.arange(self._power_signals_d.shape[1])
for i in range(k):
ax[i, 1].scatter(days[use_day], self._matrix_r0[i][use_day], color='orange', alpha=0.7)
plt.tight_layout()
return fig
def plot_data_matrix(self, figsize=(12, 6), show_days=False, units='kW'):
if show_days:
use_day = self._obtain_weights_for_plotting() > 1e-1
else:
use_day = None
fig = plot_2d(self._power_signals_d, figsize=figsize, units=units,
clear_days=use_day)
return fig
def plot_measured_clear_matrices(self, figsize=(10, 10), show_days=False,
units='kW'):
with sns.axes_style("white"):
fig, ax = plt.subplots(nrows=2, figsize=figsize, sharex=True)
if show_days:
use_day = self._obtain_weights_for_plotting() > 1e-1
else:
use_day = None
plot_2d(self._power_signals_d, ax=ax[0], clear_days=use_day,
units=units)
plot_2d(self.clear_sky_signals(), ax=ax[1], clear_days=use_day,
units=units)
ax[0].set_xlabel('')
ax[1].set_title('Estimated clear sky power')
# ax[0].set_title('Measured power')
# ax[1].set_title('Estimated clear sky power')
# ax[1].set_xlabel('Day number')
# ax[0].set_yticks([])
# ax[0].set_ylabel('(sunset) Time of day (sunrise)')
# ax[1].set_yticks([])
# ax[1].set_ylabel('(sunset) Time of day (sunrise)')
# if show_days:
# xlim = ax[0].get_xlim()
# ylim = ax[0].get_ylim()
# use_day = self._obtain_weights_for_plotting() > 1e-1
# days = np.arange(self._power_signals_d.shape[1])
# y1 = np.ones_like(days[use_day]) * self._power_signals_d.shape[0] * .99
# ax[0].scatter(days[use_day], y1, marker='|', color='yellow', s=2)
# ax[0].scatter(days[use_day], .995 * y1, marker='|', color='yellow', s=2)
# ax[0].set_xlim(*xlim)
# ax[0].set_ylim(*ylim)
plt.tight_layout()
return fig
def plot_time_series(self, start_day=0, num_days=2, figsize=(8, 4), loc=(.35, .7)):
d1 = start_day
d2 = d1 + num_days
actual = self._power_signals_d[:, d1:d2].ravel(order='F')
clearsky = ((self.clear_sky_signals()))[:, d1:d2].ravel(order='F')
fig, ax = plt.subplots(nrows=1, figsize=figsize)
ax.plot(actual, linewidth=1, label='measured power')
ax.plot(clearsky, linewidth=1, color='red', label='clear sky signal')
plt.legend(loc=loc)
n = self._power_signals_d.shape[0]
ax.set_xlim(0, n * (d2 - d1))
ax.set_ylabel('kW')
ax.set_xticks(np.arange(0, n * num_days, 4 * 12))
ax.set_xticklabels(np.tile(np.arange(0, 24, 4), num_days))
ax.set_xlabel('Hour of Day')
fig = ax.get_figure()
return fig
def plot_time_series_with_weights(self, fig_title=None, start_day=0, num_days=5,
figsize=(16, 8)):
n = self._power_signals_d.shape[0]
d1 = start_day
d2 = d1 + num_days
actual = self._power_signals_d[:, d1:d2].ravel(order='F')
clearsky = ((self.clear_sky_signals()))[:, d1:d2].ravel(order='F')
fig, ax = plt.subplots(num=fig_title, nrows=2, figsize=figsize, sharex=True,
gridspec_kw={'height_ratios': [3, 1]})
xs = np.linspace(d1, d2, len(actual))
ax[0].plot(xs, actual, alpha=0.4, label='measured power')
ax[0].plot(xs, clearsky, linewidth=1, label='clear sky estimate')
ax[1].plot(xs, np.repeat(self._obtain_weights_for_plotting()[d1:d2],
n), linewidth=1, label='day weight')
ax[0].legend()
ax[1].legend()
# ax[0].set_ylim(0, np.max(actual) * 1.3)
ax[1].set_xlim(d1, d2)
ax[1].set_ylim(0, 1)
ax[1].set_xlabel('day number')
ax[0].set_ylabel('power')
plt.tight_layout()
return fig
def _obtain_weights_for_plotting(self):
'''
        Workaround to avoid running the long-running weight-setting
        optimization in the constructor.
'''
if (not hasattr(self, '_weights')) or (self._weights is None):
self._weights = self._obtain_weights()
return self._weights
| 7,545 | 43.916667 | 103 |
py
|