repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
FPI
|
FPI-master/models/__init__.py
| 0 | 0 | 0 |
py
|
|
WD-selection-function
|
WD-selection-function-main/notebooks/config.py
|
font = {
'family': 'monospace',
'color': 'black',
'weight': 'bold',
'size': 20,
}
cmap_r = 'inferno_r'
cmap = 'inferno'
style = {
'font.family': 'serif',
'font.size': 22,
"axes.titlesize": "medium",
"axes.labelsize": "medium",
'axes.edgecolor': '#000000',
"xtick.direction": "out",
"ytick.direction": "out",
#"xtick.major.size": "8",
#"xtick.minor.size": "4",
#"ytick.major.size": "8",
#"ytick.minor.size": "4",
'xtick.labelsize': 'medium',
'ytick.labelsize': 'medium',
'ytick.color': '#000000',
'xtick.color': '#000000',
'xtick.top': False,
'ytick.right': False,
'axes.spines.top': True,
'axes.spines.right': True,
'axes.linewidth': 1.5,
'image.aspect': 'auto',
'figure.dpi':80
}
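# Minimal usage sketch (assumption: these dicts feed matplotlib; the keys above are
# standard rcParams / fontdict entries):
#   import matplotlib.pyplot as plt
#   plt.rcParams.update(style)            # apply the plotting style defined above
#   plt.imshow(data, cmap=cmap)           # 'data' is a placeholder array
#   plt.title('example title', fontdict=font)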
| 788 | 21.542857 | 32 |
py
|
RefactorF4Acc
|
RefactorF4Acc-master/Parser/Combinators.py
|
#package Parser::Combinators
import types
import re
#
# (c) 2017 Wim Vanderbauwhede
#
VERSION = '0.06'
__all__ =[
'debug',
'apply',
# 'show',
'sequence',
'commaSep',
'choice',
'tryParse',
'maybe',
'regex',
'parens',
'brackets',
'angleBrackets',
'braces',
'char',
'sepBy',
'sepByChar',
'oneOf',
'word',
'identifier',
'natural',
'number',
'symbol',
'greedyUpto',
'upto',
'many',
'many1',
'whiteSpace',
'comma',
'dot',
'semi',
'matches',
'unwrap',
'empty',
'run',
'getParseTree'
]
# I want to write the parser using lists, because sequencing is the most common operation.
# So I need a function to generate the actual parser from the lists
# which is actually a sequence of parsers
# The first arg is a list ref, the second arg is an optional code ref to process the returned list of matches
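# Illustrative sketch of that notation (mirroring the usage in test_Combinators.py);
# word, natural, parens and symbol are the combinators defined further below:
#   decl_parser = [ word, parens( natural ), symbol('::'), word ]   # a list is a sequence
#   ( status, rest, matches ) = apply( decl_parser, 'integer(8) :: x' )
#   apply( {'Type' : word}, 'integer' )   # a dict tags its match: {'Type': 'integer'}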
V=False
def apply( p, str_, *rest ):
V=rest
if V:
print('APPLY parser',p,str_)
if type(p) is types.FunctionType:
if V:
print('FUNCTION ',p,str_)
# note: for barewords we need p()(str_) but this is not consistent I think.
return p(str_)
elif type(p) is list:
if V:
print('LIST ',p,str_)
return sequence(p)(str_)
elif type(p) is dict:
if V:
print( 'DICT ',p,str_ )
if p:
for (k,pp) in p.items():
# ( k, pp ) = p.popitem()
( status, str2, mms ) = apply(pp, str_)
matches = { k : mms }
break
else:
status=0
matches=None
str2=str_
return ( status, str2, matches )
else: # ( ref(p) eq 'REF' ):
if V:
print('ELSE ',p,str_)
print( type(p) )
return p(str_)
# else:
# die show(p)
def foldl( f, acc, ls ):
for elt in ls:
acc = f( acc, elt )
return acc
def sequence( plst):
def gen( str_ ):
def f( acc, p ):
( st1, str1, matches) = acc
if st1 != 0:
# meaning the previous match succeeded or the remainder is empty
( st2, str2, ms ) = apply( p, str1 )
if ( st2 * st1 == 0 ):
return ( 0, str1, [] )
else:
# #say "SEQ matches: " . Dumper(:matches, ms ) if V
if ms is None:
return ( 1, str2, matches )
else:
return ( 1, str2, matches+[ ms ] )
else:
# Can be either match failed or remainder is empty
if st1 == 0:
# Match failed
# #say "SEQ did not match, returning " . Dumper(acc) if V
return acc
else:
# Remainder is empty
return ( 0, str1, matches )
( status, str2, matches ) = foldl( f, ( 1, str_, [] ), plst )
# #say "SEQ AFTER FOLD has matches: "
# . Dumper(matches)
# . ' and status '
# . status
# if V
if ( status == 0 ):
# #say "seq returns empty match []" if V
return ( 0, str_, [] )
# elif proc is not None:
# if type(proc[0]) is types.FunctionType:
## #say "seq returns parsers called on " . Dumper(matches) if V
# return ( 1, str2, proc[0](matches) )
# else:
## #say 'TROUBLE: <', show(plst), '><', show(proc), '>' if V
# return ( 1, str_, matches )
else:
# #say "seq returns " . Dumper(matches) if V
return ( 1, str2, matches )
return gen
# Choice: try every parser in the list until one succeeds or return fail. '<|>' in Parsec
def choice( *parsers ):
def gen( str_ ) :
for p in parsers:
( status, str_, matches ) = apply( p, str_ )
# print( status )
if status == 1:
if V:
print( "choice: remainder => <"+str_+">")
print( "choice: matches => [")
print( matches)
print( "]")
return ( status, str_, matches )
if V:
print( "NONE of the choices matched" )
return ( 0, str_, None ) # was []
return gen
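# Illustrative sketch (mirrors the tests): each alternative is tried in order and the
# first one that succeeds wins, e.g.
#   parser11 = choice( {'Num' : natural}, {'Str' : word} )
#   apply( parser11, '42 choices' )   # the natural alternative matches first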
# Normally, when a parser parses a string, it removes the portion that matched. To get the original string back when the parse fails, wrap the parser in tryParse()
def tryParse( p ):
def gen( str_ ):
# #say "* try( 'str' )" if V
( status, rest, matches ) = p(str_)
if (status):
# #say "try: remainder => <rest>" if V
# #say "try: matches => [" . show(matches) . "]" if V
return ( 1, rest, matches )
else:
# #say "try: match failed => <str>" if V
return ( 0, str_, matches )
return gen
# maybe() is like tryParse() but always succeeds:
# it returns the matches and the consumed string, or the original string and no matches
def maybe( p ):
def gen( str_ ):
# #say "* maybe('str_')" if V
( status, rest, matches ) = apply( p, str_ )
if (status):
# #say "maybe matches: [" . show(matches) . "]" if V
return ( 1, rest, matches )
else:
# #say "maybe: no matches for <str_>" if V
return ( 1, str_, None )
return gen
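# Illustrative sketch: maybe() makes a sub-parser optional, e.g. in the sequence
#   [ word, maybe( parens( natural ) ) ]
# both 'integer(8)' and plain 'integer' parse; when the optional part is absent its
# match is simply None.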
def parens_( p ):
def gen( str_ ):
# #say "* parens(str_)" if V
matches = None
( status, str_3, ch ) = char('(')(str_)
if status == 1 :
# OK, found a '(', now try and parse the rest
str_4 = str_3.lstrip()
# str_4 =~ s/\s*//
( st, str_4s, matches ) = apply( p, str_4 )
# #say "parens: remainder => <str_4s>" if V
# #say "parens: matches => [" . show(matches) . "]" if V
status *= st
if status == 1 :
( st, str_5, ch ) = char(')')(str_4s)
status *= st
if status == 1: # OK!
str_6 = str_5.lstrip()
# #say "parens: remainder => <str_5>" if V
# #say "parens: matches => " . show(matches) . "" if V
return ( 1, str_6, matches )
else: # parse failed on closing paren
# #say "parse failed on closing paren str_5" if V
return ( 0, str_5, matches )
else: # parse failed on ref
# WV20160428 this was str_4, which broke nested parens
#say "parse failed on ref str_ str_4" if V
return ( 0, str_, matches )
else: # parse failed on opening paren
return ( 0, str_3, None )
return gen
def enclosedBy( open_char,close_char, p ):
def gen( str_ ):
# #say "* parens(str_)" if V
matches = None
( status, str_3, ch ) = char( open_char )(str_)
if status == 1 :
# OK, found a '(', now try and parse the rest
str_4 = str_3.lstrip()
# str_4 =~ s/\s*//
( st, str_4s, matches ) = apply( p, str_4 )
# #say "parens: remainder => <str_4s>" if V
# #say "parens: matches => [" . show(matches) . "]" if V
status *= st
if status == 1 :
( st, str_5, ch ) = char( close_char )(str_4s)
status *= st
if status == 1: # OK!
str_6 = str_5.lstrip()
# #say "parens: remainder => <str_5>" if V
# #say "parens: matches => " . show(matches) . "" if V
return ( 1, str_6, matches )
else: # parse failed on closing paren
# #say "parse failed on closing paren str_5" if V
return ( 0, str_5, matches )
else: # parse failed on ref
# WV20160428 this was str_4, which broke nested parens
#say "parse failed on ref str_ str_4" if V
return ( 0, str_, matches )
else: # parse failed on opening paren
return ( 0, str_3, None )
return gen
def parens( p ):
return enclosedBy('(',')',p)
def brackets( p ):
return enclosedBy('[',']',p)
def braces( p ):
return enclosedBy('{','}',p)
def angleBrackets( p ):
return enclosedBy('<','>',p)
def char ( ch ):
def gen( str_ ):
#say "* char('ch', 'str_')" if V
if str_[ 0 : 1 ] == ch :
#say "char: matched \'ch\' " if V
#say "char: remainder <" . substr_( str_, 1 ) . ">" if V
return ( 1, str_[ 1 : ], ch )
else:
return ( 0, str_, None )
return gen
def sepBy ( sep, p ):
def gen (str_ ):
matches = []
( status, str_1, m ) = apply(p, str_)
if status == 1:
matches+=[ m] #.push( m )
( status, str_2, m ) = sep(str_1)
while status == 1:
str_2s = str_2.lstrip()
( st, str_3, m ) = p(str_2s)
matches+=[ m] #.push(m)
( status, str_2, m ) = sep(str_3)
return ( 1, str_2, matches )
else: # first match failed.
return ( 0, str_1, None )
return gen
def sepByChar ( sep, p ):
def gen( str_) :
matches = []
( status, str_1, m ) = apply( p, str_ )
if status:
matches+=[ m ]#.push( m )
( status, str_2, m ) = char(sep)(str_1)
while status:
str_2s = str_2.lstrip()
( st, str_3, m ) = apply( p, str_2s )
matches+=[ m ]#.push( m )
( status, str_2, m ) = char(sep)(str_3)
return ( 1, str_2, matches )
else: # first match failed.
return ( 0, str_1, None )
return gen
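# Illustrative sketch (as in test_Combinators.py): sepByChar('.', word) splits a
# dotted name into its components, e.g.
#   apply( sepByChar('.', word), 'www.python.org ;' )   # matches ['www', 'python', 'org']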
# This is a lexeme parser, so it skips trailing whitespace
# Should be called "identifier" I think
def word_(*dummy):
def gen ( str_ ):
#say "* word( 'str' )" if V
patt = re.compile('^([A-Za-z_]\w*)')
matches = patt.match(str_)
if matches is not None :
patt2=re.compile('^'+matches.group(0)+'\s*')
str_ = patt2.sub('',str_,count=1)
#say "word: remainder => <str>" if V
#say "word: matches => [matches]" if V
return ( 1, str_, matches.group(0) )
else:
#say "word: match failed => <str>" if V
return ( 0, str_, None )
# assumes status is 0|1, str is string, matches is [string]
return gen
def debug_(*dummy):
def gen ( str_ ):
print( "* debug( '"+str_+"' )" )
if str_ == 'debug' :
patt2=re.compile('^debug\s*')
str_ = patt2.sub('',str_,count=1)
print( "debug: remainder => <"+str_+">")
print( "debug: matches => [ 'debug' ]", ( 1, str_, ['debug'] ))
return ( 1, str_, ['debug'] )
else:
print( "debug: match failed => <"+str_+">",( 0, str_, None )
)
return ( 0, str_, None )
# assumes status is 0|1, str is string, matches is [string]
return gen
debug = debug_()
def identifier(*dummy):
def gen ( str_ ):
#say "* word( 'str' )" if V
patt = re.compile('^([a-z_]\w*)')
matches = patt.match(str_)
if matches is not None :
patt2=re.compile('^'+matches.group(0)+'\s*')
str_ = patt2.sub('',str_,count=1)
#say "word: remainder => <str>" if V
#say "word: matches => [matches]" if V
return ( 1, str_, matches.group(0) )
else:
#say "word: match failed => <str>" if V
return ( 0, str_, None )
# assumes status is 0|1, str is string, matches is [string]
return gen
# matches an unsigned integer
def natural_(*dummy):
def gen( str_) :
#say "* natural( 'str_' )" if V
patt = re.compile('^(\d+)')
matches = patt.match(str_)
if matches is not None :
patt2=re.compile('^'+matches.group(0)+'\s*')
str_ = patt2.sub('',str_,count=1)
#say "word: remainder => <str>" if V
#say "word: matches => [matches]" if V
return ( 1, str_, matches.group(0) )
else:
#say "word: match failed => <str>" if V
return ( 0, str_, None )
# assumes status is 0|1, str is string, matches is [string]
return gen
# matches any number
def number():
def gen( str_) :
if V:
print( "* number( '"+str_+"' )\n" )
status = 0
patt = re.compile('^([\-\+]?\d*(?:\.\d*)?(?:[eE][\-\+]?\d*(?:\.\d*)?)?)(\W|[^\.]|)')
matches = patt.match(str_)
if matches is not None :
m = matches.group(1)
check = matches.group(2)
matches = m
if check == '' :
# It means no wrong characters after the number, defined as \w or \.
status = 1
patt2=re.compile('^'+m+'\s*')
str_ = patt2.sub('', str_,count=1)
# otherwise it was not a number
if V:
print( "number: remainder => <str_>\n")
print( "number: matches => [matches]\n" )
return ( status, str_, matches )
else:
if V:
print( "number: match failed => <str_>\n" )
return ( status, str_, None )
# assumes status is 0|1, str_ is a string, matches is [string]
return gen
# As in Parsec, parses a literal and removes trailing whitespace
def symbol ( lit_str_ ):
# patt1=re.compile('(\W)')
# m1 = patt1.match(lit_str_);
# lit_str_ = patt1.sub(m1.group(0),lit_str_)
def gen( str_) :
#say "* symbol( 'lit_str_', 'str_' )" if V
status = 0
patt2 = re.compile('\s*'+lit_str_+'\s*')
m2 = patt2.match(str_)
if m2 is not None:
m = m2.group(0)
matches = lit_str_
status = 1
#str_ =~ s/^\s*lit_str_\s*//
patt3 = re.compile('^\s*'+lit_str_+'\s*')
str_ = patt3.sub('',str_)
#say "symbol: remainder => <str_>" if V
#say "symbol: matches => [matches]" if V
return ( status, str_, matches )
else:
#say "symbol: match failed => <str_>" if V
return ( status, str_, None )
return gen
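# Illustrative sketch: symbol('::') matches the literal '::' and eats the surrounding
# whitespace, so apply( symbol('::'), ' :: u, v' ) leaves 'u, v' as the remainder.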
# This parser parses anything up to the last occurrence of a given literal and trailing whitespace
def greedyUpto ( lit_str_ ):
lit_str_ = re.escape(lit_str_) # escape any regex metacharacters in the literal
#lit_str_ =~ s/(\W)/\\1/g
def gen( str_) :
#say "* greedyUpto( \'lit_str_\', \'str_\' )" if V
#if ( str_ =~ /^(.*)\s*lit_str_\s*/ ):
patt2 = re.compile('^(.*)\s*'+lit_str_+'\s*')
m2 = patt2.match(str_)
if m2 is not None:
matches = m2.group(1).strip()
str_ = str_[m2.end():]
#m =~ s/\s*//
#matches = m is '' ? undef : m
#str_ =~ s/^.*lit_str_\s*//
#say "greedyUpto: remainder => <str_>" if V
#say "greedyUpto: matches => [matches]" if V
return ( 1, str_, matches )
else:
#say "greedyUpto: match failed => <str_>" if V
return ( 0, str_, None )
return gen
# This parser parses anything up to the first occurrence of a given literal and trailing whitespace (non-greedy variant of greedyUpto)
def upto ( lit_str_ ):
lit_str_ = re.escape(lit_str_) # escape any regex metacharacters in the literal
#lit_str_ =~ s/(\W)/\\1/g
def gen( str_) :
#say "* greedyUpto( \'lit_str_\', \'str_\' )" if V
#if ( str_ =~ /^(.*)\s*lit_str_\s*/ ):
patt2 = re.compile('^(.*?)\s*'+lit_str_+'\s*')
m2 = patt2.match(str_)
if m2 is not None:
matches = m2.group(1).strip()
str_ = str_[m2.end():]
#m =~ s/\s*//
#matches = m is '' ? undef : m
#str_ =~ s/^.*lit_str_\s*//
#say "greedyUpto: remainder => <str_>" if V
#say "greedyUpto: matches => [matches]" if V
return ( 1, str_, matches )
else:
#say "greedyUpto: match failed => <str_>" if V
return ( 0, str_, None )
return gen
# Enough rope: this parser will parse whatever the regex is, stripping trailing whitespace
# (note: this definition is shadowed by the identically named one further below)
def regex ( regex_str_ ):
def gen( str_) :
patt = re.compile(regex_str_)
#regex_str_=~s/\*/\\\*/g
matches = patt.match(str_)
if matches is not None:
str_ = patt.sub('',str_,count=1)
matches = matches.group(0)
return ( 1, str_, matches )
return ( 0, str_, matches )
return gen
# `many`, as in Parsec, parses 0 or more occurrences of the specified parser
def many ( parser ):
def gen( str_) :
# print "* many( 'str_' )\n" if V
( status, str_, m ) = parser(str_)
if status:
matches = [m]
while status == 1:
( status, str_, m ) = parser(str_)
if m is not None:
matches+=[ m ]#.push( m )
# print "many: remainder => <str_>\n" if V
# print "many: matches => [" . show(matches) . "]\n" if V
return ( 1, str_, matches )
else: # first match failed.
# print "many: first match failed => <str_>\n" if V
return ( 1, str_, None )
return gen
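# Illustrative sketch: many( word ) on 'Learn Python</a>' collects ['Learn', 'Python']
# and leaves '</a>' as the remainder (cf. the tag parser in test_Combinators.py).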
# `many1`, as in Parsec, parses 1 or more occurrences of the specified parser
def many1 ( parser ):
def gen( str_) :
matches = []
#say "* many( 'str_' )" if V
( status, str_, m ) = parser(str_)
if status == 1:
matches=matches+[ m ]
while status == 1:
( status, str_, m ) = parser(str_)
if m is not None:
matches= matches+[ m ] #.push( m )
#say "many: remainder => <str_>" if V
#say "many: matches => [" . show(matches) . "]" if V
# else: # first match failed.
#say "many: first match failed => <str_>" if V
# return ( 0, str_, None )
return ( 1, str_, matches )
return gen
def punctuation( ch_ ):
def gen( str_) :
#say "* comma( 'str_' )" if V
patt =re.compile('^\s*\\'+ch_+'\s*')
m = patt.match(str_)
str_ = patt.sub('',str_,count=1)
st = 0
if m is not None:
st = 1
#say "comma: match" if V
#else:
#say "comma: match failed" if V
return ( st, str_, None )
return gen
def dot_():
return punctuation('.')
dot = dot_()
# Matches a comma with optional whitespace
def comma_():
def gen( str_) :
#say "* comma( 'str_' )" if V
patt =re.compile('^\s*,\s*')
m = patt.match(str_)
str_ = patt.sub('',str_,count=1)
st = 0
if m is not None:
st = 1
#say "comma: match" if V
#else:
#say "comma: match failed" if V
return ( st, str_, None )
return gen
comma=comma_()
def commaSep( p ):
return sepBy(comma, p)
# Matches a semicolon with optional whitespace
def semi_():
def gen( str_) :
#say "* comma( 'str_' )" if V
patt =re.compile('^\s*;\s*')
m = patt.match(str_)
str_ = patt.sub('',str_,count=1)
st = 0
if m is not None:
st = 1
#say "comma: match" if V
#else:
#say "comma: match failed" if V
return ( st, str_, None )
return gen
semi = semi_()
# strip leading whitespace, always success
def whiteSpace_():
def gen ( str_ ):
#say "* whiteSpace( \'str_\' )" if V
patt = re.compile('^(\s*)')
m = patt.match( str_ ).group(0)
str_1 = str_.lstrip()
return ( 1, str_1, m )
return gen
whiteSpace = whiteSpace_()
def oneOf ( patt_lst ):
def gen( str_) :
#say "* oneOf([" . join( '|',:patt_lst ) . "],'str_')" if V
for p in patt_lst:
( status, str_, matches ) = symbol(p)(str_)
if status:
#say "choice: remainder => <str_>" if V
#say "choice: matches => [" . show(matches) . "]" if V
return ( 1, str_, matches )
return ( 0, str_, None )
return gen
# Enough rope: this parser will parse whatever the regex is, stripping trailing whitespace
def regex ( regex_str_ ):
def gen( str_) :
#regex_str_=~s/\*/\\\*/g
#say "* regex( '/regex_str_/', 'str_' )" if V
patt = re.compile( regex_str_ )
matches = patt.match( str_ )
if matches is not None:
patt2 = re.compile('('+regex_str_+')\s*' )
str_ = patt2.sub('',str_,count=1)
return ( 1, str_, matches.group(0) )
#else:
#say "regex: match failed => <str_>" if V
return ( 0, str_, matches )
# assumes status is 0|1, str_ is a string, matches is [string]
return gen
def matches(x):
return x
def unwrap ( elt_in_array ):
elt = elt_in_array.pop(0) # Perl's shift: take the first element
return elt
def empty( elt_in_array ):
if len(elt_in_array ) > 0:
return False
else:
return True
word = word_()
natural = natural_()
def remove_undefined_values( hlist ):
nhlist=[]
if type(hlist) is list:
for elt in hlist:
if elt is not None:
nelt = remove_undefined_values(elt)
nhlist+=[ nelt ]
hlist = nhlist
elif type(hlist) is dict:
for k in list(hlist.keys()):
hlist[k] = remove_undefined_values( hlist[k] )
if hlist[k] is None:
del hlist[k]
return hlist
def get_tree_as_lists ( list_ ):
hlist = []
for elt in list_:
if ( type(elt) is list and len(elt) > 0 ): # non-empty list
telt = get_tree_as_lists(elt)
if ( type(telt) is dict or len(telt) > 0 ):
hlist+=[ telt ]
else:
hlist+=[ telt ]
elif ( type(elt) is dict ):
# hash: need to process the rhs of the pair
for k in elt.keys():
v = elt[k]
if ( type(v) is not list ):
# not an array => wrap in array and redo
hlist+=[ { k : v } ]
elif ( len(v) == 1 and type( v[0] ) is list ):
# a single-elt array where the elt also an array
tv = get_tree_as_lists(v)
hlist+=[ { k : tv } ]
else:
pv = []
for v_ in v:
if ( type(v_) is list and len(v_) > 0 ):
pv+=[ get_tree_as_lists(v_) ]
elif ( type(v_) is dict ):
pv+=[ get_tree_as_lists( [v_] ) ]
elif ( v_ is not None ):
pv+=[ v_ ]
hlist+=[ { k : pv } ]
if len(hlist)==1:
return hlist[0]
else:
return hlist
def add_to_map ( hmap, k, rv ):
if ( k in hmap ):
if ( type( hmap[k] ) is not list ):
hmap[k] = [ hmap[k] ]
if ( type(rv) is list ):
hmap[k]+= rv
else:
hmap[k]+=[ rv ]
else:
hmap[k] = rv
return hmap
# list to map
# This is a map with a list of matches for every tag, so if the tags are not unique the matches are grouped.
def l2m ( hlist, hmap ):
if ( type(hlist) is list ):
all_scalars=1
for elt in hlist:
if type(elt) is not list and type(elt) is not dict:
all_scalars=0
break
if all_scalars:
return hlist
else:
for elt in hlist:
if ( type(elt) is dict ):
if len( elt.keys() ) == 1:
( k, v ) = elt.popitem()
if ( type(v) is list ):
mv = l2m( v, {} )
hmap=add_to_map( hmap, k, mv )
else:
hmap=add_to_map( hmap, k, v )
else:
raise Exception( 'BOOM!' ) # unexpected multi-key dict
elif ( type(elt) is list ):
mv = l2m( elt, {} )
for k in mv.keys():
hmap=add_to_map( hmap, k, mv[k] )
else:
return elt
elif type(hlist) is dict :
for k in hlist.keys():
hmap=add_to_map( hmap, k, hlist[k] )
else:
return hlist
return hmap
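# Illustrative sketch: repeated tags are grouped by add_to_map, e.g.
#   l2m( [ {'Dim': 'ip'}, {'Dim': 'jp'} ], {} )   # -> {'Dim': ['ip', 'jp']}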
def getParseTree ( m ):
mm = remove_undefined_values(m)
tal = get_tree_as_lists(mm)
map_ = l2m( tal, {})
return map_
# return m
def run ( p, str_ ):
( st, rest, m ) = apply(p, str_ )
# return m
return getParseTree(m)
"""
"parser11 = choice( [ natural(), word() ] )
res11 = parser11("42 choices")
print( res11 )
parser1 = many1( choice( [ natural(), word() ] ) )
res1 = parser1("42 choices")
print( res1 )
parser = sequence( [word(),word(), parens( many1( choice( [natural(),word()] ) ) ), symbol('.') ] )
res = parser("a test (with 2 parens) . ")
print(res)
parser3 = sepBy( comma(), natural())
res3 = parser3("11, 22, 33 ")
print(res3)
snip = '<a href="http://www.python.org">Learn Python</a>'
snip2 = 'href="http://www.python.org 42'
href_attr_parser = sequence( [symbol('href'),symbol('='), symbol('"http://'), sepBy(dot(), word() ) , natural() ])#, symbol('"') ] )
tag_parser = sequence( [
angleBrackets(
sequence( [symbol('a'),symbol('href') ,symbol('='),
symbol('"http://'),
sepByChar('.', word() ),
symbol('"')
] )
),
many1( word() ),
angleBrackets( sequence( [symbol('/a') ] ) )
] )
res_tag = href_attr_parser(snip2)
print( res_tag )
snip4 = 'www.python.org ;'
parser4 = sequence( [sepBy(dot(), word() ), semi() ])#, symbol('"') ] )
res4 = parser4( snip4 )
print( res4 )
"""
| 26,587 | 31.824691 | 136 |
py
|
RefactorF4Acc
|
RefactorF4Acc-master/Parser/test_Combinators.py
|
from Combinators import *
str51 = 'debug'
str52 = 'debug) '
print('*** create parser ***')
dim_parser2 = {'Dim' : debug } # parens( word ) }
print('*** apply parser ***')
res51 =apply( dim_parser2,str51)
print( '** parser after application: ',dim_parser2)
print('str51: ',res51)
print('*** apply parser again ***')
res52 = apply( dim_parser2,str52)
print('str52: ',res52)
#exit(0)
test1 = 'integer'
parser1 = word
res1 = apply( word, test1 )
print(res1)
snip4 = 'www.python.org ; 42'
parser4 = [ { 'URL' : sepBy(dot, word ) }, semi, natural ]
res4 = run(parser4, snip4 )
print( res4 )
print()
parser4c = [ sepByChar('.', word ), semi, natural ]
res4c = apply( parser4c, snip4 )
print( res4c )
snip2 = 'href="http://www.python.org"'
href_attr_parser = sequence( [
symbol('href'),
symbol('='),
symbol('"http://'),
sepBy(dot, word ) , symbol('"')
] )
res_tag = href_attr_parser(snip2)
print( res_tag )
snip = '< a href="http://www.python.org" >Learn Python</a>'
tag_parser = [
angleBrackets(
[ symbol('a') ,href_attr_parser ]
),
many( word ),
angleBrackets( symbol('/a') )
]
href_tag = apply(tag_parser,snip)
print( href_tag )
parser11 = many1( choice( {"Num" : natural} , {"Str":word} ) )
res11 = run( parser11, "42 choices" )
print( res11 )
str2 = ' integer(8) :: var '
p = sequence( [
whiteSpace,
word,
parens( natural ),
symbol('::'),
word
] )
res = p(str2)
print(res)
print( '------------------------------------------------------------------------------' )
type_parser = [
{'Type' : word},
maybe( parens( choice(
{'Kind' : natural},
[
symbol('kind'),
symbol('='),
{'Kind' : natural}
]
)))
]
dim_parser = [
symbol('dimension'),
{'Dim' : parens( sepByChar(',', regex('[^,\)]+'))) }
]
intent_parser = [
symbol('intent'),
{ 'Intent' : parens( word ) }
]
arglist_parser = [
symbol('::'),
{'Args' : sepByChar(',', word) }
]
F95_arg_decl_parser = [
whiteSpace,
{'TypeTup' : type_parser},
maybe(
[
comma,
dim_parser
],
),
maybe(
[
comma,
intent_parser
],
),
symbol('::'),
{'Vars' : commaSep(word)}
]
# where
str1 = ' integer(kind=8), dimension(0:ip, -1:jp+1, kp) , intent( In ) :: u, v,w'
str4 = 'integer(kind=8), dimension(0:ip, -1:jp+1, kp) , intent( In ) :: u, v,w'
str4='integer'
str5 = 'dimension(0:ip, -1:jp+1, kp) '#, intent( In ) :: u, v,w'
str6 = ' intent( In ) '#:: u, v,w'
str7 = ':: u, v,w'
(st,rest, matches) = apply(F95_arg_decl_parser,str1)
print('str1:')
print(matches)
print( '------------------------------------------------------------------------------' )
print( '------------------------------------------------------------------------------' )
print(getParseTree( matches ))
print( '------------------------------------------------------------------------------' )
res4 = apply(word,str4) #type_parser(str4)
print('str4: ',res4)
res4 = apply(word,str4) #type_parser(str4)
print('str4: ',res4)
res6 = apply( intent_parser,str6)
print('str6: ',res6)
[st,rest, matches] = apply(arglist_parser,str7)
print('str7: ',matches)
str2 = ' real, dimension(0:7) :: f '
( st, rest, matches2) = apply(F95_arg_decl_parser,str2)
print('str2: '+rest)
print( matches2)
str3 = ' real(8), dimension(0:7,kp) :: f,g '
res = run(F95_arg_decl_parser,str3)
print('str3: ',res)
#print( matches3)
| 3,671 | 19.982857 | 89 |
py
|
Cingulata
|
Cingulata-master/optim/utils.py
|
#
# (C) Copyright 2017 CEA LIST. All Rights Reserved.
# Contributor(s): Cingulata team (formerly Armadillo team)
#
# This software is governed by the CeCILL-C license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-C
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-C license and that you accept its terms.
#
#
import sys
import networkx as nx
import re
import itertools
import operator
def readBlifFile(fileName):
f = open(fileName)
lines = f.readlines()
f.close()
return parseBlif(lines)
def is_mult(tt):
return tt and tt in is_mult.tt
is_mult.tt = [
'11 1', # and
'11 0', # nand
'01 1', # andny
'10 1', # andyn
'00 0', # or
'00 1', # nor
'10 0', # orny
'01 0', # oryn
]
def is_add(tt):
return tt and tt in is_add.tt
is_add.tt = [
# '0 1', # not
'01 1\n10 1', # xor
'10 1\n01 1', # xor
'00 1\n11 1', # xnor
'11 1\n00 1', # xnor
]
def parseBlif(lines):
cmds = "".join(lines).split('.')
G = nx.DiGraph()
for cmd in cmds:
if cmd.startswith('names'):
cmd = cmd.strip().split('\n')
var = cmd[0].split()[1:]
out = var[-1]
G.add_nodes_from(var)
edges = [(v, out) for v in var[:-1]]
G.add_edges_from(edges)
G.nodes()[out]['gate'] = True
G.nodes()[out]['truth_table_var'] = var
G.nodes()[out]['truth_table'] = "\n".join(cmd[1:]).strip()
elif cmd.startswith('inputs'):
cmd = cmd.replace('\\','').split()
nodes = cmd[1:]
G.add_nodes_from(nodes, input=True)
elif cmd.startswith('outputs'):
cmd = cmd.replace('\\','').split()
nodes = cmd[1:]
G.add_nodes_from(nodes, output=True)
return G
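# Illustrative sketch: for a small BLIF netlist such as
#   .inputs a b c
#   .outputs y
#   .names a b t
#   11 1
#   .names t c y
#   11 1
# parseBlif() builds a DiGraph with gate nodes t and y, and getMultiplicativeDepth()
# below reports 2 (two chained AND gates).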
def getMultiplicativeDepth(G):
depths = getMultiplicativeDepths(G)
outs = getOutputNodes(G)
return max(dict(filter(lambda e: e[0] in outs, depths.items())).values())
def getMultiplicativeDepths(G):
depth = dict()
for v in nx.topological_sort(G):
if G.in_degree(v) == 0:
depth[v] = 0
else:
depth[v] = max([depth[u] for u in G.predecessors(v)]) + is_mult(G.nodes()[v].get('truth_table'))
return depth
def getInputNodes(G):
return list(filter(lambda n: 'input' in G.nodes()[n], G.nodes()))
def getOutputNodes(G):
return list(filter(lambda n: 'output' in G.nodes()[n], G.nodes()))
truthTable2GateType = {
'0' : 'const_0',
'1' : 'const_1',
'0 1' : 'not',
'1 1' : 'buf',
'11 1' : 'and',
'11 0' : 'nand',
'01 1' : 'andny',
'10 1' : 'andyn',
'00 0' : 'or',
'00 1' : 'nor',
'10 0' : 'orny',
'01 0' : 'oryn',
'01 1\n10 1' : 'xor',
'10 1\n01 1' : 'xor',
'00 1\n11 1' : 'xnor',
'11 1\n00 1' : 'xnor',
}
def getNodeCountPerType(G):
nodes = list(filter(lambda n: 'truth_table' in G.nodes()[n], G.nodes()))
nodeTypes = list(map(lambda n: truthTable2GateType[G.nodes()[n]['truth_table']], nodes))
nodeCount = dict()
for nodeType in set(nodeTypes):
nodeCount[nodeType] = len(list(filter(lambda nt: nodeType == nt, nodeTypes)))
return nodeCount
def getMultiplicativeNodes(G):
return list(filter(lambda n: is_mult(G.nodes()[n].get('truth_table')), G.nodes()))
def getAdditiveNodes(G):
return list(filter(lambda n: is_add(G.nodes()[n].get('truth_table')), G.nodes()))
def getMultiplicativeNodeCnt(G):
return len(getMultiplicativeNodes(G))
def getAdditiveNodeCnt(G):
return len(getAdditiveNodes(G))
| 3,978 | 26.631944 | 102 |
py
|
Cingulata
|
Cingulata-master/optim/graph_info.py
|
#
# (C) Copyright 2017 CEA LIST. All Rights Reserved.
# Contributor(s): Cingulata team (formerly Armadillo team)
#
# This software is governed by the CeCILL-C license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-C
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-C license and that you accept its terms.
#
#
import argparse
import networkx as nx
import utils
#TODO: inputs are only in the ".inputs" section and not all nodes whose names start with "i_"
def printMultDepthMax():
print (max(multDepths.values()))
def printMultDepthPerOut():
outputs = utils.getOutputNodes(circuit)
outputs = sorted(outputs, key=lambda n: int(n[2:]))
print ('\n'.join(map(lambda out: out + ': ' + str(multDepths[out]), outputs)))
def printMultGateCount():
print (utils.getMultiplicativeNodeCnt(circuit))
def printAddGateCount():
print (utils.getAdditiveNodeCnt(circuit))
def printGateCount():
print ('\n'.join(map(lambda e: e[0] + ': '+ str(e[1]), nodeCountPerType.items())))
def printInputCount():
print (len(utils.getInputNodes(circuit)))
def printInputs():
print (" ".join(sorted(utils.getInputNodes(circuit))))
def printOutputCount():
print (len(utils.getOutputNodes(circuit)))
def printOutputs():
print (" ".join(sorted(utils.getOutputNodes(circuit))))
def readClearInps(fileName):
f = open(fileName)
tokens = f.read().split()
clearInps = dict(zip(tokens[::2], map(lambda i: bool(int(i)), tokens[1::2])))
f.close()
return clearInps
allChoices = {
'mult_depth_max': ('Maximal multiplicative depth', printMultDepthMax),
'mult_depth_per_out': ('Multiplicative depth per output', printMultDepthPerOut),
'mult_gate_cnt': ('Multiplicative gate count', printMultGateCount),
'add_gate_cnt': ('Additive gate count', printAddGateCount),
'gate_cnt': ('Gate count', printGateCount),
'inp_cnt': ('Input count', printInputCount),
'inps': ('Inputs', printInputs),
'out_cnt': ('Output count', printOutputCount),
'outs': ('Outputs', printOutputs)
}
parser = argparse.ArgumentParser(description='Compute graph characteristics (multiplicative depth, ...)')
parser.add_argument('blifFile', type=str, help='input BLIF file')
for k, (desc, func) in allChoices.items():
parser.add_argument('--' + k, action="store_true", default=False, help=desc)
parser.add_argument('--all', action="store_true", default=False, help="print all characteristics")
parser.add_argument('-v', action="store_true", default=False, dest="verbose")
parser.add_argument('--clear_inps', type=str, default = "", dest = "clear_inps_file", help='clear inputs file')
args = parser.parse_args()
if args.all:
choices = allChoices
else:
choices=dict(filter(lambda key_value: args.__dict__[key_value[0]] , allChoices.items()))
#exit if no choices selected
if not choices:
print (parser.usage)
exit(0)
circuit = utils.readBlifFile(args.blifFile)
clearInputs = dict()
if args.clear_inps_file:
clearInputs = readClearInps(args.clear_inps_file)
utils.updateClearCipherExecGates(circuit, clearInputs)
if 'mult_depth_max' in choices or 'mult_depth_per_out' in choices:
multDepths = utils.getMultiplicativeDepths(circuit)
if 'mult_gate_cnt' in choices or 'add_gate_cnt' in choices or 'gate_cnt' in choices:
nodeCountPerType = dict(sorted(utils.getNodeCountPerType(circuit).items(), key=lambda e: e[0]))
for k, (desc, func) in choices.items():
if args.verbose:
print (desc, ':')
func()
| 4,014 | 34.848214 | 111 |
py
|
tesstrain
|
tesstrain-main/generate_line_box.py
|
#!/usr/bin/env python3
import argparse
import io
import unicodedata
from PIL import Image
#
# command line arguments
#
arg_parser = argparse.ArgumentParser('''Creates tesseract box files for given (line) image text pairs''')
# Text ground truth
arg_parser.add_argument('-t', '--txt', nargs='?', metavar='TXT', help='Line text (GT)', required=True)
# Image file
arg_parser.add_argument('-i', '--image', nargs='?', metavar='IMAGE', help='Image file', required=True)
args = arg_parser.parse_args()
#
# main
#
# Get image size.
width, height = Image.open(args.image).size
# load gt
with io.open(args.txt, "r", encoding='utf-8') as f:
lines = f.read().strip().split('\n')
if len(lines) != 1:
raise ValueError("ERROR: %s: Ground truth text file should contain exactly one line, not %s" % (args.txt, len(lines)))
line = unicodedata.normalize('NFC', lines[0].strip())
if line:
for i in range(1, len(line)):
char = line[i]
prev_char = line[i-1]
if unicodedata.combining(char):
print("%s 0 0 %d %d 0" % ((prev_char + char), width, height))
elif not unicodedata.combining(prev_char):
print("%s 0 0 %d %d 0" % (prev_char, width, height))
if not unicodedata.combining(line[-1]):
print("%s 0 0 %d %d 0" % (line[-1], width, height))
print("\t 0 0 %d %d 0" % (width, height))
| 1,365 | 28.695652 | 126 |
py
|
tesstrain
|
tesstrain-main/shuffle.py
|
#!/usr/bin/env python3
# shuffle.py - shuffle lines in pseudo random order
#
# Usage:
# shuffle.py [SEED [FILE]]
#
# Sort and shuffle the lines read from stdin in pseudo random order
# and write them to stdout.
#
# If FILE is given, then apply to that in-place (instead of stdin and stdout).
#
# The optional SEED argument is used as a seed for the random generator.
# A shuffled list can be reproduced by using the same seed again.
import random
import sys
# If at least one argument was given, the first argument is used as the seed.
if len(sys.argv) > 1:
random.seed(sys.argv[1])
if len(sys.argv) > 2:
fd0 = open(sys.argv[2], 'r')
else:
fd0 = sys.stdin
# Read lines from standard input.
lines = fd0.readlines()
# First sort the input lines (directory entries may come in undefined order).
lines.sort()
# Then shuffle the lines.
random.shuffle(lines)
if len(sys.argv) > 2:
fd1 = open(sys.argv[2], 'w')
else:
fd1 = sys.stdout
# Write the shuffled lines to standard output.
fd1.writelines(lines)
| 1,031 | 22.454545 | 78 |
py
|
tesstrain
|
tesstrain-main/generate_wordstr_box.py
|
#!/usr/bin/env python3
import argparse
import io
import unicodedata
import bidi.algorithm
from PIL import Image
#
# command line arguments
#
arg_parser = argparse.ArgumentParser('''Creates tesseract WordStr box files for given (line) image text pairs''')
# Text ground truth
arg_parser.add_argument('-t', '--txt', nargs='?', metavar='TXT', help='Line text (GT)', required=True)
# Image file
arg_parser.add_argument('-i', '--image', nargs='?', metavar='IMAGE', help='Image file', required=True)
args = arg_parser.parse_args()
#
# main
#
# load image
with open(args.image, "rb") as f:
im = Image.open(f)
width, height = im.size
# load gt
with io.open(args.txt, "r", encoding='utf-8') as f:
lines = f.read().strip().split('\n')
if len(lines) != 1:
raise ValueError("ERROR: %s: Ground truth text file should contain exactly one line, not %s" % (args.txt, len(lines)))
line = unicodedata.normalize('NFC', lines[0].strip())
# create WordStr line boxes for Indic & RTL
if line:
line = bidi.algorithm.get_display(line)
print("WordStr 0 0 %d %d 0 #%s" % (width, height, line))
print("\t 0 0 %d %d 0" % (width, height))
| 1,160 | 25.386364 | 126 |
py
|
tesstrain
|
tesstrain-main/generate_gt_from_box.py
|
#!/usr/bin/env python3
import argparse
import io
#
# command line arguments
#
arg_parser = argparse.ArgumentParser(
'''Creates groundtruth files from text2image generated box files''')
# Text ground truth
arg_parser.add_argument('-t', '--txt', nargs='?',
metavar='TXT', help='Line text (GT)', required=True)
# Text box file
arg_parser.add_argument('-b', '--box', nargs='?', metavar='BOX',
help='text2image generated box file (BOX)', required=True)
args = arg_parser.parse_args()
#
# main
# uses US (ASCII unit separator, U+001F) for substitution to get the space delimiters
#
gtstring = io.StringIO()
gtfile = open(args.txt, "w", encoding='utf-8')
with io.open(args.box, "r", encoding='utf-8') as boxfile:
print(''.join(line.replace(" ", "\u001f ").split(' ', 1)[0] for line in boxfile if line), file=gtstring)
gt = gtstring.getvalue().replace("\u001f", " ").replace("\t", "\n")
print(gt, file=gtfile)
| 967 | 28.333333 | 109 |
py
|
tesstrain
|
tesstrain-main/generate_line_syllable_box.py
|
#!/usr/bin/env python3
import argparse
import io
import unicodedata
from PIL import Image
#
# command line arguments
#
arg_parser = argparse.ArgumentParser('''Creates tesseract box files for given (line) image text pairs''')
# Text ground truth
arg_parser.add_argument('-t', '--txt', nargs='?', metavar='TXT', help='Line text (GT)', required=True)
# Image file
arg_parser.add_argument('-i', '--image', nargs='?', metavar='IMAGE', help='Image file', required=True)
args = arg_parser.parse_args()
#
# main
#
# https://stackoverflow.com/questions/6805311/combining-devanagari-characters
# Letters are category Lo (Letter, Other), vowel signs are category Mc (Mark, Spacing Combining),
# virama is category Mn (Mark, Nonspacing) and spaces are category Zs (Separator, Space).
def splitclusters(s):
"""Generate the grapheme clusters for the string s. (Not the full
Unicode text segmentation algorithm, but probably good enough for
Devanagari.)
"""
# http://pyright.blogspot.com/2009/12/pythons-unicodedata-module.html
# The combining code is typically zero. The virama gets its own special code of nine.
# i.e. unicodedata.category=Mn unicodedata.combining=9
# (Could be used to extend for other Indic languages).
virama = u'\N{DEVANAGARI SIGN VIRAMA}'
cluster = u''
last = None
for c in s:
cat = unicodedata.category(c)[0]
if cat == 'M' or cat == 'L' and last == virama:
cluster += c
else:
if cluster:
yield cluster
cluster = c
last = c
if cluster:
yield cluster
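# Illustrative example (Devanagari): a consonant stays joined to a following
# virama+consonant and to dependent vowel signs, e.g.
#   list(splitclusters('नमस्ते')) == ['न', 'म', 'स्ते']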
# Get image size.
width, height = Image.open(args.image).size
# load gt
with io.open(args.txt, "r", encoding='utf-8') as f:
lines = f.read().strip().split('\n')
if len(lines) != 1:
raise ValueError("ERROR: %s: Ground truth text file should contain exactly one line, not %s" % (args.txt, len(lines)))
line = unicodedata.normalize('NFC', lines[0].strip())
if line:
for syllable in (splitclusters(line)):
print("%s 0 0 %d %d 0" % (syllable, width, height))
print("\t 0 0 %d %d 0" % (width, height))
| 2,137 | 29.985507 | 126 |
py
|
tesstrain
|
tesstrain-main/normalize.py
|
#!/usr/bin/env python3
import argparse
import io
import unicodedata
# Command line arguments.
arg_parser = argparse.ArgumentParser(description='Normalize all ground truth texts for the given text files.')
arg_parser.add_argument("filename", help="filename of text file", nargs='*')
arg_parser.add_argument("-n", "--dry-run", help="show which files would be normalized but don't change them", action="store_true")
arg_parser.add_argument("-v", "--verbose", help="show ignored files", action="store_true")
arg_parser.add_argument("-f", "--form", help="normalization form (default: NFC)", choices=["NFC", "NFKC", "NFD", "NFKD"], default="NFC")
args = arg_parser.parse_args()
# Read all files and overwrite them with normalized text if necessary.
for filename in args.filename:
with io.open(filename, "r", encoding="utf-8") as f:
try:
text = f.read()
except UnicodeDecodeError:
if args.verbose:
print(filename + " (ignored)")
continue
normalized_text = unicodedata.normalize(args.form, text)
if text != normalized_text:
print(filename)
if not args.dry_run:
with io.open(filename, "w", encoding="utf-8") as out:
out.write(normalized_text)
| 1,286 | 40.516129 | 136 |
py
|
tesstrain
|
tesstrain-main/src/setup.py
|
from pathlib import Path
import setuptools
ROOT_DIRECTORY = Path(__file__).parent.resolve()
setuptools.setup(
name='tesstrain',
description='Training utils for Tesseract',
long_description=(ROOT_DIRECTORY / 'README.md').read_text(encoding='utf-8'),
long_description_content_type='text/markdown',
url='https://github.com/tesseract-ocr/tesstrain',
packages=setuptools.find_packages(),
license='Apache Software License 2.0',
author='Tesseract contributors',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Image Recognition',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
],
keywords='Tesseract,tesseract-ocr,OCR,optical character recognition',
python_requires='>=3.7',
install_requires=[
'tqdm',
],
entry_points={
'console_scripts': [
],
},
)
| 1,308 | 30.166667 | 80 |
py
|
tesstrain
|
tesstrain-main/src/tesstrain/__main__.py
|
# (C) Copyright 2014, Google Inc.
# (C) Copyright 2018, James R Barlow
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script provides an easy way to execute various phases of training
# Tesseract. For a detailed description of the phases, see
# https://tesseract-ocr.github.io/tessdoc/Training-Tesseract.html.
import logging
from tesstrain.arguments import (
get_argument_parser,
TrainingArguments,
verify_parameters_and_handle_defaults
)
from tesstrain.generate import cleanup
from tesstrain.wrapper import run_from_context
log = logging.getLogger()
def setup_logging_console():
log.setLevel(logging.DEBUG)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console_formatter = logging.Formatter(
"[%(asctime)s] %(levelname)s - %(message)s", datefmt="%H:%M:%S"
)
console.setFormatter(console_formatter)
log.addHandler(console)
def setup_logging_logfile(logfile):
logfile = logging.FileHandler(logfile, encoding='utf-8')
logfile.setLevel(logging.DEBUG)
logfile_formatter = logging.Formatter(
"[%(asctime)s] - %(levelname)s - %(name)s - %(message)s"
)
logfile.setFormatter(logfile_formatter)
log.addHandler(logfile)
return logfile
def parse_flags(argv=None):
ctx = TrainingArguments()
log.debug(ctx)
parser = get_argument_parser()
parser.parse_args(args=argv, namespace=ctx)
return verify_parameters_and_handle_defaults(ctx)
def main():
setup_logging_console()
ctx = parse_flags()
logfile = setup_logging_logfile(ctx.log_file)
run_from_context(ctx)
log.removeHandler(logfile)
logfile.close()
cleanup(ctx)
log.info("All done!")
return 0
if __name__ == '__main__':
main()
| 2,242 | 28.12987 | 74 |
py
|
tesstrain
|
tesstrain-main/src/tesstrain/language_specific.py
|
# (C) Copyright 2014, Google Inc.
# (C) Copyright 2018, James R Barlow
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Set some language specific variables.
"""
import logging
import os
log = logging.getLogger(__name__)
# Array of all valid language codes.
VALID_LANGUAGE_CODES = (
"afr amh ara asm aze aze_cyrl bel ben bih bod bos bul cat "
"ceb ces chi_sim chi_tra chr cym cyr_lid dan deu div dzo "
"ell eng enm epo est eus fas fil fin fra frk frm gle glg "
"grc guj hat heb hin hrv hun hye iast iku ind isl ita ita_old "
"jav jav_java jpn kan kat kat_old kaz khm kir kmr kor kur_ara lao lat "
"lat_lid lav lit mal mar mkd mlt msa mya nep nld nor ori "
"pan pol por pus ron rus san sin slk slv snd spa spa_old "
"sqi srp srp_latn swa swe syr tam tel tgk tgl tha tir tur "
"uig ukr urd uzb uzb_cyrl vie yid gle_uncial "
)
# Codes for which we have webtext but no fonts:
UNUSABLE_LANGUAGE_CODES = ""
FRAKTUR_FONTS = [
"CaslonishFraxx Medium",
"Cloister Black, Light",
"Proclamate Light",
"UnifrakturMaguntia",
"Walbaum-Fraktur",
]
# List of fonts to train on
LATIN_FONTS = [
"Arial Bold",
"Arial Bold Italic",
"Arial Italic",
"Arial",
"Courier New Bold",
"Courier New Bold Italic",
"Courier New Italic",
"Courier New",
"Times New Roman, Bold",
"Times New Roman, Bold Italic",
"Times New Roman, Italic",
"Times New Roman,",
"Georgia Bold",
"Georgia Italic",
"Georgia",
"Georgia Bold Italic",
"Trebuchet MS Bold",
"Trebuchet MS Bold Italic",
"Trebuchet MS Italic",
"Trebuchet MS",
"Verdana Bold",
"Verdana Italic",
"Verdana",
"Verdana Bold Italic",
"Tex Gyre Bonum Bold",
"Tex Gyre Bonum Italic",
"Tex Gyre Bonum Bold Italic",
"Tex Gyre Schola Bold",
"Tex Gyre Schola Italic",
"Tex Gyre Schola Bold Italic",
"Tex Gyre Schola Regular",
"DejaVu Sans Ultra-Light",
]
# List of fonts for printed/neo-Latin ('lat' language code, different from Latin script)
NEOLATIN_FONTS = [
"GFS Bodoni",
"GFS Bodoni Bold",
"GFS Bodoni Italic",
"GFS Bodoni Bold Italic",
"GFS Didot",
"GFS Didot Bold",
"GFS Didot Italic",
"GFS Didot Bold Italic",
"Cardo",
"Cardo Bold",
"Cardo Italic",
"Wyld",
"Wyld Italic",
"EB Garamond",
"EB Garamond Italic",
"Junicode",
"Junicode Bold",
"Junicode Italic",
"Junicode Bold Italic",
"IM FELL DW Pica PRO",
"IM FELL English PRO",
"IM FELL Double Pica PRO",
"IM FELL French Canon PRO",
"IM FELL Great Primer PRO",
"IM FELL DW Pica PRO Italic",
"IM FELL English PRO Italic",
"IM FELL Double Pica PRO Italic",
"IM FELL French Canon PRO Italic",
"IM FELL Great Primer PRO Italic",
]
IRISH_UNCIAL_FONTS = [
"Bunchlo Arsa Dubh GC",
"Bunchlo Arsa GC",
"Bunchlo Arsa GC Bold",
"Bunchlo Dubh GC",
"Bunchlo GC",
"Bunchlo GC Bold",
"Bunchlo Nua GC Bold",
"Bunchló na Nod GC",
"Gadelica",
"Glanchlo Dubh GC",
"Glanchlo GC",
"Glanchlo GC Bold",
"Seanchló Dubh GC",
"Seanchló GC",
"Seanchló GC Bold",
"Seanchló na Nod GC",
"Seanchló Ársa Dubh GC",
"Seanchló Ársa GC",
"Seanchló Ársa GC Bold",
"Tromchlo Beag GC",
"Tromchlo Mor GC",
"Urchlo GC",
"Urchlo GC Bold",
]
EARLY_LATIN_FONTS = [
*FRAKTUR_FONTS,
*LATIN_FONTS,
# The Wyld font family renders early modern ligatures encoded in the private
# unicode area.
"Wyld",
"Wyld Italic",
# Fonts that render the Yogh symbol (U+021C, U+021D) found in Old English.
"GentiumAlt",
]
VIETNAMESE_FONTS = [
"Arial Unicode MS Bold",
"Arial Bold Italic",
"Arial Italic",
"Arial Unicode MS",
"FreeMono Bold",
"Courier New Bold Italic",
"FreeMono Italic",
"FreeMono",
"GentiumAlt Italic",
"GentiumAlt",
"Palatino Linotype Bold",
"Palatino Linotype Bold Italic",
"Palatino Linotype Italic",
"Palatino Linotype",
"Really No 2 LT W2G Light",
"Really No 2 LT W2G Light Italic",
"Really No 2 LT W2G Medium",
"Really No 2 LT W2G Medium Italic",
"Really No 2 LT W2G Semi-Bold",
"Really No 2 LT W2G Semi-Bold Italic",
"Really No 2 LT W2G Ultra-Bold",
"Really No 2 LT W2G Ultra-Bold Italic",
"Times New Roman, Bold",
"Times New Roman, Bold Italic",
"Times New Roman, Italic",
"Times New Roman,",
"Verdana Bold",
"Verdana Italic",
"Verdana",
"Verdana Bold Italic",
"VL Gothic",
"VL PGothic",
]
DEVANAGARI_FONTS = [
"FreeSans",
"Chandas",
"Kalimati",
"Uttara",
"Lucida Sans",
"gargi Medium",
"Lohit Devanagari",
"Arial Unicode MS Bold",
"Ascender Uni",
"Noto Sans Devanagari Bold",
"Noto Sans Devanagari",
"Samyak Devanagari Medium",
"Sarai",
"Saral LT Bold",
"Saral LT Light",
"Nakula",
"Sahadeva",
"Samanata",
"Santipur OT Medium",
]
KANNADA_FONTS = [
"Kedage Bold",
"Kedage Italic",
"Kedage",
"Kedage Bold Italic",
"Mallige Bold",
"Mallige Italic",
"Mallige",
"Mallige Bold Italic",
"Arial Unicode MS",
"Arial Unicode MS Bold",
"Ascender Uni",
"cheluvi Medium",
"Noto Sans Kannada Bold",
"Noto Sans Kannada",
"Lohit Kannada",
"Tunga",
"Tunga Bold",
]
TELUGU_FONTS = [
"Pothana2000",
"Vemana2000",
"Lohit Telugu",
"Arial Unicode MS Bold",
"Ascender Uni",
"Dhurjati",
"Gautami Bold",
"Gidugu",
"Gurajada",
"Lakki Reddy",
"Mallanna",
"Mandali",
"NATS",
"NTR",
"Noto Sans Telugu Bold",
"Noto Sans Telugu",
"Peddana",
"Ponnala",
"Ramabhadra",
"Ravi Prakash",
"Sree Krushnadevaraya",
"Suranna",
"Suravaram",
"Tenali Ramakrishna",
"Gautami",
]
TAMIL_FONTS = [
"TAMu_Kadambri",
"TAMu_Kalyani",
"TAMu_Maduram",
"TSCu_Paranar",
"TSCu_Times",
"TSCu_Paranar Bold",
"FreeSans",
"FreeSerif",
"Lohit Tamil",
"Arial Unicode MS Bold",
"Ascender Uni",
"Droid Sans Tamil Bold",
"Droid Sans Tamil",
"Karla Tamil Inclined Bold Italic",
"Karla Tamil Inclined Italic",
"Karla Tamil Upright Bold",
"Karla Tamil Upright",
"Noto Sans Tamil Bold",
"Noto Sans Tamil",
"Noto Sans Tamil UI Bold",
"Noto Sans Tamil UI",
"TSCu_Comic Normal",
"Lohit Tamil Classical",
]
THAI_FONTS = [
"FreeSerif",
"FreeSerif Italic",
"Garuda",
"Norasi",
"Lucida Sans Typewriter",
"Lucida Sans",
"Garuda Oblique",
"Norasi Oblique",
"Norasi Italic",
"Garuda Bold",
"Norasi Bold",
"Lucida Sans Typewriter Bold",
"Lucida Sans Semi-Bold",
"Garuda Bold Oblique",
"Norasi Bold Italic",
"Norasi Bold Oblique",
"AnuParp LT Thai",
"Arial Unicode MS Bold",
"Arial Unicode MS",
"Ascender Uni",
"Loma",
"Noto Serif Thai Bold",
"Noto Serif Thai",
"Purisa Light",
"Sirichana LT Bold",
"Sirichana LT",
"Sukothai LT Bold",
"Sukothai LT",
"UtSaHaGumm LT Thai",
"Tahoma",
]
KOREAN_FONTS = [
"Arial Unicode MS",
"Arial Unicode MS Bold",
"Baekmuk Batang Patched",
"Baekmuk Batang",
"Baekmuk Dotum",
"Baekmuk Gulim",
"Baekmuk Headline",
]
CHI_SIM_FONTS = [
"AR PL UKai CN",
"AR PL UMing Patched Light",
"Arial Unicode MS",
"Arial Unicode MS Bold",
"WenQuanYi Zen Hei Medium",
]
CHI_TRA_FONTS = [
"AR PL UKai TW",
"AR PL UMing TW MBE Light",
"AR PL UKai Patched",
"AR PL UMing Patched Light",
"Arial Unicode MS",
"Arial Unicode MS Bold",
"WenQuanYi Zen Hei Medium",
]
JPN_FONTS = [
"TakaoExGothic",
"TakaoExMincho",
"TakaoGothic",
"TakaoMincho",
"TakaoPGothic",
"TakaoPMincho",
"VL Gothic",
"VL PGothic",
"Noto Sans Japanese Bold",
"Noto Sans Japanese Light",
]
RUSSIAN_FONTS = [
"Arial Bold",
"Arial Bold Italic",
"Arial Italic",
"Arial",
"Courier New Bold",
"Courier New Bold Italic",
"Courier New Italic",
"Courier New",
"Times New Roman, Bold",
"Times New Roman, Bold Italic",
"Times New Roman, Italic",
"Times New Roman,",
"Georgia Bold",
"Georgia Italic",
"Georgia",
"Georgia Bold Italic",
"Trebuchet MS Bold",
"Trebuchet MS Bold Italic",
"Trebuchet MS Italic",
"Trebuchet MS",
"Verdana Bold",
"Verdana Italic",
"Verdana",
"Verdana Bold Italic",
"DejaVu Serif",
"DejaVu Serif Oblique",
"DejaVu Serif Bold",
"DejaVu Serif Bold Oblique",
"Lucida Bright",
"FreeSerif Bold",
"FreeSerif Bold Italic",
"DejaVu Sans Ultra-Light",
]
GREEK_FONTS = [
"Arial Unicode MS",
"Arial Unicode MS Bold",
"DejaVu Sans Mono",
"DejaVu Sans Mono Oblique",
"DejaVu Sans Mono Bold",
"DejaVu Sans Mono Bold Oblique",
"DejaVu Serif",
"DejaVu Serif Semi-Condensed",
"DejaVu Serif Oblique",
"DejaVu Serif Bold",
"DejaVu Serif Bold Oblique",
"DejaVu Serif Bold Semi-Condensed",
"FreeSerif Bold",
"FreeSerif Bold Italic",
"FreeSerif Italic",
"FreeSerif",
"GentiumAlt",
"GentiumAlt Italic",
"Linux Biolinum O Bold",
"Linux Biolinum O",
"Linux Libertine O Bold",
"Linux Libertine O",
"Linux Libertine O Bold Italic",
"Linux Libertine O Italic",
"Palatino Linotype Bold",
"Palatino Linotype Bold Italic",
"Palatino Linotype Italic",
"Palatino Linotype",
"UmePlus P Gothic",
"VL PGothic",
]
ANCIENT_GREEK_FONTS = [
"GFS Artemisia",
"GFS Artemisia Bold",
"GFS Artemisia Bold Italic",
"GFS Artemisia Italic",
"GFS Bodoni",
"GFS Bodoni Bold",
"GFS Bodoni Bold Italic",
"GFS Bodoni Italic",
"GFS Didot",
"GFS Didot Bold",
"GFS Didot Bold Italic",
"GFS Didot Italic",
"GFS DidotClassic",
"GFS Neohellenic",
"GFS Neohellenic Bold",
"GFS Neohellenic Bold Italic",
"GFS Neohellenic Italic",
"GFS Philostratos",
"GFS Porson",
"GFS Pyrsos",
"GFS Solomos",
]
ARABIC_FONTS = [
"Arabic Transparent Bold",
"Arabic Transparent",
"Arab",
"Arial Unicode MS Bold",
"Arial Unicode MS",
"ASVCodar LT Bold",
"ASVCodar LT Light",
"Badiya LT Bold",
"Badiya LT",
"Badr LT Bold",
"Badr LT",
"Dimnah",
"Frutiger LT Arabic Bold",
"Frutiger LT Arabic",
"Furat",
"Hassan LT Bold",
"Hassan LT Light",
"Jalal LT Bold",
"Jalal LT Light",
"Midan Bold",
"Midan",
"Mitra LT Bold",
"Mitra LT Light",
"Palatino LT Arabic",
"Palatino Sans Arabic Bold",
"Palatino Sans Arabic",
"Simplified Arabic Bold",
"Simplified Arabic",
"Times New Roman, Bold",
"Times New Roman,",
"Traditional Arabic Bold",
"Traditional Arabic",
]
HEBREW_FONTS = [
"Arial Bold",
"Arial Bold Italic",
"Arial Italic",
"Arial",
"Courier New Bold",
"Courier New Bold Italic",
"Courier New Italic",
"Courier New",
"Ergo Hebrew Semi-Bold",
"Ergo Hebrew Semi-Bold Italic",
"Ergo Hebrew",
"Ergo Hebrew Italic",
"Really No 2 LT W2G Light",
"Really No 2 LT W2G Light Italic",
"Really No 2 LT W2G Medium",
"Really No 2 LT W2G Medium Italic",
"Really No 2 LT W2G Semi-Bold",
"Really No 2 LT W2G Semi-Bold Italic",
"Really No 2 LT W2G Ultra-Bold",
"Really No 2 LT W2G Ultra-Bold Italic",
"Times New Roman, Bold",
"Times New Roman, Bold Italic",
"Times New Roman, Italic",
"Times New Roman,",
"Lucida Sans",
"Tahoma",
]
BENGALI_FONTS = [
"Bangla Medium",
"Lohit Bengali",
"Mukti Narrow",
"Mukti Narrow Bold",
"Jamrul Medium Semi-Expanded",
"Likhan Medium",
"Arial Unicode MS Bold",
"Ascender Uni",
"FreeSans",
"FreeSans Oblique",
"FreeSerif",
"FreeSerif Italic",
"Noto Sans Bengali Bold",
"Noto Sans Bengali",
"Ani",
"Lohit Assamese",
"Lohit Bengali",
"Mitra Mono",
]
KYRGYZ_FONTS = [
"Arial",
"Arial Bold",
"Arial Italic",
"Arial Bold Italic",
"Courier New",
"Courier New Bold",
"Courier New Italic",
"Courier New Bold Italic",
"Times New Roman,",
"Times New Roman, Bold",
"Times New Roman, Bold Italic",
"Times New Roman, Italic",
"DejaVu Serif",
"DejaVu Serif Oblique",
"DejaVu Serif Bold",
"DejaVu Serif Bold Oblique",
"Lucida Bright",
"FreeSerif Bold",
"FreeSerif Bold Italic",
]
PERSIAN_FONTS = [
"Amiri Bold Italic",
"Amiri Bold",
"Amiri Italic",
"Amiri",
"Andale Sans Arabic Farsi",
"Arial Unicode MS",
"Arial Unicode MS Bold",
"Lateef",
"Lucida Bright",
"Lucida Sans Oblique",
"Lucida Sans Semi-Bold",
"Lucida Sans",
"Lucida Sans Typewriter Bold",
"Lucida Sans Typewriter Oblique",
"Lucida Sans Typewriter",
"Scheherazade",
"Tahoma",
"Times New Roman,",
"Times New Roman, Bold",
"Times New Roman, Bold Italic",
"Times New Roman, Italic",
"Yakout Linotype Bold",
"Yakout Linotype",
]
AMHARIC_FONTS = [
"Abyssinica SIL",
"Droid Sans Ethiopic Bold",
"Droid Sans Ethiopic",
"FreeSerif",
"Noto Sans Ethiopic Bold",
"Noto Sans Ethiopic",
]
ARMENIAN_FONTS = [
"Arial Unicode MS",
"Arial Unicode MS Bold",
"Ascender Uni",
"FreeMono",
"FreeMono Italic",
"FreeSans",
"FreeSans Bold",
"FreeSans Oblique",
]
BURMESE_FONTS = [
"Myanmar Sans Pro",
"Noto Sans Myanmar Bold",
"Noto Sans Myanmar",
"Padauk Bold",
"Padauk",
"TharLon",
]
JAVANESE_FONTS = ["Prada"]
NORTH_AMERICAN_ABORIGINAL_FONTS = [
"Aboriginal Sans",
"Aboriginal Sans Bold Italic",
"Aboriginal Sans Italic",
"Aboriginal Sans Bold",
"Aboriginal Serif Bold",
"Aboriginal Serif Bold Italic",
"Aboriginal Serif Italic",
"Aboriginal Serif",
]
GEORGIAN_FONTS = [
"Arial Unicode MS Bold",
"Arial Unicode MS",
"BPG Algeti GPL\&GNU",
"BPG Chveulebrivi GPL\&GNU",
"BPG Courier GPL\&GNU",
"BPG Courier S GPL\&GNU",
"BPG DejaVu Sans 2011 GNU-GPL",
"BPG Elite GPL\&GNU",
"BPG Excelsior GPL\&GNU",
"BPG Glaho GPL\&GNU",
"BPG Gorda GPL\&GNU",
"BPG Ingiri GPL\&GNU",
"BPG Mrgvlovani Caps GNU\&GPL",
"BPG Mrgvlovani GPL\&GNU",
"BPG Nateli Caps GPL\&GNU Light",
"BPG Nateli Condenced GPL\&GNU Light",
"BPG Nateli GPL\&GNU Light",
"BPG Nino Medium Cond GPL\&GNU",
"BPG Nino Medium GPL\&GNU Medium",
"BPG Sans GPL\&GNU",
"BPG Sans Medium GPL\&GNU",
"BPG Sans Modern GPL\&GNU",
"BPG Sans Regular GPL\&GNU",
"BPG Serif GPL\&GNU",
"BPG Serif Modern GPL\&GNU",
"FreeMono",
"FreeMono Bold Italic",
"FreeSans",
"FreeSerif",
"FreeSerif Bold",
"FreeSerif Bold Italic",
"FreeSerif Italic",
]
OLD_GEORGIAN_FONTS = [
"Arial Unicode MS Bold",
"Arial Unicode MS",
"BPG Algeti GPL\&GNU",
"BPG Courier S GPL\&GNU",
"BPG DejaVu Sans 2011 GNU-GPL",
"BPG Elite GPL\&GNU",
"BPG Excelsior GPL\&GNU",
"BPG Glaho GPL\&GNU",
"BPG Ingiri GPL\&GNU",
"BPG Mrgvlovani Caps GNU\&GPL",
"BPG Mrgvlovani GPL\&GNU",
"BPG Nateli Caps GPL\&GNU Light",
"BPG Nateli Condenced GPL\&GNU Light",
"BPG Nateli GPL\&GNU Light",
"BPG Nino Medium Cond GPL\&GNU",
"BPG Nino Medium GPL\&GNU Medium",
"BPG Sans GPL\&GNU",
"BPG Sans Medium GPL\&GNU",
"BPG Sans Modern GPL\&GNU",
"BPG Sans Regular GPL\&GNU",
"BPG Serif GPL\&GNU",
"BPG Serif Modern GPL\&GNU",
"FreeSans",
"FreeSerif",
"FreeSerif Bold",
"FreeSerif Bold Italic",
"FreeSerif Italic",
]
KHMER_FONTS = [
"Khmer OS",
"Khmer OS System",
"Khmer OS Battambang",
"Khmer OS Bokor",
"Khmer OS Content",
"Khmer OS Fasthand",
"Khmer OS Freehand",
"Khmer OS Metal Chrieng",
"Khmer OS Muol Light",
"Khmer OS Muol Pali",
"Khmer OS Muol",
"Khmer OS Siemreap",
"Noto Sans Bold",
"Noto Sans",
"Noto Serif Khmer Bold",
"Noto Serif Khmer Light",
]
KURDISH_FONTS = [
"Amiri Bold Italic",
"Amiri Bold",
"Amiri Italic",
"Amiri",
"Arial Unicode MS",
"Arial Unicode MS Bold",
"Lateef",
"Lucida Bright",
"Lucida Sans Oblique",
"Lucida Sans Semi-Bold",
"Lucida Sans",
"Lucida Sans Typewriter Bold",
"Lucida Sans Typewriter Oblique",
"Lucida Sans Typewriter",
"Scheherazade",
"Tahoma",
"Times New Roman,",
"Times New Roman, Bold",
"Times New Roman, Bold Italic",
"Times New Roman, Italic",
"Unikurd Web",
"Yakout Linotype Bold",
"Yakout Linotype",
]
LAOTHIAN_FONTS = [
"Phetsarath OT",
"Arial Unicode MS",
"Arial Unicode MS Bold",
"Ascender Uni",
"Dhyana Bold",
"Dhyana",
"Lao Muang Don",
"Lao Muang Khong",
"Lao Sans Pro",
"Noto Sans Lao Bold",
"Noto Sans Lao",
"Noto Sans Lao UI Bold",
"Noto Sans Lao UI",
"Noto Serif Lao Bold",
"Noto Serif Lao",
"Phetsarath Bold",
"Phetsarath",
"Souliyo Unicode",
]
GUJARATI_FONTS = [
"Lohit Gujarati",
"Rekha Medium",
"Samyak Gujarati Medium",
"aakar Medium",
"padmaa Bold",
"padmaa Medium",
"Arial Unicode MS",
"Arial Unicode MS Bold",
"Ascender Uni",
"FreeSans",
"Noto Sans Gujarati Bold",
"Noto Sans Gujarati",
"Shruti",
"Shruti Bold",
]
MALAYALAM_FONTS = [
"AnjaliOldLipi",
"Arial Unicode MS",
"Arial Unicode MS Bold",
"Ascender Uni",
"Dyuthi",
"FreeSerif",
"Kalyani",
"Kartika",
"Kartika Bold",
"Lohit Malayalam",
"Meera",
"Noto Sans Malayalam Bold",
"Noto Sans Malayalam",
"Rachana",
"Rachana_w01",
"RaghuMalayalam",
"suruma",
]
ORIYA_FONTS = [
"Arial Unicode MS",
"Arial Unicode MS Bold",
"Ascender Uni",
"ori1Uni Medium",
"Samyak Oriya Medium",
"Lohit Oriya",
]
PUNJABI_FONTS = [
"Arial Unicode MS",
"Arial Unicode MS Bold",
"Ascender Uni",
"Saab",
"Lohit Punjabi",
"Noto Sans Gurmukhi",
"Noto Sans Gurmukhi Bold",
"FreeSans",
"FreeSans Bold",
"FreeSerif",
]
SINHALA_FONTS = [
"Noto Sans Sinhala Bold",
"Noto Sans Sinhala",
"OCRUnicode",
"Yagpo",
"LKLUG",
"FreeSerif",
]
SYRIAC_FONTS = [
"East Syriac Adiabene",
"East Syriac Ctesiphon",
"Estrangelo Antioch",
"Estrangelo Edessa",
"Estrangelo Midyat",
"Estrangelo Nisibin",
"Estrangelo Quenneshrin",
"Estrangelo Talada",
"Estrangelo TurAbdin",
"Serto Batnan Bold",
"Serto Batnan",
"Serto Jerusalem Bold",
"Serto Jerusalem Italic",
"Serto Jerusalem",
"Serto Kharput",
"Serto Malankara",
"Serto Mardin Bold",
"Serto Mardin",
"Serto Urhoy Bold",
"Serto Urhoy",
"FreeSans",
]
THAANA_FONTS = ["FreeSerif"]
TIBETAN_FONTS = [
"Arial Unicode MS",
"Arial Unicode MS Bold",
"Ascender Uni",
"DDC Uchen",
"Jomolhari",
"Kailasa",
"Kokonor",
"Tibetan Machine Uni",
"TibetanTsugRing",
"Yagpo",
]
# The following fonts will be rendered vertically in phase I.
VERTICAL_FONTS = [
"TakaoExGothic",
"TakaoExMincho",
"AR PL UKai Patched",
"AR PL UMing Patched Light",
"Baekmuk Batang Patched",
]
FLAGS_webtext_prefix = os.environ.get("FLAGS_webtext_prefix", "")
# Set language-specific values for several global variables, including
# ${TEXT_CORPUS}
# holds the text corpus file for the language, used in phase F
# ${FONTS[@]}
# holds a sequence of applicable fonts for the language, used in
# phase F & I. only set if not already set, i.e. from command line
# ${TRAINING_DATA_ARGUMENTS}
# non-default arguments to the training_data program used in phase T
# ${FILTER_ARGUMENTS}
#   character-code-specific filtering to distinguish between scripts
#   (e.g. CJK) used by filter_forbidden_characters in phase F
# ${WORDLIST2DAWG_ARGUMENTS}
# specify fixed length dawg generation for non-space-delimited lang
# TODO(dsl): We can refactor these into functions that assign FONTS,
# TEXT_CORPUS, etc. separately.
def set_lang_specific_parameters(ctx, lang):
# The default text location is now given directly from the language code.
TEXT_CORPUS = f"{FLAGS_webtext_prefix}/{lang}.corpus.txt"
FILTER_ARGUMENTS = []
WORDLIST2DAWG_ARGUMENTS = ""
# These dawg factors represent the fraction of the corpus not covered by the
# dawg, and seem like reasonable defaults, but the optimal value is likely
# to be highly corpus-dependent, as well as somewhat language-dependent.
# Number dawg factor is the fraction of all numeric strings that are not
# covered, which is why it is higher relative to the others.
PUNC_DAWG_FACTOR = None
NUMBER_DAWG_FACTOR = 0.125
WORD_DAWG_FACTOR = 0.05
BIGRAM_DAWG_FACTOR = 0.015
TRAINING_DATA_ARGUMENTS = []
FRAGMENTS_DISABLED = "y"
RUN_SHAPE_CLUSTERING = False
AMBIGS_FILTER_DENOMINATOR = "100000"
LEADING = 32
MEAN_COUNT = 40 # Default for latin script.
# Language to mix with the language for maximum accuracy. Defaults to eng.
# If no language is good, set to the base language.
MIX_LANG = "eng"
FONTS = ctx.fonts
TEXT2IMAGE_EXTRA_ARGS = []
EXPOSURES = []
GENERATE_WORD_BIGRAMS = None
WORD_DAWG_SIZE = None
# Latin languages.
if lang == "enm":
TEXT2IMAGE_EXTRA_ARGS += ["--ligatures"] # Add ligatures when supported
if not FONTS:
FONTS = EARLY_LATIN_FONTS
elif lang == "frm":
TEXT_CORPUS = f"{FLAGS_webtext_prefix}/fra.corpus.txt"
# Make long-s substitutions for Middle French text
FILTER_ARGUMENTS += ["--make_early_language_variant=fra"]
TEXT2IMAGE_EXTRA_ARGS += ["--ligatures"] # Add ligatures when supported.
if not FONTS:
FONTS = EARLY_LATIN_FONTS
elif lang == "frk":
TEXT_CORPUS = f"{FLAGS_webtext_prefix}/deu.corpus.txt"
if not FONTS:
FONTS = FRAKTUR_FONTS
elif lang == "ita_old":
TEXT_CORPUS = f"{FLAGS_webtext_prefix}/ita.corpus.txt"
# Make long-s substitutions for Early Italian text
FILTER_ARGUMENTS += ["--make_early_language_variant=ita"]
TEXT2IMAGE_EXTRA_ARGS += ["--ligatures"] # Add ligatures when supported.
if not FONTS:
FONTS = EARLY_LATIN_FONTS
elif lang == "lat":
if not EXPOSURES:
EXPOSURES = "-3 -2 -1 0 1 2 3".split()
if not FONTS:
FONTS = NEOLATIN_FONTS
elif lang == "spa_old":
TEXT_CORPUS = f"{FLAGS_webtext_prefix}/spa.corpus.txt"
# Make long-s substitutions for Early Spanish text
FILTER_ARGUMENTS += ["--make_early_language_variant=spa"]
TEXT2IMAGE_EXTRA_ARGS += ["--ligatures"] # Add ligatures when supported.
if not FONTS:
FONTS = EARLY_LATIN_FONTS
elif lang == "srp_latn":
TEXT_CORPUS = f"{FLAGS_webtext_prefix}/srp.corpus.txt"
elif lang == "vie":
TRAINING_DATA_ARGUMENTS += ["--infrequent_ratio=10000"]
if not FONTS:
FONTS = VIETNAMESE_FONTS
# Highly inflective languages get a bigger dawg size.
# TODO(rays) Add more here!
elif lang == "hun":
WORD_DAWG_SIZE = 1_000_000
elif lang == "pol":
WORD_DAWG_SIZE = 1_000_000
# Latin with default treatment.
elif lang == "afr":
pass
elif lang == "aze":
pass
elif lang == "bos":
pass
elif lang == "cat":
pass
elif lang == "ceb":
pass
elif lang == "ces":
PUNC_DAWG_FACTOR = 0.004
elif lang == "cym":
pass
elif lang == "dan":
pass
elif lang == "deu":
WORD_DAWG_FACTOR = 0.125
elif lang == "eng":
WORD_DAWG_FACTOR = 0.03
elif lang == "epo":
pass
elif lang == "est":
pass
elif lang == "eus":
pass
elif lang == "fil":
pass
elif lang == "fin":
pass
elif lang == "fra":
WORD_DAWG_FACTOR = 0.08
elif lang == "gle":
pass
elif lang == "gle_uncial":
if not FONTS:
FONTS = IRISH_UNCIAL_FONTS
elif lang == "glg":
pass
elif lang == "hat":
pass
elif lang == "hrv":
pass
elif lang == "iast":
pass
elif lang == "ind":
pass
elif lang == "isl":
pass
elif lang == "ita":
pass
elif lang == "jav":
pass
elif lang == "lav":
pass
elif lang == "lit":
pass
elif lang == "mlt":
pass
elif lang == "msa":
pass
elif lang == "nld":
WORD_DAWG_FACTOR = 0.02
elif lang == "nor":
pass
elif lang == "por":
pass
elif lang == "ron":
pass
elif lang == "slk":
pass
elif lang == "slv":
pass
elif lang == "spa":
pass
elif lang == "sqi":
pass
elif lang == "swa":
pass
elif lang == "swe":
pass
elif lang == "tgl":
pass
elif lang == "tur":
pass
elif lang == "uzb":
pass
elif lang == "zlm":
pass
# Special code for performing language-id that is trained on
# EFIGS+Latin+Vietnamese text with regular + fraktur fonts.
elif lang == "lat_lid":
TEXT_CORPUS = f"{FLAGS_webtext_prefix}/lat_lid.corpus.txt"
TRAINING_DATA_ARGUMENTS += ["--infrequent_ratio=10000"]
GENERATE_WORD_BIGRAMS = 0
# Strip unrenderable words as not all fonts will render the extended
# latin symbols found in Vietnamese text.
WORD_DAWG_SIZE = 1_000_000
if not FONTS:
FONTS = EARLY_LATIN_FONTS
# Cyrillic script-based languages. It is bad to mix Latin with Cyrillic.
elif lang == "rus":
if not FONTS:
FONTS = RUSSIAN_FONTS
MIX_LANG = "rus"
NUMBER_DAWG_FACTOR = 0.05
WORD_DAWG_SIZE = 1_000_000
elif lang in (
"aze_cyrl",
"bel",
"bul",
"kaz",
"mkd",
"srp",
"tgk",
"ukr",
"uzb_cyrl",
):
MIX_LANG = f"{lang}"
if not FONTS:
FONTS = RUSSIAN_FONTS
# Special code for performing Cyrillic language-id that is trained on
# Russian, Serbian, Ukrainian, Belarusian, Macedonian, Tajik and Mongolian
# text with the list of Russian fonts.
elif lang == "cyr_lid":
TEXT_CORPUS = f"{FLAGS_webtext_prefix}/cyr_lid.corpus.txt"
TRAINING_DATA_ARGUMENTS += ["--infrequent_ratio=10000"]
GENERATE_WORD_BIGRAMS = 0
WORD_DAWG_SIZE = 1_000_000
if not FONTS:
FONTS = RUSSIAN_FONTS
# South Asian scripts mostly have a lot of different graphemes, so trim
# down the MEAN_COUNT so as not to get a huge amount of text.
elif lang in ("asm", "ben"):
MEAN_COUNT = 15
WORD_DAWG_FACTOR = 0.15
if not FONTS:
FONTS = BENGALI_FONTS
elif lang in ("bih", "hin", "mar", "nep", "san"):
MEAN_COUNT = 15
WORD_DAWG_FACTOR = 0.15
if not FONTS:
FONTS = DEVANAGARI_FONTS
elif lang == "bod":
MEAN_COUNT = 15
WORD_DAWG_FACTOR = 0.15
if not FONTS:
FONTS = TIBETAN_FONTS
elif lang == "dzo":
WORD_DAWG_FACTOR = 0.01
if not FONTS:
FONTS = TIBETAN_FONTS
elif lang == "guj":
MEAN_COUNT = 15
WORD_DAWG_FACTOR = 0.15
if not FONTS:
FONTS = GUJARATI_FONTS
elif lang == "kan":
MEAN_COUNT = 15
WORD_DAWG_FACTOR = 0.15
TRAINING_DATA_ARGUMENTS += ["--no_newline_in_output"]
TEXT2IMAGE_EXTRA_ARGS += ["--char_spacing=0.5"]
if not FONTS:
FONTS = KANNADA_FONTS
elif lang == "mal":
MEAN_COUNT = 15
WORD_DAWG_FACTOR = 0.15
TRAINING_DATA_ARGUMENTS += ["--no_newline_in_output"]
TEXT2IMAGE_EXTRA_ARGS += ["--char_spacing=0.5"]
if not FONTS:
FONTS = MALAYALAM_FONTS
elif lang == "ori":
WORD_DAWG_FACTOR = 0.01
if not FONTS:
FONTS = ORIYA_FONTS
elif lang == "pan":
MEAN_COUNT = 15
WORD_DAWG_FACTOR = 0.01
if not FONTS:
FONTS = PUNJABI_FONTS
elif lang == "sin":
MEAN_COUNT = 15
WORD_DAWG_FACTOR = 0.01
if not FONTS:
FONTS = SINHALA_FONTS
elif lang == "tam":
MEAN_COUNT = 30
WORD_DAWG_FACTOR = 0.15
TRAINING_DATA_ARGUMENTS += ["--no_newline_in_output"]
TEXT2IMAGE_EXTRA_ARGS += ["--char_spacing=0.5"]
if not FONTS:
FONTS = TAMIL_FONTS
elif lang == "tel":
MEAN_COUNT = 15
WORD_DAWG_FACTOR = 0.15
TRAINING_DATA_ARGUMENTS += ["--no_newline_in_output"]
TEXT2IMAGE_EXTRA_ARGS += ["--char_spacing=0.5"]
if not FONTS:
FONTS = TELUGU_FONTS
# SouthEast Asian scripts.
elif lang == "jav_java":
MEAN_COUNT = 15
WORD_DAWG_FACTOR = 0.15
TRAINING_DATA_ARGUMENTS += ["--infrequent_ratio=10000"]
if not FONTS:
FONTS = JAVANESE_FONTS
elif lang == "khm":
MEAN_COUNT = 15
WORD_DAWG_FACTOR = 0.15
TRAINING_DATA_ARGUMENTS += ["--infrequent_ratio=10000"]
if not FONTS:
FONTS = KHMER_FONTS
elif lang == "lao":
MEAN_COUNT = 15
WORD_DAWG_FACTOR = 0.15
TRAINING_DATA_ARGUMENTS += ["--infrequent_ratio=10000"]
if not FONTS:
FONTS = LAOTHIAN_FONTS
elif lang == "mya":
MEAN_COUNT = 12
WORD_DAWG_FACTOR = 0.15
TRAINING_DATA_ARGUMENTS += ["--infrequent_ratio=10000"]
if not FONTS:
FONTS = BURMESE_FONTS
elif lang == "tha":
MEAN_COUNT = 30
WORD_DAWG_FACTOR = 0.01
TRAINING_DATA_ARGUMENTS += ["--infrequent_ratio=10000"]
FILTER_ARGUMENTS += ["--segmenter_lang=tha"]
TRAINING_DATA_ARGUMENTS += ["--no_space_in_output", "--desired_bigrams="]
AMBIGS_FILTER_DENOMINATOR = "1000"
LEADING = 48
if not FONTS:
FONTS = THAI_FONTS
# CJK
elif lang == "chi_sim":
MEAN_COUNT = 15
PUNC_DAWG_FACTOR = 0.015
WORD_DAWG_FACTOR = 0.015
GENERATE_WORD_BIGRAMS = 0
TRAINING_DATA_ARGUMENTS += ["--infrequent_ratio=10000"]
TRAINING_DATA_ARGUMENTS += ["--no_space_in_output", "--desired_bigrams="]
FILTER_ARGUMENTS += ["--charset_filter=chi_sim", "--segmenter_lang=chi_sim"]
if not FONTS:
FONTS = CHI_SIM_FONTS
elif lang == "chi_tra":
MEAN_COUNT = 15
WORD_DAWG_FACTOR = 0.015
GENERATE_WORD_BIGRAMS = 0
TRAINING_DATA_ARGUMENTS += ["--infrequent_ratio=10000"]
TRAINING_DATA_ARGUMENTS += ["--no_space_in_output", "--desired_bigrams="]
FILTER_ARGUMENTS += ["--charset_filter=chi_tr", "--segmenter_lang=chi_tra"]
if not FONTS:
FONTS = CHI_TRA_FONTS
elif lang == "jpn":
MEAN_COUNT = 15
WORD_DAWG_FACTOR = 0.015
GENERATE_WORD_BIGRAMS = 0
TRAINING_DATA_ARGUMENTS += ["--infrequent_ratio=10000"]
TRAINING_DATA_ARGUMENTS += ["--no_space_in_output", "--desired_bigrams="]
FILTER_ARGUMENTS += ["--charset_filter=jpn", "--segmenter_lang=jpn"]
if not FONTS:
FONTS = JPN_FONTS
elif lang == "kor":
MEAN_COUNT = 20
WORD_DAWG_FACTOR = 0.015
NUMBER_DAWG_FACTOR = 0.05
TRAINING_DATA_ARGUMENTS += ["--infrequent_ratio=10000"]
TRAINING_DATA_ARGUMENTS += ["--desired_bigrams="]
GENERATE_WORD_BIGRAMS = 0
FILTER_ARGUMENTS += ["--charset_filter=kor", "--segmenter_lang=kor"]
if not FONTS:
FONTS = KOREAN_FONTS
# Middle-Eastern scripts.
elif lang == "ara":
if not FONTS:
FONTS = ARABIC_FONTS
elif lang == "div":
if not FONTS:
FONTS = THAANA_FONTS
elif lang in ("fas", "pus", "snd", "uig", "urd"):
if not FONTS:
FONTS = PERSIAN_FONTS
elif lang in ("heb", "yid"):
NUMBER_DAWG_FACTOR = 0.05
WORD_DAWG_FACTOR = 0.08
if not FONTS:
FONTS = HEBREW_FONTS
elif lang == "syr":
if not FONTS:
FONTS = SYRIAC_FONTS
# Other scripts.
elif lang in ("amh", "tir"):
if not FONTS:
FONTS = AMHARIC_FONTS
elif lang == "chr":
if not FONTS:
FONTS = [*NORTH_AMERICAN_ABORIGINAL_FONTS, "Noto Sans Cherokee"]
elif lang == "ell":
NUMBER_DAWG_FACTOR = 0.05
WORD_DAWG_FACTOR = 0.08
if not FONTS:
FONTS = GREEK_FONTS
elif lang == "grc":
if not EXPOSURES:
EXPOSURES = "-3 -2 -1 0 1 2 3".split()
if not FONTS:
FONTS = ANCIENT_GREEK_FONTS
elif lang == "hye":
if not FONTS:
FONTS = ARMENIAN_FONTS
elif lang == "iku":
if not FONTS:
FONTS = NORTH_AMERICAN_ABORIGINAL_FONTS
elif lang == "kat":
if not FONTS:
FONTS = GEORGIAN_FONTS
elif lang == "kat_old":
TEXT_CORPUS = f"{FLAGS_webtext_prefix}/kat.corpus.txt"
if not FONTS:
FONTS = OLD_GEORGIAN_FONTS
elif lang == "kir":
if not FONTS:
FONTS = KYRGYZ_FONTS
TRAINING_DATA_ARGUMENTS += ["--infrequent_ratio=100"]
elif lang == "kmr":
if not FONTS:
FONTS = LATIN_FONTS
elif lang == "kur_ara":
if not FONTS:
FONTS = KURDISH_FONTS
else:
raise ValueError(f"Error: {lang} is not a valid language code")
FLAGS_mean_count = int(os.environ.get("FLAGS_mean_count", -1))
if FLAGS_mean_count > 0:
TRAINING_DATA_ARGUMENTS += [f"--mean_count={FLAGS_mean_count}"]
    elif MEAN_COUNT:
        # Fall back to the language-specific default when no flag is given.
        TRAINING_DATA_ARGUMENTS += [f"--mean_count={MEAN_COUNT}"]
# Default to Latin fonts if none have been set
if not FONTS:
FONTS = LATIN_FONTS
# Default to 0 exposure if it hasn't been set
if not EXPOSURES:
EXPOSURES = [0]
# Set right-to-left and normalization mode.
if lang in (
"ara",
"div",
"fas",
"pus",
"snd",
"syr",
"uig",
"urd",
"kur_ara",
"heb",
"yid",
):
LANG_IS_RTL = True
NORM_MODE = 2
elif lang in (
"asm",
"ben",
"bih",
"hin",
"mar",
"nep",
"guj",
"kan",
"mal",
"tam",
"tel",
"pan",
"dzo",
"sin",
"san",
"bod",
"ori",
"khm",
"mya",
"tha",
"lao",
"jav ",
"jav_java",
):
LANG_IS_RTL = False
NORM_MODE = 2
else:
LANG_IS_RTL = False
NORM_MODE = 1
vars_to_transfer = {
'ambigs_filter_denominator': AMBIGS_FILTER_DENOMINATOR,
'bigram_dawg_factor': BIGRAM_DAWG_FACTOR,
'exposures': EXPOSURES,
'filter_arguments': FILTER_ARGUMENTS,
'fonts': FONTS,
'fragments_disabled': FRAGMENTS_DISABLED,
'generate_word_bigrams': GENERATE_WORD_BIGRAMS,
'lang_is_rtl': LANG_IS_RTL,
'leading': LEADING,
'mean_count': MEAN_COUNT,
'mix_lang': MIX_LANG,
'norm_mode': NORM_MODE,
'number_dawg_factor': NUMBER_DAWG_FACTOR,
'punc_dawg_factor': PUNC_DAWG_FACTOR,
'run_shape_clustering': RUN_SHAPE_CLUSTERING,
'text2image_extra_args': TEXT2IMAGE_EXTRA_ARGS,
'text_corpus': TEXT_CORPUS,
'training_data_arguments': TRAINING_DATA_ARGUMENTS,
'word_dawg_factor': WORD_DAWG_FACTOR,
'word_dawg_size': WORD_DAWG_SIZE,
'wordlist2dawg_arguments': WORDLIST2DAWG_ARGUMENTS,
}
for attr, value in vars_to_transfer.items():
if hasattr(ctx, attr):
if getattr(ctx, attr) != value:
log.debug(f"{attr} = {value} (was {getattr(ctx, attr)})")
setattr(ctx, attr, value)
else:
log.debug(f"{attr} = {value} (set on cmdline)")
else:
log.debug(f"{attr} = {value}")
setattr(ctx, attr, value)
return ctx
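# Illustrative usage sketch (not part of the original module): inspect the
# derived settings for a single language. The SimpleNamespace below merely
# stands in for the real TrainingArguments context, and "eng" is only an
# example language code.
if __name__ == "__main__":
    from types import SimpleNamespace

    _demo_ctx = set_lang_specific_parameters(SimpleNamespace(fonts=[]), "eng")
    print(_demo_ctx.mean_count, _demo_ctx.word_dawg_factor, len(_demo_ctx.fonts), _demo_ctx.lang_is_rtl)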
| 37,240 | 25.487198 | 88 |
py
|
tesstrain
|
tesstrain-main/src/tesstrain/arguments.py
|
# (C) Copyright 2014, Google Inc.
# (C) Copyright 2018, James R Barlow
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Argument handling utilities.
"""
import argparse
import atexit
import logging
import os
import pathlib
import platform
from datetime import date
from tempfile import TemporaryDirectory, mkdtemp
from tesstrain.generate import err_exit
log = logging.getLogger(__name__)
class TrainingArguments(argparse.Namespace):
def __init__(self):
super(TrainingArguments, self).__init__()
self.uname = platform.uname().system.lower()
self.lang_code = "eng"
self.timestamp = str(date.today())
self._font_config_cache = TemporaryDirectory(prefix="font_tmp")
self.font_config_cache = self._font_config_cache.name
self.fonts_dir = (
"/Library/Fonts/" if "darwin" in self.uname else "/usr/share/fonts/"
)
self.max_pages = 0
self.save_box_tiff = False
self.overwrite = False
self.linedata = False
self.run_shape_clustering = False
self.extract_font_properties = True
self.distort_image = False
def __eq__(self, other):
return (
argparse.Namespace.__eq__(self, other) and
self.uname == other.uname and
self.lang_code == other.lang_code and
self.timestamp == other.timestamp and
self.font_config_cache == other.font_config_cache and
self.fonts_dir == other.fonts_dir and
self.max_pages == other.max_pages and
self.save_box_tiff == other.save_box_tiff and
self.overwrite == other.overwrite and
self.linedata == other.linedata and
self.run_shape_clustering == other.run_shape_clustering and
self.extract_font_properties == other.extract_font_properties and
self.distort_image == other.distort_image
)
def get_argument_parser():
parser = argparse.ArgumentParser(
prog='tesstrain',
epilog="""
The font names specified in --fontlist need to be recognizable by Pango using
fontconfig. An easy way to list the canonical names of all fonts available on
your system is to run text2image with --list_available_fonts and the
appropriate --fonts_dir path.
"""
)
parser.add_argument(
"--fontlist",
dest="fonts",
nargs="+",
type=str,
help="A list of fontnames to train on.",
)
parser.add_argument(
"--vertical_fontlist",
dest="vertical_fonts",
nargs="+",
type=str,
help="A list of fontnames to render vertical text.",
)
parser.add_argument("--fonts_dir", help="Path to font files.")
parser.add_argument("--tmp_dir", help="Path to temporary training directory.")
parser.add_argument(
"--lang", metavar="LANG_CODE", dest="lang_code", help="ISO 639 code."
)
parser.add_argument(
"--langdata_dir",
metavar="DATADIR",
help="Path to tesseract/training/langdata directory.",
)
parser.add_argument("--maxpages", type=int, dest="max_pages")
parser.add_argument(
"--output_dir", metavar="OUTPUTDIR", help="Location of output traineddata file."
)
parser.add_argument(
"--overwrite", action="store_true", help="Safe to overwrite files in output_dir."
)
parser.add_argument(
"--save_box_tiff",
action="store_true",
help="Save box/tiff pairs along with lstmf files.",
)
parser.add_argument(
"--linedata_only",
dest="linedata",
action="store_true",
help="Only generate training data for lstmtraining.",
)
inputdata_group = parser.add_argument_group(
"inputdata",
"OPTIONAL flags for input data. If unspecified we will look for them in the langdata_dir directory.",
)
inputdata_group.add_argument(
"--training_text", metavar="TEXTFILE", help="Text to render and use for training."
)
inputdata_group.add_argument(
"--wordlist",
dest="wordlist_file",
metavar="WORDFILE",
help="Word list for the language ordered by decreasing frequency.",
)
parser.add_argument("--extract_font_properties", action="store_true")
parser.add_argument(
"--noextract_font_properties", dest="extract_font_properties", action="store_false"
)
parser.add_argument(
"--distort_image", dest="distort_image", action="store_true"
)
tessdata_group = parser.add_argument_group(
"tessdata",
(
"OPTIONAL flag to specify location of existing traineddata files, required during feature extraction. "
"If unspecified will use TESSDATA_PREFIX defined in the current environment."
),
)
tessdata_group.add_argument(
"--tessdata_dir",
metavar="TESSDATADIR",
help="Path to tesseract/tessdata directory.",
)
parser.add_argument(
"--exposures",
metavar="EXPOSURES",
action="append",
nargs="+",
help="A list of exposure levels to use (e.g. -1,0,1).",
)
parser.add_argument(
"--ptsize",
metavar="PT_SIZE",
type=int,
default=12,
help="Size of printed text.",
)
return parser
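# Illustrative only (not used by the pipeline): the parser can be pointed at an
# explicit argument list together with a TrainingArguments namespace. The option
# values below are placeholders, not project defaults.
def _example_parse_args():
    parser = get_argument_parser()
    return parser.parse_args(
        ["--lang", "eng", "--langdata_dir", "/path/to/langdata", "--fontlist", "Arial"],
        namespace=TrainingArguments(),
    )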
def verify_parameters_and_handle_defaults(ctx):
log.debug(ctx)
if not ctx.lang_code:
err_exit("Need to specify a language --lang")
if not ctx.langdata_dir:
err_exit("Need to specify path to language files --langdata_dir")
if not ctx.tessdata_dir:
tessdata_prefix = os.environ.get("TESSDATA_PREFIX", "")
if not tessdata_prefix:
err_exit(
"Need to specify a --tessdata_dir or have a "
"TESSDATA_PREFIX variable defined in your environment"
)
else:
ctx.tessdata_dir = tessdata_prefix
if not ctx.output_dir:
ctx.output_dir = mkdtemp(prefix=f"trained-{ctx.lang_code}-{ctx.timestamp}")
log.info(f"Output directory set to: {ctx.output_dir}")
# Location where intermediate files will be created.
if not ctx.tmp_dir:
ctx.training_dir = mkdtemp(prefix=f"{ctx.lang_code}-{ctx.timestamp}")
else:
ctx.training_dir = mkdtemp(prefix=f"{ctx.lang_code}-{ctx.timestamp}", dir=ctx.tmp_dir)
# Location of log file for the whole run.
ctx.log_file = pathlib.Path(ctx.training_dir) / "tesstrain.log"
log.info(f"Log file location: {ctx.log_file}")
def show_tmpdir_location(training_dir):
# On successful exit we will delete this first; on failure we want to let the user
# know where the log is
if pathlib.Path(training_dir).exists():
print(f"Temporary files retained at: {training_dir}")
atexit.register(show_tmpdir_location, ctx.training_dir)
# Take training text and wordlist from the langdata directory if not
# specified in the command-line.
if not ctx.training_text:
ctx.training_text = (
pathlib.Path(ctx.langdata_dir) / ctx.lang_code / f"{ctx.lang_code}.training_text"
)
if not ctx.wordlist_file:
ctx.wordlist_file = (
pathlib.Path(ctx.langdata_dir) / ctx.lang_code / f"{ctx.lang_code}.wordlist"
)
ctx.word_bigrams_file = (
pathlib.Path(ctx.langdata_dir) / ctx.lang_code / f"{ctx.lang_code}.word.bigrams"
)
ctx.numbers_file = (
pathlib.Path(ctx.langdata_dir) / ctx.lang_code / f"{ctx.lang_code}.numbers"
)
ctx.punc_file = pathlib.Path(ctx.langdata_dir) / ctx.lang_code / f"{ctx.lang_code}.punc"
ctx.bigram_freqs_file = pathlib.Path(ctx.training_text).with_suffix(
".training_text.bigram_freqs"
)
ctx.unigram_freqs_file = pathlib.Path(ctx.training_text).with_suffix(
".training_text.unigram_freqs"
)
ctx.train_ngrams_file = pathlib.Path(ctx.training_text).with_suffix(
".training_text.train_ngrams"
)
ctx.generate_dawgs = 1
log.debug(ctx)
return ctx
| 8,597 | 33.95122 | 115 |
py
|
tesstrain
|
tesstrain-main/src/tesstrain/generate.py
|
# (C) Copyright 2014, Google Inc.
# (C) Copyright 2018, James R Barlow
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility for generating the various files.
For a detailed description of the phases, see
https://tesseract-ocr.github.io/tessdoc/Training-Tesseract.html.
"""
import concurrent.futures
import logging
import os
import pathlib
import shutil
import subprocess
import sys
from operator import itemgetter
from tqdm import tqdm
from tesstrain.language_specific import VERTICAL_FONTS
log = logging.getLogger(__name__)
def err_exit(msg):
log.critical(msg)
sys.exit(1)
def run_command(cmd, *args, env=None):
"""
Helper function to run a command and append its output to a log. Aborts early if
the program file is not found.
"""
    for d in ("", "api/", "training/"):
        testcmd = shutil.which(f"{d}{cmd}")
        # shutil.which() returns None when nothing is found; guard against
        # passing None back into it on the next check.
        if testcmd:
            cmd = testcmd
            break
    if not shutil.which(cmd):
        err_exit(f"{cmd} not found")
log.debug(f"Running {cmd}")
args = list(args)
for idx, arg in enumerate(args):
log.debug(arg)
# Workaround for https://bugs.python.org/issue33617
# TypeError: argument of type 'WindowsPath' is not iterable
if isinstance(arg, pathlib.WindowsPath):
args[idx] = str(arg)
proc = subprocess.run(
[cmd, *args], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env
)
proclog = logging.getLogger(cmd)
if proc.returncode == 0:
proclog.debug(proc.stdout.decode("utf-8", errors="replace"))
else:
try:
proclog.error(proc.stdout.decode("utf-8", errors="replace"))
except Exception as e:
proclog.error(e)
err_exit(f"Program {cmd} failed with return code {proc.returncode}. Abort.")
def check_file_readable(*filenames):
"""
Check if all the given files exist, or exit otherwise.
Used to check required input files and produced output files in each phase.
"""
if isinstance(filenames, (str, pathlib.Path)):
filenames = [filenames]
for filename in filenames:
try:
with pathlib.Path(filename).open():
pass
except FileNotFoundError:
err_exit(f"Required/expected file '{filename}' does not exist")
except PermissionError:
err_exit(f"{filename} is not readable")
except IOError as e:
err_exit(f"{filename} IO Error: {str(e)}")
return True
def cleanup(ctx):
if os.path.exists(ctx.log_file):
shutil.copy(ctx.log_file, ctx.output_dir)
shutil.rmtree(ctx.training_dir)
def initialize_fontconfig(ctx):
"""
Initialize the font configuration with a unique font cache directory.
"""
sample_path = pathlib.Path(ctx.font_config_cache) / "sample_text.txt"
pathlib.Path(sample_path).write_text("Text\n")
log.info(f"Testing font: {ctx.fonts[0]}")
run_command(
"text2image",
f"--fonts_dir={ctx.fonts_dir}",
f"--font={ctx.fonts[0]}",
f"--outputbase={sample_path}",
f"--text={sample_path}",
f"--fontconfig_tmpdir={ctx.font_config_cache}",
f"--ptsize={ctx.ptsize}",
)
def make_fontname(font):
return font.replace(" ", "_").replace(",", "")
def make_outbase(ctx, fontname, exposure):
return pathlib.Path(ctx.training_dir) / f"{ctx.lang_code}.{fontname}.exp{exposure}"
def generate_font_image(ctx, font, exposure, char_spacing):
"""
Helper function for `phaseI_generate_image`.
Generates the image for a single language/font combination in a way that can be run
in parallel.
"""
log.info(f"Rendering using {font}")
fontname = make_fontname(font)
outbase = make_outbase(ctx, fontname, exposure)
common_args = [
f"--fontconfig_tmpdir={ctx.font_config_cache}",
f"--fonts_dir={ctx.fonts_dir}",
f"--strip_unrenderable_words",
f"--leading={ctx.leading}",
f"--char_spacing={char_spacing}",
f"--exposure={exposure}",
f"--outputbase={outbase}",
f"--max_pages={ctx.max_pages}",
]
if ctx.distort_image:
common_args.append("--distort_image")
# add --writing_mode=vertical-upright to common_args if the font is
# specified to be rendered vertically.
vertical_fonts = ctx.vertical_fonts or VERTICAL_FONTS
if font in vertical_fonts:
common_args.append("--writing_mode=vertical-upright")
run_command(
"text2image",
*common_args,
f"--font={font}",
f"--text={ctx.training_text}",
f"--ptsize={ctx.ptsize}",
*ctx.text2image_extra_args,
)
check_file_readable(str(outbase) + ".box", str(outbase) + ".tif")
if ctx.extract_font_properties and pathlib.Path(ctx.train_ngrams_file).exists():
log.info(f"Extracting font properties of {font}")
run_command(
"text2image",
*common_args,
f"--font={font}",
f"--ligatures=false",
f"--text={ctx.train_ngrams_file}",
f"--only_extract_font_properties",
f"--ptsize=32",
)
check_file_readable(str(outbase) + ".fontinfo")
return f"{font}-{exposure}"
def phase_I_generate_image(ctx, par_factor=None):
"""
Phase I: Generate (I)mages from training text for each font.
"""
if not par_factor or par_factor <= 0:
par_factor = 1
log.info("=== Phase I: Generating training images ===")
check_file_readable(ctx.training_text)
char_spacing = 0.0
for exposure in ctx.exposures:
if ctx.extract_font_properties and pathlib.Path(ctx.bigram_freqs_file).exists():
            # Parse the .bigram_freqs file and compose a .train_ngrams file with text
            # for tesseract to recognize during training. Take only the most frequent
            # ngrams whose combined weight accounts for the fraction p (99%) of all
            # bigrams in the language.
            lines = pathlib.Path(ctx.bigram_freqs_file).read_text(encoding="utf-8").split("\n")
            # Materialize (bigram, count) pairs so they can be summed and then sorted.
            records = [
                (rec[0], int(rec[1]))
                for rec in (line.split() for line in lines)
                if len(rec) >= 2
            ]
            p = 0.99
            ngram_frac = p * sum(count for _, count in records)
            with pathlib.Path(ctx.train_ngrams_file).open("w", encoding="utf-8") as f:
                cumsum = 0
                for bigram, count in sorted(records, key=itemgetter(1), reverse=True):
                    if cumsum > ngram_frac:
                        break
                    f.write(bigram + " ")
                    cumsum += count
check_file_readable(ctx.train_ngrams_file)
with tqdm(
total=len(ctx.fonts)
) as pbar, concurrent.futures.ThreadPoolExecutor(max_workers=par_factor) as executor:
futures = [
executor.submit(generate_font_image, ctx, font, exposure, char_spacing)
for font in ctx.fonts
]
for future in concurrent.futures.as_completed(futures):
try:
future.result()
except Exception as exc:
err_exit("Failed while generating images " + str(exc))
else:
pbar.update(1)
# Check that each process was successful.
for font in ctx.fonts:
fontname = make_fontname(font)
outbase = make_outbase(ctx, fontname, exposure)
check_file_readable(str(outbase) + ".box", str(outbase) + ".tif")
return
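# Illustrative helper (not called by the pipeline): the same selection rule used
# above for .bigram_freqs, applied to an in-memory list of (ngram, count) pairs,
# i.e. keep the most frequent entries until they cover a fraction p of all occurrences.
def _select_top_ngrams(freqs, p=0.99):
    total = sum(count for _, count in freqs)
    selected, cumsum = [], 0
    for ngram, count in sorted(freqs, key=itemgetter(1), reverse=True):
        if cumsum > p * total:
            break
        selected.append(ngram)
        cumsum += count
    return selected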
def phase_UP_generate_unicharset(ctx):
"""
Phase UP: Generate (U)nicharset and (P)roperties file.
"""
log.info("=== Phase UP: Generating unicharset and unichar properties files ===")
box_files = pathlib.Path(ctx.training_dir).glob("*.box")
ctx.unicharset_file = pathlib.Path(ctx.training_dir) / f"{ctx.lang_code}.unicharset"
run_command(
"unicharset_extractor",
"--output_unicharset",
f"{ctx.unicharset_file}",
"--norm_mode",
f"{ctx.norm_mode}",
*box_files,
)
check_file_readable(ctx.unicharset_file)
ctx.xheights_file = pathlib.Path(ctx.training_dir) / f"{ctx.lang_code}.xheights"
run_command(
"set_unicharset_properties",
"-U",
f"{ctx.unicharset_file}",
"-O",
f"{ctx.unicharset_file}",
"-X",
f"{ctx.xheights_file}",
f"--script_dir={ctx.langdata_dir}",
)
check_file_readable(ctx.xheights_file)
def phase_E_extract_features(ctx, box_config, ext):
"""
Phase E: (E)xtract .tr feature files from .tif/.box files.
"""
log.info(f"=== Phase E: Generating {ext} files ===")
img_files = list(pathlib.Path(ctx.training_dir).glob("*.exp*.tif"))
log.debug(img_files)
# Use any available language-specific configs.
config = ""
testconfig = pathlib.Path(ctx.langdata_dir) / ctx.lang_code / f"{ctx.lang_code}.config"
if testconfig.exists():
config = testconfig
log.info(f"Using {ctx.lang_code}.config")
tessdata_environ = os.environ.copy()
tessdata_environ["TESSDATA_PREFIX"] = str(ctx.tessdata_dir)
log.info(f"Using TESSDATA_PREFIX={tessdata_environ['TESSDATA_PREFIX']}")
with tqdm(total=len(img_files)) as pbar, concurrent.futures.ThreadPoolExecutor(
max_workers=2
) as executor:
futures = []
for img_file in img_files:
future = executor.submit(
run_command,
"tesseract",
img_file,
pathlib.Path(img_file).with_suffix(""),
*box_config,
config,
env=tessdata_environ,
)
futures.append(future)
for future in concurrent.futures.as_completed(futures):
try:
future.result()
except Exception as exc:
err_exit("Failed while extracting features: " + str(exc))
else:
pbar.update(1)
# Check that all the output files were produced.
for img_file in img_files:
check_file_readable(pathlib.Path(img_file.with_suffix("." + ext)))
return
def make_lstmdata(ctx):
log.info("=== Constructing LSTM training data ===")
lang_prefix = f"{ctx.langdata_dir}/{ctx.lang_code}/{ctx.lang_code}"
path_output = pathlib.Path(ctx.output_dir)
if not path_output.is_dir():
log.info(f"Creating new directory {ctx.output_dir}")
path_output.mkdir(exist_ok=True, parents=True)
args = []
if ctx.lang_is_rtl:
args.append("--lang_is_rtl")
if ctx.norm_mode >= 2:
args.append("--pass_through_recoder")
# Build the starter traineddata from the inputs.
run_command(
"combine_lang_model",
"--input_unicharset",
f"{ctx.training_dir}/{ctx.lang_code}.unicharset",
"--script_dir",
f"{ctx.langdata_dir}",
"--words",
f"{lang_prefix}.wordlist",
"--numbers",
f"{lang_prefix}.numbers",
"--puncs",
f"{lang_prefix}.punc",
"--output_dir",
f"{ctx.output_dir}",
"--lang",
f"{ctx.lang_code}",
*args,
)
def get_file_list():
training_path = pathlib.Path(ctx.training_dir)
if ctx.save_box_tiff:
log.info("=== Saving box/tiff pairs for training data ===")
yield from training_path.glob(f"{ctx.lang_code}*.box")
yield from training_path.glob(f"{ctx.lang_code}*.tif")
log.info("=== Moving lstmf files for training data ===")
yield from training_path.glob(f"{ctx.lang_code}.*.lstmf")
for f in get_file_list():
log.debug(f"Moving {f} to {path_output / f.name}")
shutil.move(str(f), path_output / f.name)
lstm_list = f"{ctx.output_dir}/{ctx.lang_code}.training_files.txt"
dir_listing = (str(p) for p in path_output.glob(f"{ctx.lang_code}.*.lstmf"))
with pathlib.Path(lstm_list).open(mode="w", encoding="utf-8", newline="\n") as f:
f.write("\n".join(dir_listing))
| 12,537 | 32.257294 | 95 |
py
|
tesstrain
|
tesstrain-main/src/tesstrain/wrapper.py
|
# (C) Copyright 2014, Google Inc.
# (C) Copyright 2018, James R Barlow
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Actual execution logic.
"""
import logging
import sys
from typing import List, Optional
from tesstrain.arguments import (
TrainingArguments,
verify_parameters_and_handle_defaults
)
from tesstrain.generate import (
initialize_fontconfig,
phase_I_generate_image,
phase_UP_generate_unicharset,
phase_E_extract_features,
make_lstmdata,
cleanup,
)
from tesstrain import language_specific
log = logging.getLogger()
def run_from_context(ctx):
if not ctx.linedata:
log.error("--linedata_only is required since only LSTM is supported")
sys.exit(1)
log.info(f"=== Starting training for language {ctx.lang_code}")
ctx = language_specific.set_lang_specific_parameters(ctx, ctx.lang_code)
initialize_fontconfig(ctx)
phase_I_generate_image(ctx, par_factor=8)
phase_UP_generate_unicharset(ctx)
if ctx.linedata:
phase_E_extract_features(ctx, ["lstm.train"], "lstmf")
make_lstmdata(ctx)
def run(
fonts: List[str],
langdata_directory: str,
maximum_pages: int,
fonts_directory: Optional[str] = None,
temporary_directory: Optional[str] = None,
language_code: Optional[str] = None,
output_directory: Optional[str] = None,
overwrite: bool = False, # TODO: Not required anymore.
save_box_tiff: bool = False,
linedata_only: bool = False,
training_text: Optional[str] = None,
wordlist_file: Optional[str] = None,
extract_font_properties: bool = True,
distort_image: bool = False,
tessdata_directory: Optional[str] = None,
exposures: Optional[List[int]] = None,
point_size: int = 12
):
"""
:param fonts: A list of font names to train on. These need to be recognizable by
Pango using fontconfig. An easy way to list the canonical name of all
fonts available on your system is to run text2image with
`--list_available_fonts` and the appropriate `--fonts_dir` path.
:param fonts_directory: Path to font files.
:param temporary_directory: Path to temporary training directory.
:param language_code: ISO 639 language code. Defaults to English.
:param langdata_directory: Path to tesseract/training/langdata directory.
:param maximum_pages: The maximum number of pages to generate.
:param output_directory: Location of generated traineddata file.
:param overwrite: Safe to overwrite files in output directory.
:param save_box_tiff: Save box/tiff pairs along with lstmf files.
:param linedata_only: Only generate training data for lstmtraining.
:param training_text: File with the text to render and use for training. If
unspecified, we will look for it in the langdata
directory.
:param wordlist_file: File with the word list for the language ordered by
decreasing frequency. If unspecified, we will look for it in
the langdata directory.
:param extract_font_properties: Assumes that the input file contains a list of
ngrams. Renders each ngram, extracts spacing
properties and records them in a `.fontinfo` file.
:param distort_image: Degrade rendered image with noise, blur, invert.
:param tessdata_directory: Specify location of existing traineddata files,
required during feature extraction. If set, it should be
the path to the tesseract/tessdata directory. If
unspecified, the `TESSDATA_PREFIX` specified in the
current environment will be used.
:param exposures: A list of exposure levels to use (e.g. `[-1, 0, 1]`). If
unspecified, language-specific ones will be used.
:param point_size: Size of printed text.
"""
ctx = TrainingArguments()
ctx.fonts = fonts
ctx.fonts_dir = fonts_directory if fonts_directory else ctx.fonts_dir
ctx.tmp_dir = temporary_directory
ctx.lang_code = language_code if language_code else ctx.lang_code
ctx.langdata_dir = langdata_directory
ctx.max_pages = maximum_pages
ctx.output_dir = output_directory
ctx.overwrite = overwrite
ctx.save_box_tiff = save_box_tiff
ctx.linedata = linedata_only
ctx.training_text = training_text
ctx.wordlist_file = wordlist_file
ctx.extract_font_properties = extract_font_properties
ctx.distort_image = distort_image
ctx.tessdata_dir = tessdata_directory
ctx.exposures = exposures
ctx.ptsize = point_size
verify_parameters_and_handle_defaults(ctx)
run_from_context(ctx)
cleanup(ctx)
log.info("All done!")
return 0
| 5,445 | 40.257576 | 87 |
py
|
tesstrain
|
tesstrain-main/src/tesstrain/__init__.py
|
# (C) Copyright 2014, Google Inc.
# (C) Copyright 2018, James R Barlow
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tesstrain.wrapper import run
__version__ = '0.1'
| 663 | 40.5 | 74 |
py
|
tesstrain
|
tesstrain-main/plot/plot_cer.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
maxticks=10
dataframe = pd.read_csv("plot_cer.csv",sep='\t', encoding='utf-8')
dataframe['TrainingIteration'] = dataframe['TrainingIteration'].fillna(-2)
dataframe['TrainingIteration'] = dataframe['TrainingIteration'].astype(int)
dataframe['TrainingIteration'] = dataframe['TrainingIteration'].astype(str)
dataframe['TrainingIteration'] = dataframe['TrainingIteration'].replace('-2', np.nan)
t = dataframe['TrainingIteration']
x = dataframe['LearningIteration']
y = dataframe.IterationCER
c = dataframe.CheckpointCER
e = dataframe.EvalCER
cmax = c[np.argmax(c)]
maxCERtoDisplay=cmax+2
def annot_min(boxcolor, xpos, ypos, x,y):
xmin = x[np.argmin(y)]
ymin = y.min()
boxtext= "{:.3f}% at Learning Iteration {:.0f}" .format(ymin,xmin)
ax1.annotate(boxtext, xy=(xmin, ymin), xytext=(xpos,ypos), textcoords='offset points',
arrowprops=dict(shrinkA=0.05, shrinkB=1, fc='black', ec='white', connectionstyle="arc3"),
bbox=dict(boxstyle='round,pad=0.2', fc=boxcolor, alpha=0.3))
PlotTitle="Tesseract LSTM training and Evaluation Character Error Rates (-1 to " + str(maxCERtoDisplay) + "%)"
plt.title(label=PlotTitle)
fig = plt.figure(figsize=(11,8.5)) #size is in inches
ax1 = fig.add_subplot()
ax1.set_ylim([-1,maxCERtoDisplay])
ax1.set_xlim([-1000,30000])
ax1.set_xlabel('Learning Iterations')
ax1.set_ylabel('Character Error Rate (%)')
ax1.set_xticks(x)
ax1.tick_params(axis='x', rotation=45, labelsize='small')
ax1.locator_params(axis='x', nbins=maxticks) # limit ticks on x-axis
ax1.grid(True)
if not c.dropna().empty: # not NaN or empty
ax1.scatter(x, c, c='gold', s=50, label='Best Model Checkpoints CER')
ax1.plot(x, c, 'gold')
annot_min('gold',-150,-30,x,c)
ax1.scatter(x, y, s=3, c='teal', label='CER every 100 Training Iterations')
ax1.plot(x, y, 'teal', linewidth=0.7)
if not e.dropna().empty: # not NaN or empty
ax1.plot(x, e, 'magenta')
ax1.scatter(x, e, c='magenta', s=50, label='Evaluation CER')
annot_min('magenta',-150,40,x,e)
plt.legend(loc='upper right')
ax2 = ax1.twiny() # ax1 and ax2 share y-axis
ax2.set_xlabel("Training Iterations")
ax2.set_xlim(ax1.get_xlim()) # ensure the independent x-axes now span the same range
ax2.set_xticks(x) # copy over the locations of the x-ticks from Learning Iterations
ax2.tick_params(axis='x', rotation=45, labelsize='small')
ax2.set_xticklabels(t) # But give value of Training Iterations
ax2.locator_params(axis='x', nbins=maxticks) # limit ticks on secondary x-axis
plt.savefig("plot_cer.png")
| 2,625 | 37.617647 | 110 |
py
|
tesstrain
|
tesstrain-main/plot/plot_cer_validation.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
maxticks=10
dataframe = pd.read_csv("plot_cer_validation.csv",sep='\t', encoding='utf-8')
t = dataframe['TrainingIteration']
x = dataframe['LearningIteration']
v = dataframe.ValidationCER
c = dataframe.CheckpointCER
cmax = c[np.argmax(c)]
vmax = v[np.argmax(v)]
if vmax > cmax:
maxCERtoDisplay=vmax+2
else:
maxCERtoDisplay=cmax+2
def annot_min(boxcolor, xpos, ypos, x,y):
xmin = x[np.argmin(y)]
ymin = y.min()
boxtext= "{:.3f}% at Learning Iteration {:}" .format(ymin,xmin)
ax1.annotate(boxtext, xy=(xmin, ymin), xytext=(xpos,ypos), textcoords='offset points',
bbox=dict(boxstyle='round,pad=0.2', fc=boxcolor, alpha=0.3))
PlotTitle="Tesseract LSTM Training and Validation Character Error Rate %"
fig = plt.figure(figsize=(11,8.5)) #size is in inches
ax1 = fig.add_subplot()
ax1.set_ylim([-1,maxCERtoDisplay])
ax1.set_xlabel('Learning Iterations')
ax1.set_ylabel('Character Error Rate (%)')
ax1.set_xticks(x)
ax1.tick_params(axis='x', rotation=45, labelsize='small')
ax1.locator_params(axis='x', nbins=maxticks) # limit ticks on x-axis
ax1.grid(True)
if not c.dropna().empty: # not NaN or empty
ax1.scatter(x, c, c='gold', s=50, label='Best Model Checkpoints CER')
ax1.plot(x, c, 'gold')
annot_min('gold',-100,-30,x,c)
if not v.dropna().empty: # not NaN or empty
ax1.plot(x, v, 'blue')
ax1.scatter(x, v, c='blue', s=50, label='Validation CER')
annot_min('blue',-100,-30,x,v)
# CER of START_MODEL using same eval list
dflang = pd.read_csv("plot_cer_lang.csv",sep='\t', encoding='utf-8')
ax1.text(x.min(),dflang.LangCER[0],
"{:.3f}% for START_MODEL {}" .format(dflang.LangCER[0],dflang.Name[0]),
color='red')
plt.title(label=PlotTitle)
plt.legend(loc='upper right')
ax2 = ax1.twiny() # ax1 and ax2 share y-axis
ax2.set_xlabel("Training Iterations")
ax2.set_xlim(ax1.get_xlim()) # ensure the independent x-axes now span the same range
ax2.set_xticks(x) # copy over the locations of the x-ticks from Learning Iterations
ax2.tick_params(axis='x', rotation=45, labelsize='small')
ax2.set_xticklabels(t) # But give value of Training Iterations
ax2.locator_params(axis='x', nbins=maxticks) # limit ticks on secondary x-axis
plt.savefig("plot_cer_validation.png")
| 2,360 | 34.238806 | 90 |
py
|
Age-and-Gender-Recognition
|
Age-and-Gender-Recognition-main/Age and Gender Recognition using Caffe Model - Youtube.py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import cv2
import os
# Raw string so the backslashes in the Windows path are not treated as escape sequences.
os.chdir(r'D:\Python37\Projects\Gender-and-Age-Detection- Youtube\Gender-and-Age-Detection\models')
# In[33]:
def detectFace(net,frame,confidence_threshold=0.7):
frameOpencvDNN=frame.copy()
print(frameOpencvDNN.shape)
frameHeight=frameOpencvDNN.shape[0]
frameWidth=frameOpencvDNN.shape[1]
blob=cv2.dnn.blobFromImage(frameOpencvDNN,1.0,(227,227),[124.96,115.97,106.13],swapRB=True,crop=False)
net.setInput(blob)
detections=net.forward()
faceBoxes=[]
for i in range(detections.shape[2]):
confidence=detections[0,0,i,2]
if confidence>confidence_threshold:
x1=int(detections[0,0,i,3]*frameWidth)
y1=int(detections[0,0,i,4]*frameHeight)
x2=int(detections[0,0,i,5]*frameWidth)
y2=int(detections[0,0,i,6]*frameHeight)
faceBoxes.append([x1,y1,x2,y2])
cv2.rectangle(frameOpencvDNN,(x1,y1),(x2,y2),(0,255,0),int(round(frameHeight/150)),8)
return frameOpencvDNN,faceBoxes
faceProto='opencv_face_detector.pbtxt'
faceModel='opencv_face_detector_uint8.pb'
ageProto='age_deploy.prototxt'
ageModel='age_net.caffemodel'
genderProto='gender_deploy.prototxt'
genderModel='gender_net.caffemodel'
genderList=['Male','Female']
ageList=['(0-2)','(4-6)','(8-12)','(15-20)','(25-32)','(38-43)','(48-53)','(60-100)']
faceNet=cv2.dnn.readNet(faceModel,faceProto)
ageNet=cv2.dnn.readNet(ageModel,ageProto)
genderNet=cv2.dnn.readNet(genderModel,genderProto)
video=cv2.VideoCapture(0)
padding=20
while cv2.waitKey(1)<0:
hasFrame,frame=video.read()
if not hasFrame:
cv2.waitKey()
break
resultImg,faceBoxes=detectFace(faceNet,frame)
if not faceBoxes:
print("No face detected")
for faceBox in faceBoxes:
face=frame[max(0,faceBox[1]-padding):min(faceBox[3]+padding,frame.shape[0]-1),max(0,faceBox[0]-padding):min(faceBox[2]+padding, frame.shape[1]-1)]
blob=cv2.dnn.blobFromImage(face,1.0,(227,227),[124.96,115.97,106.13],swapRB=True,crop=False)
genderNet.setInput(blob)
genderPreds=genderNet.forward()
gender=genderList[genderPreds[0].argmax()]
ageNet.setInput(blob)
agePreds=ageNet.forward()
age=ageList[agePreds[0].argmax()]
cv2.putText(resultImg,f'{gender},{age}',(faceBox[0],faceBox[1]-10),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,255,255),2,cv2.LINE_AA)
cv2.imshow("Detecting age and Gender",resultImg)
if cv2.waitKey(33) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
# In[ ]:
| 2,680 | 29.123596 | 154 |
py
|
linbp-attack
|
linbp-attack-master/attack/imagenet/test.py
|
import os, sys
import torch
import models as MODEL
import torchvision.transforms as T
import torchvision
import argparse
from torch.backends import cudnn
import numpy as np
import torch.nn.functional as F
parser = argparse.ArgumentParser(description='test')
parser.add_argument('--dir', type=str, default='')
args = parser.parse_args()
print(args)
cudnn.benchmark = False
cudnn.deterministic = True
SEED = 0
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
def normalize(x, ms=None):
if ms == None:
ms = [(0.485, 0.456, 0.406), (0.229, 0.224, 0.225)]
for i in range(x.shape[1]):
x[:,i] = (x[:,i] - ms[0][i]) / ms[1][i]
return x
def test(model, trans):
target = torch.from_numpy(np.load(args.dir + '/labels.npy')).long()
if 'target' in args.dir:
label_switch = torch.tensor(list(range(500, 1000)) + list(range(0, 500))).long()
target = label_switch[target]
img_num = 0
count = 0
advfile_ls = os.listdir(args.dir)
for advfile_ind in range(len(advfile_ls)-1):
adv_batch = torch.from_numpy(np.load(args.dir + '/batch_{}.npy'.format(advfile_ind))).float() / 255
if advfile_ind == 0:
adv_batch_size = adv_batch.shape[0]
img = adv_batch
img_num += img.shape[0]
label = target[advfile_ind * adv_batch_size : advfile_ind*adv_batch_size + adv_batch.shape[0]]
label = label.to(device)
img = img.to(device)
with torch.no_grad():
pred = torch.argmax(model(trans(img)), dim=1).view(1,-1)
count += (label != pred.squeeze(0)).sum().item()
del pred, img
del adv_batch
return round(100. - 100. * count / img_num, 2) if 'target' in args.dir else round(100. * count / img_num, 2)
inceptionv3 = MODEL.inceptionv3.Inception3()
inceptionv3.to(device)
inceptionv3.load_state_dict(torch.load('attack/imagenet/models/ckpt/inception_v3_google-1a9a5a14.pth'))
inceptionv3.eval()
def trans_incep(x):
if 'incep' in args.dir:
return normalize(x, ms = [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]).data
else:
x = F.interpolate(x, size=(256,256), mode='bilinear', align_corners=False)
x = x[:, :, (256-224)//2: (256-224)//2 + 224, (256-224)//2: (256-224)//2 + 224]
x = F.interpolate(x, size=(299,299))
return normalize(x, ms = [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]).data
print('inceptionv3:', test(model = inceptionv3, trans = trans_incep))
del inceptionv3
pnasnet = MODEL.pnasnet.pnasnet5large(ckpt_dir ='attack/imagenet/models/ckpt/pnasnet5large-bf079911.pth', num_classes=1000, pretrained='imagenet')
pnasnet.to(device)
pnasnet.eval()
def trans_pnas(x):
x = F.interpolate(x, size=(256,256), mode='bilinear', align_corners=False)
x = x[:, :, (256-224)//2: (256-224)//2 + 224, (256-224)//2: (256-224)//2 + 224]
x = F.interpolate(x, size=(331,331))
return normalize(x, ms = [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]).data
print('pnasnet:', test(model = pnasnet, trans = trans_pnas))
del pnasnet
senet = MODEL.senet.senet154(ckpt_dir ='attack/imagenet/models/ckpt/senet154-c7b49a05.pth')
senet.to(device)
senet.eval()
def trans_se(x):
x = F.interpolate(x, size=(256,256), mode='bilinear', align_corners=False)
x = x[:, :, (256-224)//2: (256-224)//2 + 224, (256-224)//2: (256-224)//2 + 224]
return normalize(x, ms = None).data
print('senet:', test(model = senet, trans = trans_se))
del senet
densenet = torchvision.models.densenet121(pretrained=False)
densenet.to(device)
import re
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
state_dict = torch.load('attack/imagenet/models/ckpt/densenet121-a639ec97.pth')
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
densenet.load_state_dict(state_dict)
densenet.eval()
print('densenet:', test(model = densenet, trans = trans_se))
del densenet
mobilenet = torchvision.models.mobilenet_v2(pretrained=False)
mobilenet.to(device)
mobilenet.load_state_dict(torch.load('attack/imagenet/models/ckpt/mobilenet_v2-b0353104.pth'))
mobilenet.eval()
print('mobilenet:', test(model = mobilenet, trans = trans_se))
del mobilenet
def trans_ori(x):
if 'incep' in args.dir:
x = F.interpolate(x, size=(256,256), mode='bilinear', align_corners=False)
x = x[:, :, (256-224)//2: (256-224)//2 + 224, (256-224)//2: (256-224)//2 + 224]
return normalize(x, ms = None).data
else:
return normalize(x, ms = None).data
resnet50 = MODEL.resnet.resnet50(state_dict_dir ='attack/imagenet/models/ckpt/resnet50-19c8e357.pth')
resnet50.eval()
resnet50.to(device)
print('resnet50:', test(model = resnet50, trans = trans_ori))
del resnet50
| 4,941 | 35.880597 | 146 |
py
|
linbp-attack
|
linbp-attack-master/attack/imagenet/utils.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torchvision
import numpy as np
from torch.utils.data import Dataset
import csv
import PIL.Image as Image
import os
import torchvision.transforms as T
import pickle
# Selected imagenet. The .csv file format:
# class_index, class, image_name
# 0,n01440764,ILSVRC2012_val_00002138.JPEG
# 2,n01484850,ILSVRC2012_val_00004329.JPEG
# ...
class SelectedImagenet(Dataset):
def __init__(self, imagenet_val_dir, selected_images_csv, transform=None):
super(SelectedImagenet, self).__init__()
self.imagenet_val_dir = imagenet_val_dir
self.selected_images_csv = selected_images_csv
self.transform = transform
self._load_csv()
def _load_csv(self):
reader = csv.reader(open(self.selected_images_csv, 'r'))
next(reader)
self.selected_list = list(reader)
def __getitem__(self, item):
target, target_name, image_name = self.selected_list[item]
image = Image.open(os.path.join(self.imagenet_val_dir, target_name, image_name))
if image.mode != 'RGB':
image = image.convert('RGB')
if self.transform is not None:
image = self.transform(image)
return image, int(target)
def __len__(self):
return len(self.selected_list)
class Normalize(nn.Module):
def __init__(self,):
super(Normalize, self).__init__()
self.ms = [(0.485, 0.456, 0.406), (0.229, 0.224, 0.225)]
def forward(self, input):
x = input.clone()
for i in range(x.shape[1]):
x[:,i] = (x[:,i] - self.ms[0][i]) / self.ms[1][i]
return x
def input_diversity(img):
gg = torch.randint(0, 2, (1,)).item()
if gg == 0:
return img
else:
rnd = torch.randint(224,257, (1,)).item()
rescaled = F.interpolate(img, (rnd, rnd), mode = 'nearest')
h_rem = 256 - rnd
w_hem = 256 - rnd
pad_top = torch.randint(0, h_rem + 1, (1,)).item()
pad_bottom = h_rem - pad_top
pad_left = torch.randint(0, w_hem + 1, (1,)).item()
pad_right = w_hem - pad_left
padded = F.pad(rescaled, pad = (pad_left, pad_right, pad_top, pad_bottom))
padded = F.interpolate(padded, (224, 224), mode = 'nearest')
return padded
def linbp_forw_resnet50(model, x, do_linbp, linbp_layer):
jj = int(linbp_layer.split('_')[0])
kk = int(linbp_layer.split('_')[1])
x = model[0](x)
x = model[1].conv1(x)
x = model[1].bn1(x)
x = model[1].relu(x)
x = model[1].maxpool(x)
ori_mask_ls = []
conv_out_ls = []
relu_out_ls = []
conv_input_ls = []
def layer_forw(jj, kk, jj_now, kk_now, x, mm, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls, do_linbp):
if jj < jj_now:
x, ori_mask, conv_out, relu_out, conv_in = block_func(mm, x, linbp=True)
ori_mask_ls.append(ori_mask)
conv_out_ls.append(conv_out)
relu_out_ls.append(relu_out)
conv_input_ls.append(conv_in)
elif jj == jj_now:
if kk_now >= kk:
x, ori_mask, conv_out, relu_out, conv_in = block_func(mm, x, linbp=True)
ori_mask_ls.append(ori_mask)
conv_out_ls.append(conv_out)
relu_out_ls.append(relu_out)
conv_input_ls.append(conv_in)
else:
x, _, _, _, _ = block_func(mm, x, linbp=False)
else:
x, _, _, _, _ = block_func(mm, x, linbp=False)
return x, ori_mask_ls
for ind, mm in enumerate(model[1].layer1):
x, ori_mask_ls = layer_forw(jj, kk, 1, ind, x, mm, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls, do_linbp)
for ind, mm in enumerate(model[1].layer2):
x, ori_mask_ls = layer_forw(jj, kk, 2, ind, x, mm, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls, do_linbp)
for ind, mm in enumerate(model[1].layer3):
x, ori_mask_ls = layer_forw(jj, kk, 3, ind, x, mm, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls, do_linbp)
for ind, mm in enumerate(model[1].layer4):
x, ori_mask_ls = layer_forw(jj, kk, 4, ind, x, mm, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls, do_linbp)
x = model[1].avgpool(x)
x = torch.flatten(x, 1)
x = model[1].fc(x)
return x, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls
def block_func(block, x, linbp):
identity = x
conv_in = x+0
out = block.conv1(conv_in)
out = block.bn1(out)
out_0 = out + 0
if linbp:
out = linbp_relu(out_0)
else:
out = block.relu(out_0)
ori_mask_0 = out.data.bool().int()
out = block.conv2(out)
out = block.bn2(out)
out_1 = out + 0
if linbp:
out = linbp_relu(out_1)
else:
out = block.relu(out_1)
ori_mask_1 = out.data.bool().int()
out = block.conv3(out)
out = block.bn3(out)
if block.downsample is not None:
identity = block.downsample(identity)
identity_out = identity + 0
x_out = out + 0
out = identity_out + x_out
out = block.relu(out)
ori_mask_2 = out.data.bool().int()
return out, (ori_mask_0, ori_mask_1, ori_mask_2), (identity_out, x_out), (out_0, out_1), (0, conv_in)
def linbp_relu(x):
x_p = F.relu(-x)
x = x + x_p.data
return x
def linbp_backw_resnet50(img, loss, conv_out_ls, ori_mask_ls, relu_out_ls, conv_input_ls, xp):
for i in range(-1, -len(conv_out_ls)-1, -1):
if i == -1:
grads = torch.autograd.grad(loss, conv_out_ls[i])
else:
grads = torch.autograd.grad((conv_out_ls[i+1][0], conv_input_ls[i+1][1]), conv_out_ls[i], grad_outputs=(grads[0], main_grad_norm))
normal_grad_2 = torch.autograd.grad(conv_out_ls[i][1], relu_out_ls[i][1], grads[1]*ori_mask_ls[i][2],retain_graph=True)[0]
normal_grad_1 = torch.autograd.grad(relu_out_ls[i][1], relu_out_ls[i][0], normal_grad_2 * ori_mask_ls[i][1], retain_graph=True)[0]
normal_grad_0 = torch.autograd.grad(relu_out_ls[i][0], conv_input_ls[i][1], normal_grad_1 * ori_mask_ls[i][0], retain_graph=True)[0]
del normal_grad_2, normal_grad_1
main_grad = torch.autograd.grad(conv_out_ls[i][1], conv_input_ls[i][1], grads[1])[0]
alpha = normal_grad_0.norm(p=2, dim = (1,2,3), keepdim = True) / main_grad.norm(p=2,dim = (1,2,3), keepdim=True)
main_grad_norm = xp * alpha * main_grad
input_grad = torch.autograd.grad((conv_out_ls[0][0], conv_input_ls[0][1]), img, grad_outputs=(grads[0], main_grad_norm))
return input_grad[0].data
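# Truncated forward pass for ILA: returns the intermediate activation right after residual unit
# ila_layer (formatted 'block_unit', e.g. '2_3'), or the stem output when ila_layer is '0_0'.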
def ila_forw_resnet50(model, x, ila_layer):
jj = int(ila_layer.split('_')[0])
kk = int(ila_layer.split('_')[1])
x = model[0](x)
x = model[1].conv1(x)
x = model[1].bn1(x)
x = model[1].relu(x)
if jj == 0 and kk ==0:
return x
x = model[1].maxpool(x)
for ind, mm in enumerate(model[1].layer1):
x = mm(x)
if jj == 1 and ind == kk:
return x
for ind, mm in enumerate(model[1].layer2):
x = mm(x)
if jj == 2 and ind == kk:
return x
for ind, mm in enumerate(model[1].layer3):
x = mm(x)
if jj == 3 and ind == kk:
return x
for ind, mm in enumerate(model[1].layer4):
x = mm(x)
if jj == 4 and ind == kk:
return x
return False
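# ILA projection loss: maximizes the dot product between the current feature perturbation
# (new_mid - original_mid) and the feature perturbation of the reference adversarial example
# (old_attack_mid - original_mid), averaged over the batch.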
class ILAProjLoss(torch.nn.Module):
def __init__(self):
super(ILAProjLoss, self).__init__()
def forward(self, old_attack_mid, new_mid, original_mid, coeff):
n = old_attack_mid.shape[0]
x = (old_attack_mid - original_mid).view(n, -1)
y = (new_mid - original_mid).view(n, -1)
# x_norm = x / torch.norm(x, dim = 1, keepdim = True)
        proj_loss = torch.sum(y * x) / n
return proj_loss
| 7,806 | 36 | 142 |
py
|
linbp-attack
|
linbp-attack-master/attack/imagenet/attack_resnet50.py
|
import os, sys
import torch
import torchvision.transforms as T
import torch.nn as nn
import argparse
import torch.nn.functional as F
import torchvision
import models as MODEL
from torch.backends import cudnn
import numpy as np
from utils import SelectedImagenet, Normalize, input_diversity, \
linbp_forw_resnet50, linbp_backw_resnet50, ila_forw_resnet50, ILAProjLoss
parser = argparse.ArgumentParser()
parser.add_argument('--epsilon', type=float, default=0.03)
parser.add_argument('--sgm_lambda', type=float, default=1.0)
parser.add_argument('--niters', type=int, default=300)
parser.add_argument('--ila_niters', type=int, default=100)
parser.add_argument('--method', type=str, default = 'linbp_ila_pgd')
parser.add_argument('--batch_size', type=int, default=200)
parser.add_argument('--linbp_layer', type=str, default='3_1')
parser.add_argument('--ila_layer', type=str, default='2_3')
parser.add_argument('--save_dir', type=str, default = '')
parser.add_argument('--target_attack', default=False, action='store_true')
args = parser.parse_args()
if __name__ == '__main__':
print(args)
cudnn.benchmark = False
cudnn.deterministic = True
SEED = 0
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
os.makedirs(args.save_dir, exist_ok=True)
epsilon = args.epsilon
batch_size = args.batch_size
method = args.method
ila_layer = args.ila_layer
linbp_layer = args.linbp_layer
save_dir = args.save_dir
niters = args.niters
ila_niters = args.ila_niters
target_attack = args.target_attack
sgm_lambda = args.sgm_lambda
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
trans = T.Compose([
T.Resize((256,256)),
T.CenterCrop((224,224)),
T.ToTensor()
])
dataset = SelectedImagenet(imagenet_val_dir='data/imagenet/ILSVRC2012_img_val',
selected_images_csv='data/imagenet/selected_imagenet.csv',
transform=trans
)
ori_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers = 8, pin_memory = False)
model = MODEL.resnet.resnet50(state_dict_dir ='attack/imagenet/models/ckpt/resnet50-19c8e357.pth')
model.eval()
model = nn.Sequential(
Normalize(),
model
)
model.to(device)
if target_attack:
label_switch = torch.tensor(list(range(500,1000))+list(range(0,500))).long()
label_ls = []
    for ind, (ori_img, label) in enumerate(ori_loader):
label_ls.append(label)
if target_attack:
label = label_switch[label]
ori_img = ori_img.to(device)
img = ori_img.clone()
m = 0
for i in range(niters):
# In our implementation of PGD, we incorporate randomness at each iteration to further enhance the transferability
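            # (A uniform draw in [-epsilon, epsilon] is added to the current iterate before each
            # gradient step; the projection back into the epsilon-ball happens after the update below.)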
if 'pgd' in method:
img_x = img + img.new(img.size()).uniform_(-epsilon, epsilon)
else:
img_x = img
img_x.requires_grad_(True)
if 'linbp' in method:
att_out, ori_mask_ls, conv_out_ls, relu_out_ls, conv_input_ls = linbp_forw_resnet50(model, img_x, True, linbp_layer)
pred = torch.argmax(att_out, dim=1).view(-1)
loss = nn.CrossEntropyLoss()(att_out, label.to(device))
model.zero_grad()
input_grad = linbp_backw_resnet50(img_x, loss, conv_out_ls, ori_mask_ls, relu_out_ls, conv_input_ls, xp=sgm_lambda)
else:
if method == 'mdi2fgsm' or method == 'linbp_mdi2fgsm':
att_out = model(input_diversity(img_x))
else:
att_out = model(img_x)
pred = torch.argmax(att_out, dim=1).view(-1)
loss = nn.CrossEntropyLoss()(att_out, label.to(device))
model.zero_grad()
loss.backward()
input_grad = img_x.grad.data
model.zero_grad()
if 'mdi2fgsm' in method or 'mifgsm' in method:
input_grad = 1 * m + input_grad / torch.norm(input_grad, dim=(1, 2, 3), p=1, keepdim=True)
m = input_grad
if target_attack:
input_grad = - input_grad
if method == 'fgsm' or '_fgsm' in method:
img = img.data + 2 * epsilon * torch.sign(input_grad)
else:
img = img.data + 1./255 * torch.sign(input_grad)
img = torch.where(img > ori_img + epsilon, ori_img + epsilon, img)
img = torch.where(img < ori_img - epsilon, ori_img - epsilon, img)
img = torch.clamp(img, min=0, max=1)
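        # Optional ILA stage: restart from the clean image and, for ila_niters steps, maximize the
        # projection of the intermediate-layer feature shift onto that of the attack found above.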
if 'ila' in method:
attack_img = img.clone()
img = ori_img.clone().to(device)
with torch.no_grad():
mid_output = ila_forw_resnet50(model, ori_img, ila_layer)
mid_original = torch.zeros(mid_output.size()).to(device)
mid_original.copy_(mid_output)
mid_output = ila_forw_resnet50(model, attack_img, ila_layer)
mid_attack_original = torch.zeros(mid_output.size()).to(device)
mid_attack_original.copy_(mid_output)
for _ in range(ila_niters):
img.requires_grad_(True)
mid_output = ila_forw_resnet50(model, img, ila_layer)
loss = ILAProjLoss()(
mid_attack_original.detach(), mid_output, mid_original.detach(), 1.0
)
model.zero_grad()
loss.backward()
input_grad = img.grad.data
model.zero_grad()
if method == 'ila_fgsm':
img = img.data + 2 * epsilon * torch.sign(input_grad)
else:
img = img.data + 1./255 * torch.sign(input_grad)
img = torch.where(img > ori_img + epsilon, ori_img + epsilon, img)
img = torch.where(img < ori_img - epsilon, ori_img - epsilon, img)
img = torch.clamp(img, min=0, max=1)
del mid_output, mid_original, mid_attack_original
        np.save(save_dir + '/batch_{}.npy'.format(ind), torch.round(img.data*255).cpu().numpy().astype(np.uint8))
del img, ori_img, input_grad
print('batch_{}.npy saved'.format(ind))
label_ls = torch.cat(label_ls)
np.save(save_dir + '/labels.npy', label_ls.numpy())
print('images saved')
| 6,574 | 40.878981 | 132 |
py
|
linbp-attack
|
linbp-attack-master/attack/imagenet/models/pnasnet.py
|
from __future__ import print_function, division, absolute_import
from collections import OrderedDict
import torch
import torch.nn as nn
pretrained_settings = {
'pnasnet5large': {
'imagenet': {
'url': '-',
'input_space': 'RGB',
'input_size': [3, 331, 331],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1000
},
'imagenet+background': {
'url': '-',
'input_space': 'RGB',
'input_size': [3, 331, 331],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1001
}
}
}
class MaxPool(nn.Module):
def __init__(self, kernel_size, stride=1, padding=1, zero_pad=False):
super(MaxPool, self).__init__()
self.zero_pad = nn.ZeroPad2d((1, 0, 1, 0)) if zero_pad else None
self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding)
def forward(self, x):
if self.zero_pad:
x = self.zero_pad(x)
x = self.pool(x)
if self.zero_pad:
x = x[:, :, 1:, 1:]
return x
class SeparableConv2d(nn.Module):
def __init__(self, in_channels, out_channels, dw_kernel_size, dw_stride,
dw_padding):
super(SeparableConv2d, self).__init__()
self.depthwise_conv2d = nn.Conv2d(in_channels, in_channels,
kernel_size=dw_kernel_size,
stride=dw_stride, padding=dw_padding,
groups=in_channels, bias=False)
self.pointwise_conv2d = nn.Conv2d(in_channels, out_channels,
kernel_size=1, bias=False)
def forward(self, x):
x = self.depthwise_conv2d(x)
x = self.pointwise_conv2d(x)
return x
class BranchSeparables(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
stem_cell=False, zero_pad=False):
super(BranchSeparables, self).__init__()
padding = kernel_size // 2
middle_channels = out_channels if stem_cell else in_channels
self.zero_pad = nn.ZeroPad2d((1, 0, 1, 0)) if zero_pad else None
self.relu_1 = nn.ReLU()
self.separable_1 = SeparableConv2d(in_channels, middle_channels,
kernel_size, dw_stride=stride,
dw_padding=padding)
self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001)
self.relu_2 = nn.ReLU()
self.separable_2 = SeparableConv2d(middle_channels, out_channels,
kernel_size, dw_stride=1,
dw_padding=padding)
self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.relu_1(x)
if self.zero_pad:
x = self.zero_pad(x)
x = self.separable_1(x)
if self.zero_pad:
x = x[:, :, 1:, 1:].contiguous()
x = self.bn_sep_1(x)
x = self.relu_2(x)
x = self.separable_2(x)
x = self.bn_sep_2(x)
return x
class ReluConvBn(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1):
super(ReluConvBn, self).__init__()
self.relu = nn.ReLU()
self.conv = nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
bias=False)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.relu(x)
x = self.conv(x)
x = self.bn(x)
return x
class FactorizedReduction(nn.Module):
def __init__(self, in_channels, out_channels):
super(FactorizedReduction, self).__init__()
self.relu = nn.ReLU()
self.path_1 = nn.Sequential(OrderedDict([
('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)),
('conv', nn.Conv2d(in_channels, out_channels // 2,
kernel_size=1, bias=False)),
]))
self.path_2 = nn.Sequential(OrderedDict([
('pad', nn.ZeroPad2d((0, 1, 0, 1))),
('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)),
('conv', nn.Conv2d(in_channels, out_channels // 2,
kernel_size=1, bias=False)),
]))
self.final_path_bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.relu(x)
x_path1 = self.path_1(x)
x_path2 = self.path_2.pad(x)
x_path2 = x_path2[:, :, 1:, 1:]
x_path2 = self.path_2.avgpool(x_path2)
x_path2 = self.path_2.conv(x_path2)
out = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
return out
class CellBase(nn.Module):
def cell_forward(self, x_left, x_right):
x_comb_iter_0_left = self.comb_iter_0_left(x_left)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_right)
x_comb_iter_1_right = self.comb_iter_1_right(x_right)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2_right = self.comb_iter_2_right(x_right)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_left = self.comb_iter_3_left(x_comb_iter_2)
x_comb_iter_3_right = self.comb_iter_3_right(x_right)
x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
x_comb_iter_4_left = self.comb_iter_4_left(x_left)
if self.comb_iter_4_right:
x_comb_iter_4_right = self.comb_iter_4_right(x_right)
else:
x_comb_iter_4_right = x_right
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat(
[x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3,
x_comb_iter_4], 1)
return x_out
class CellStem0(CellBase):
def __init__(self, in_channels_left, out_channels_left, in_channels_right,
out_channels_right):
super(CellStem0, self).__init__()
self.conv_1x1 = ReluConvBn(in_channels_right, out_channels_right,
kernel_size=1)
self.comb_iter_0_left = BranchSeparables(in_channels_left,
out_channels_left,
kernel_size=5, stride=2,
stem_cell=True)
self.comb_iter_0_right = nn.Sequential(OrderedDict([
('max_pool', MaxPool(3, stride=2)),
('conv', nn.Conv2d(in_channels_left, out_channels_left,
kernel_size=1, bias=False)),
('bn', nn.BatchNorm2d(out_channels_left, eps=0.001)),
]))
self.comb_iter_1_left = BranchSeparables(out_channels_right,
out_channels_right,
kernel_size=7, stride=2)
self.comb_iter_1_right = MaxPool(3, stride=2)
self.comb_iter_2_left = BranchSeparables(out_channels_right,
out_channels_right,
kernel_size=5, stride=2)
self.comb_iter_2_right = BranchSeparables(out_channels_right,
out_channels_right,
kernel_size=3, stride=2)
self.comb_iter_3_left = BranchSeparables(out_channels_right,
out_channels_right,
kernel_size=3)
self.comb_iter_3_right = MaxPool(3, stride=2)
self.comb_iter_4_left = BranchSeparables(in_channels_right,
out_channels_right,
kernel_size=3, stride=2,
stem_cell=True)
self.comb_iter_4_right = ReluConvBn(out_channels_right,
out_channels_right,
kernel_size=1, stride=2)
def forward(self, x_left):
x_right = self.conv_1x1(x_left)
x_out = self.cell_forward(x_left, x_right)
return x_out
class Cell(CellBase):
def __init__(self, in_channels_left, out_channels_left, in_channels_right,
out_channels_right, is_reduction=False, zero_pad=False,
match_prev_layer_dimensions=False):
super(Cell, self).__init__()
# If `is_reduction` is set to `True` stride 2 is used for
# convolutional and pooling layers to reduce the spatial size of
# the output of a cell approximately by a factor of 2.
stride = 2 if is_reduction else 1
# If `match_prev_layer_dimensions` is set to `True`
# `FactorizedReduction` is used to reduce the spatial size
# of the left input of a cell approximately by a factor of 2.
self.match_prev_layer_dimensions = match_prev_layer_dimensions
if match_prev_layer_dimensions:
self.conv_prev_1x1 = FactorizedReduction(in_channels_left,
out_channels_left)
else:
self.conv_prev_1x1 = ReluConvBn(in_channels_left,
out_channels_left, kernel_size=1)
self.conv_1x1 = ReluConvBn(in_channels_right, out_channels_right,
kernel_size=1)
self.comb_iter_0_left = BranchSeparables(out_channels_left,
out_channels_left,
kernel_size=5, stride=stride,
zero_pad=zero_pad)
self.comb_iter_0_right = MaxPool(3, stride=stride, zero_pad=zero_pad)
self.comb_iter_1_left = BranchSeparables(out_channels_right,
out_channels_right,
kernel_size=7, stride=stride,
zero_pad=zero_pad)
self.comb_iter_1_right = MaxPool(3, stride=stride, zero_pad=zero_pad)
self.comb_iter_2_left = BranchSeparables(out_channels_right,
out_channels_right,
kernel_size=5, stride=stride,
zero_pad=zero_pad)
self.comb_iter_2_right = BranchSeparables(out_channels_right,
out_channels_right,
kernel_size=3, stride=stride,
zero_pad=zero_pad)
self.comb_iter_3_left = BranchSeparables(out_channels_right,
out_channels_right,
kernel_size=3)
self.comb_iter_3_right = MaxPool(3, stride=stride, zero_pad=zero_pad)
self.comb_iter_4_left = BranchSeparables(out_channels_left,
out_channels_left,
kernel_size=3, stride=stride,
zero_pad=zero_pad)
if is_reduction:
self.comb_iter_4_right = ReluConvBn(out_channels_right,
out_channels_right,
kernel_size=1, stride=stride)
else:
self.comb_iter_4_right = None
def forward(self, x_left, x_right):
x_left = self.conv_prev_1x1(x_left)
x_right = self.conv_1x1(x_right)
x_out = self.cell_forward(x_left, x_right)
return x_out
class PNASNet5Large(nn.Module):
def __init__(self, num_classes=1001):
super(PNASNet5Large, self).__init__()
self.num_classes = num_classes
self.conv_0 = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(3, 96, kernel_size=3, stride=2, bias=False)),
('bn', nn.BatchNorm2d(96, eps=0.001))
]))
self.cell_stem_0 = CellStem0(in_channels_left=96, out_channels_left=54,
in_channels_right=96,
out_channels_right=54)
self.cell_stem_1 = Cell(in_channels_left=96, out_channels_left=108,
in_channels_right=270, out_channels_right=108,
match_prev_layer_dimensions=True,
is_reduction=True)
self.cell_0 = Cell(in_channels_left=270, out_channels_left=216,
in_channels_right=540, out_channels_right=216,
match_prev_layer_dimensions=True)
self.cell_1 = Cell(in_channels_left=540, out_channels_left=216,
in_channels_right=1080, out_channels_right=216)
self.cell_2 = Cell(in_channels_left=1080, out_channels_left=216,
in_channels_right=1080, out_channels_right=216)
self.cell_3 = Cell(in_channels_left=1080, out_channels_left=216,
in_channels_right=1080, out_channels_right=216)
self.cell_4 = Cell(in_channels_left=1080, out_channels_left=432,
in_channels_right=1080, out_channels_right=432,
is_reduction=True, zero_pad=True)
self.cell_5 = Cell(in_channels_left=1080, out_channels_left=432,
in_channels_right=2160, out_channels_right=432,
match_prev_layer_dimensions=True)
self.cell_6 = Cell(in_channels_left=2160, out_channels_left=432,
in_channels_right=2160, out_channels_right=432)
self.cell_7 = Cell(in_channels_left=2160, out_channels_left=432,
in_channels_right=2160, out_channels_right=432)
self.cell_8 = Cell(in_channels_left=2160, out_channels_left=864,
in_channels_right=2160, out_channels_right=864,
is_reduction=True)
self.cell_9 = Cell(in_channels_left=2160, out_channels_left=864,
in_channels_right=4320, out_channels_right=864,
match_prev_layer_dimensions=True)
self.cell_10 = Cell(in_channels_left=4320, out_channels_left=864,
in_channels_right=4320, out_channels_right=864)
self.cell_11 = Cell(in_channels_left=4320, out_channels_left=864,
in_channels_right=4320, out_channels_right=864)
self.relu = nn.ReLU()
self.avg_pool = nn.AvgPool2d(11, stride=1, padding=0)
self.dropout = nn.Dropout(0.5)
self.last_linear = nn.Linear(4320, num_classes)
def features(self, x):
x_conv_0 = self.conv_0(x)
x_stem_0 = self.cell_stem_0(x_conv_0)
x_stem_1 = self.cell_stem_1(x_conv_0, x_stem_0)
x_cell_0 = self.cell_0(x_stem_0, x_stem_1)
x_cell_1 = self.cell_1(x_stem_1, x_cell_0)
x_cell_2 = self.cell_2(x_cell_0, x_cell_1)
x_cell_3 = self.cell_3(x_cell_1, x_cell_2)
x_cell_4 = self.cell_4(x_cell_2, x_cell_3)
x_cell_5 = self.cell_5(x_cell_3, x_cell_4)
x_cell_6 = self.cell_6(x_cell_4, x_cell_5)
x_cell_7 = self.cell_7(x_cell_5, x_cell_6)
x_cell_8 = self.cell_8(x_cell_6, x_cell_7)
x_cell_9 = self.cell_9(x_cell_7, x_cell_8)
x_cell_10 = self.cell_10(x_cell_8, x_cell_9)
x_cell_11 = self.cell_11(x_cell_9, x_cell_10)
return x_cell_11
def logits(self, features):
x = self.relu(features)
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.dropout(x)
x = self.last_linear(x)
return x
def forward(self, input):
x = self.features(input)
x = self.logits(x)
return x
def pnasnet5large(ckpt_dir, num_classes=1001, pretrained='imagenet'):
if pretrained:
settings = pretrained_settings['pnasnet5large'][pretrained]
assert num_classes == settings[
'num_classes'], 'num_classes should be {}, but is {}'.format(
settings['num_classes'], num_classes)
# both 'imagenet'&'imagenet+background' are loaded from same parameters
model = PNASNet5Large(num_classes=1001)
model.load_state_dict(torch.load(ckpt_dir, map_location = torch.device('cuda')))
if pretrained == 'imagenet':
new_last_linear = nn.Linear(model.last_linear.in_features, 1000)
new_last_linear.weight.data = model.last_linear.weight.data[1:]
new_last_linear.bias.data = model.last_linear.bias.data[1:]
model.last_linear = new_last_linear
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
else:
model = PNASNet5Large(num_classes=num_classes)
return model
| 17,685 | 43.774684 | 88 |
py
|
linbp-attack
|
linbp-attack-master/attack/imagenet/models/resnet.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def _resnet(arch, block, layers, state_dict_dir, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if state_dict_dir:
state_dict = torch.load(state_dict_dir)
model.load_state_dict(state_dict)
return model
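# Note: checkpoints are loaded from a local path (state_dict_dir) rather than downloaded from
# model_urls; the URL table above is kept only for reference.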
def resnet18(pretrained=False, progress=True, **kwargs):
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(state_dict_dir = None, progress=True, **kwargs):
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], state_dict_dir, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
| 9,898 | 36.496212 | 97 |
py
|
linbp-attack
|
linbp-attack-master/attack/imagenet/models/senet.py
|
from __future__ import print_function, division, absolute_import
from collections import OrderedDict
import math
import torch
import torch.nn as nn
__all__ = ['SENet', 'senet154']
pretrained_settings = {
'senet154': {
'imagenet': {
'url': '-',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
'se_resnet50': {
'imagenet': {
'url': '-',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
'se_resnet101': {
'imagenet': {
'url': '-',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
'se_resnet152': {
'imagenet': {
'url': '-',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
'se_resnext50_32x4d': {
'imagenet': {
'url': '-',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
'se_resnext101_32x4d': {
'imagenet': {
'url': '-',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
}
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,
padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,
padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class Bottleneck(nn.Module):
"""
Base class for bottlenecks that implements `forward()` method.
"""
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = self.se_module(out) + residual
out = self.relu(out)
return out
class SEBottleneck(Bottleneck):
"""
Bottleneck for SENet154.
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None):
super(SEBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes * 2)
self.conv2 = nn.Conv2d(planes * 2, planes * 4, kernel_size=3,
stride=stride, padding=1, groups=groups,
bias=False)
self.bn2 = nn.BatchNorm2d(planes * 4)
self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNetBottleneck(Bottleneck):
"""
ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe
implementation and uses `stride=stride` in `conv1` and not in `conv2`
(the latter is used in the torchvision implementation of ResNet).
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None):
super(SEResNetBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False,
stride=stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1,
groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNeXtBottleneck(Bottleneck):
"""
ResNeXt bottleneck type C with a Squeeze-and-Excitation module.
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None, base_width=4):
super(SEResNeXtBottleneck, self).__init__()
width = math.floor(planes * (base_width / 64)) * groups
self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False,
stride=1)
self.bn1 = nn.BatchNorm2d(width)
self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SENet(nn.Module):
def __init__(self, block, layers, groups, reduction, dropout_p=0.2,
inplanes=128, input_3x3=True, downsample_kernel_size=3,
downsample_padding=1, num_classes=1000):
"""
Parameters
----------
block (nn.Module): Bottleneck class.
- For SENet154: SEBottleneck
- For SE-ResNet models: SEResNetBottleneck
- For SE-ResNeXt models: SEResNeXtBottleneck
layers (list of ints): Number of residual blocks for 4 layers of the
network (layer1...layer4).
groups (int): Number of groups for the 3x3 convolution in each
bottleneck block.
- For SENet154: 64
- For SE-ResNet models: 1
- For SE-ResNeXt models: 32
reduction (int): Reduction ratio for Squeeze-and-Excitation modules.
- For all models: 16
dropout_p (float or None): Drop probability for the Dropout layer.
If `None` the Dropout layer is not used.
- For SENet154: 0.2
- For SE-ResNet models: None
- For SE-ResNeXt models: None
inplanes (int): Number of input channels for layer1.
- For SENet154: 128
- For SE-ResNet models: 64
- For SE-ResNeXt models: 64
input_3x3 (bool): If `True`, use three 3x3 convolutions instead of
a single 7x7 convolution in layer0.
- For SENet154: True
- For SE-ResNet models: False
- For SE-ResNeXt models: False
downsample_kernel_size (int): Kernel size for downsampling convolutions
in layer2, layer3 and layer4.
- For SENet154: 3
- For SE-ResNet models: 1
- For SE-ResNeXt models: 1
downsample_padding (int): Padding for downsampling convolutions in
layer2, layer3 and layer4.
- For SENet154: 1
- For SE-ResNet models: 0
- For SE-ResNeXt models: 0
num_classes (int): Number of outputs in `last_linear` layer.
- For all models: 1000
"""
super(SENet, self).__init__()
self.inplanes = inplanes
if input_3x3:
layer0_modules = [
('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1,
bias=False)),
('bn1', nn.BatchNorm2d(64)),
('relu1', nn.ReLU(inplace=True)),
('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,
bias=False)),
('bn2', nn.BatchNorm2d(64)),
('relu2', nn.ReLU(inplace=True)),
('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,
bias=False)),
('bn3', nn.BatchNorm2d(inplanes)),
('relu3', nn.ReLU(inplace=True)),
]
else:
layer0_modules = [
('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2,
padding=3, bias=False)),
('bn1', nn.BatchNorm2d(inplanes)),
('relu1', nn.ReLU(inplace=True)),
]
# To preserve compatibility with Caffe weights `ceil_mode=True`
# is used instead of `padding=1`.
layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2,
ceil_mode=True)))
self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
self.layer1 = self._make_layer(
block,
planes=64,
blocks=layers[0],
groups=groups,
reduction=reduction,
downsample_kernel_size=1,
downsample_padding=0
)
self.layer2 = self._make_layer(
block,
planes=128,
blocks=layers[1],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.layer3 = self._make_layer(
block,
planes=256,
blocks=layers[2],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.layer4 = self._make_layer(
block,
planes=512,
blocks=layers[3],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.avg_pool = nn.AvgPool2d(7, stride=1)
self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None
self.last_linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, blocks, groups, reduction, stride=1,
downsample_kernel_size=1, downsample_padding=0):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=downsample_kernel_size, stride=stride,
padding=downsample_padding, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, groups, reduction, stride,
downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, groups, reduction))
return nn.Sequential(*layers)
def features(self, x):
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def logits(self, x):
x = self.avg_pool(x)
if self.dropout is not None:
x = self.dropout(x)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
def forward(self, x):
x = self.features(x)
x = self.logits(x)
return x
def initialize_pretrained_model(ckpt_dir, model, num_classes, settings):
assert num_classes == settings['num_classes'], \
'num_classes should be {}, but is {}'.format(
settings['num_classes'], num_classes)
model.load_state_dict(torch.load(ckpt_dir, map_location = torch.device('cpu')))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
def senet154(ckpt_dir, num_classes=1000, pretrained='imagenet'):
model = SENet(SEBottleneck, [3, 8, 36, 3], groups=64, reduction=16,
dropout_p=0.2, num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['senet154'][pretrained]
initialize_pretrained_model(ckpt_dir, model, num_classes, settings)
return model
| 13,630 | 34.590078 | 83 |
py
|
linbp-attack
|
linbp-attack-master/attack/imagenet/models/__init__.py
|
from .inceptionv3 import *
from .pnasnet import *
from .senet import *
from .resnet import *
| 92 | 22.25 | 26 |
py
|
linbp-attack
|
linbp-attack-master/attack/imagenet/models/inceptionv3.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import namedtuple
# Output type returned in training mode when aux_logits is enabled (used in Inception3.forward below).
_InceptionOutputs = namedtuple('InceptionOutputs', ['logits', 'aux_logits'])
class Inception3(nn.Module):
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
super(Inception3, self).__init__()
self.aux_logits = aux_logits
self.transform_input = transform_input
self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
self.Mixed_5b = InceptionA(192, pool_features=32)
self.Mixed_5c = InceptionA(256, pool_features=64)
self.Mixed_5d = InceptionA(288, pool_features=64)
self.Mixed_6a = InceptionB(288)
self.Mixed_6b = InceptionC(768, channels_7x7=128)
self.Mixed_6c = InceptionC(768, channels_7x7=160)
self.Mixed_6d = InceptionC(768, channels_7x7=160)
self.Mixed_6e = InceptionC(768, channels_7x7=192)
if aux_logits:
self.AuxLogits = InceptionAux(768, num_classes)
self.Mixed_7a = InceptionD(768)
self.Mixed_7b = InceptionE(1280)
self.Mixed_7c = InceptionE(2048)
self.fc = nn.Linear(2048, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
import scipy.stats as stats
stddev = m.stddev if hasattr(m, 'stddev') else 0.1
X = stats.truncnorm(-2, 2, scale=stddev)
values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
values = values.view(m.weight.size())
with torch.no_grad():
m.weight.copy_(values)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
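        # transform_input converts inputs normalized with the ImageNet mean/std (0.485..., 0.229...)
        # to the (0.5, 0.5, 0.5) mean/std normalization this Inception implementation expects.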
if self.transform_input:
x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
# N x 3 x 299 x 299
x = self.Conv2d_1a_3x3(x)
# N x 32 x 149 x 149
x = self.Conv2d_2a_3x3(x)
# N x 32 x 147 x 147
x = self.Conv2d_2b_3x3(x)
# N x 64 x 147 x 147
x = F.max_pool2d(x, kernel_size=3, stride=2)
# N x 64 x 73 x 73
x = self.Conv2d_3b_1x1(x)
# N x 80 x 73 x 73
x = self.Conv2d_4a_3x3(x)
# N x 192 x 71 x 71
x = F.max_pool2d(x, kernel_size=3, stride=2)
# N x 192 x 35 x 35
x = self.Mixed_5b(x)
# N x 256 x 35 x 35
x = self.Mixed_5c(x)
# N x 288 x 35 x 35
x = self.Mixed_5d(x)
# N x 288 x 35 x 35
x = self.Mixed_6a(x)
# N x 768 x 17 x 17
x = self.Mixed_6b(x)
# N x 768 x 17 x 17
x = self.Mixed_6c(x)
# N x 768 x 17 x 17
x = self.Mixed_6d(x)
# N x 768 x 17 x 17
x = self.Mixed_6e(x)
# N x 768 x 17 x 17
if self.training and self.aux_logits:
aux = self.AuxLogits(x)
# N x 768 x 17 x 17
x = self.Mixed_7a(x)
# N x 1280 x 8 x 8
x = self.Mixed_7b(x)
# N x 2048 x 8 x 8
x = self.Mixed_7c(x)
# N x 2048 x 8 x 8
# Adaptive average pooling
x = F.adaptive_avg_pool2d(x, (1, 1))
# N x 2048 x 1 x 1
x = F.dropout(x, training=self.training)
# N x 2048 x 1 x 1
x = torch.flatten(x, 1)
# N x 2048
x = self.fc(x)
# N x 1000 (num_classes)
if self.training and self.aux_logits:
return _InceptionOutputs(x, aux)
return x
class InceptionA(nn.Module):
def __init__(self, in_channels, pool_features):
super(InceptionA, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionB(nn.Module):
def __init__(self, in_channels):
super(InceptionB, self).__init__()
self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)
self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3(x)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionC(nn.Module):
def __init__(self, in_channels, channels_7x7):
super(InceptionC, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)
c7 = channels_7x7
self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionD(nn.Module):
def __init__(self, in_channels):
super(InceptionD, self).__init__()
self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)
self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch7x7x3, branch_pool]
return torch.cat(outputs, 1)
class InceptionE(nn.Module):
def __init__(self, in_channels):
super(InceptionE, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)
self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionAux(nn.Module):
def __init__(self, in_channels, num_classes):
super(InceptionAux, self).__init__()
self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
self.conv1 = BasicConv2d(128, 768, kernel_size=5)
self.conv1.stddev = 0.01
self.fc = nn.Linear(768, num_classes)
self.fc.stddev = 0.001
def forward(self, x):
# N x 768 x 17 x 17
x = F.avg_pool2d(x, kernel_size=5, stride=3)
# N x 768 x 5 x 5
x = self.conv0(x)
# N x 128 x 5 x 5
x = self.conv1(x)
# N x 768 x 1 x 1
# Adaptive average pooling
x = F.adaptive_avg_pool2d(x, (1, 1))
# N x 768 x 1 x 1
x = torch.flatten(x, 1)
# N x 768
x = self.fc(x)
# N x 1000
return x
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
| 11,536 | 36.33657 | 88 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/test.py
|
import os, sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data as data
import torchvision.transforms as transforms
import models
# import numpy as np
import torchvision.datasets as DATASETS
import argparse
import numpy as np
parser = argparse.ArgumentParser(description='test')
parser.add_argument('--dir', type=str, default='')
args = parser.parse_args()
print(args)
cudnn.benchmark = False
cudnn.deterministic = True
SEED = 0
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
target = torch.from_numpy(np.load(args.dir + '/labels.npy')).long()
if 'target' in args.dir:
label_switch = torch.tensor([1,2,3,4,5,6,7,8,9,0]).long()
target = label_switch[target]
gdas = models.__dict__['gdas']('attack/cifar10/models/ckpt/gdas-cifar10-best.pth')
gdas.to(device)
gdas.eval()
pyramidnet = models.__dict__['pyramidnet272'](num_classes = 10)
pyramidnet.load_state_dict(torch.load('attack/cifar10/models/ckpt/pyramidnet272-checkpoint.pth', map_location=device)['state_dict'])
pyramidnet.to(device)
pyramidnet.eval()
ResNeXt_29_8_64d = models.__dict__['resnext'](
cardinality=8,
num_classes=10,
depth=29,
widen_factor=4,
dropRate=0,
)
ResNeXt_29_8_64d = nn.DataParallel(ResNeXt_29_8_64d)
ResNeXt_29_8_64d.load_state_dict(torch.load('attack/cifar10/models/ckpt/resnext-8x64d/model_best.pth.tar', map_location=device)['state_dict'])
ResNeXt_29_8_64d.to(device)
ResNeXt_29_8_64d.eval()
DenseNet_BC_L190_k40 = models.__dict__['densenet'](
num_classes=10,
depth=190,
growthRate=40,
compressionRate=2,
dropRate=0,
)
DenseNet_BC_L190_k40 = nn.DataParallel(DenseNet_BC_L190_k40)
DenseNet_BC_L190_k40.load_state_dict(torch.load('attack/cifar10/models/ckpt/densenet-bc-L190-k40/model_best.pth.tar', map_location=device)['state_dict'])
DenseNet_BC_L190_k40.to(device)
DenseNet_BC_L190_k40.eval()
WRN = models.__dict__['wrn'](
num_classes=10,
depth=28,
widen_factor=10,
dropRate=0.3,
)
WRN = nn.DataParallel(WRN)
WRN.load_state_dict(torch.load('attack/cifar10/models/ckpt/WRN-28-10-drop/model_best.pth.tar', map_location=device)['state_dict'])
WRN.to(device)
WRN.eval()
vgg = models.__dict__['vgg19_bn'](num_classes=10)
vgg.features = nn.DataParallel(vgg.features)
vgg.load_state_dict(torch.load('attack/cifar10/models/ckpt/vgg19_bn/model_best.pth.tar', map_location=(device))['state_dict'])
vgg.to(device)
vgg.eval()
def get_pred(model, inputs):
return torch.argmax(model(inputs), dim=1).view(1,-1)
def normal(x):
ms = [(0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)]
for i in range(x.shape[1]):
x[:,i,:,:] = (x[:,i,:,:] - ms[0][i]) / ms[1][i]
return x
vgg_fool = 0
WRN_fool = 0
ResNeXt_29_8_64d_fool = 0
DenseNet_BC_L190_k40_fool = 0
pyramidnet_fool = 0
gdas_fool = 0
advfile_ls = os.listdir(args.dir)
img_num = 0
for advfile_ind in range(len(advfile_ls)-1):
adv_batch = torch.from_numpy(np.load(args.dir + '/batch_{}.npy'.format(advfile_ind))).float() / 255
if advfile_ind == 0:
adv_batch_size = adv_batch.shape[0]
inputs_ori = adv_batch
img_num += inputs_ori.shape[0]
labels = target[advfile_ind*adv_batch_size : advfile_ind*adv_batch_size + adv_batch.shape[0]]
inputs = normal(inputs_ori.clone())
inputs, labels = inputs.to(device), labels.to(device)
with torch.no_grad():
WRN_pred = get_pred(WRN, inputs)
ResNeXt_29_8_64d_pred = get_pred(ResNeXt_29_8_64d, inputs)
DenseNet_BC_L190_k40_pred = get_pred(DenseNet_BC_L190_k40, inputs)
pyramidnet_pred = get_pred(pyramidnet, inputs)
gdas_pred = get_pred(gdas, inputs)
vgg_pred = get_pred(vgg, inputs)
WRN_fool += (labels != WRN_pred.squeeze(0)).sum().item()
ResNeXt_29_8_64d_fool += (labels != ResNeXt_29_8_64d_pred.squeeze(0)).sum().item()
DenseNet_BC_L190_k40_fool += (labels != DenseNet_BC_L190_k40_pred.squeeze(0)).sum().item()
pyramidnet_fool += (labels != pyramidnet_pred.squeeze(0)).sum().item()
gdas_fool += (labels != gdas_pred.squeeze(0)).sum().item()
vgg_fool += (labels != vgg_pred.squeeze(0)).sum().item()
def get_success_rate(fool_num, all_num):
return 1 - fool_num / all_num if 'target' in args.dir else fool_num / all_num
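# For untargeted attacks, success = fraction of adversarial images misclassified by each model;
# for targeted runs ('target' in the directory name) the labels above were switched to the targets,
# so success = 1 - (fraction predicted different from the target).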
print('vgg19_bn', get_success_rate(vgg_fool, img_num))
print('WRN', get_success_rate(WRN_fool, img_num))
print('ResNeXt_29_8_64d', get_success_rate(ResNeXt_29_8_64d_fool, img_num))
print('DenseNet_BC_L190_k40', get_success_rate(DenseNet_BC_L190_k40_fool, img_num))
print('pyramidnet', get_success_rate(pyramidnet_fool, img_num))
print('gdas', get_success_rate(gdas_fool, img_num))
| 4,881 | 35.706767 | 153 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/utils.py
|
import os
import torch
import torchvision.transforms as T
from torch.utils.data import Dataset
import torch.nn as nn
import argparse
import models
import torch.nn.functional as F
from torch.backends import cudnn
import pickle
import numpy as np
import csv
import PIL.Image as Image
# Selected cifar-10. The .csv file format:
# class_index,data_index
# 3,0
# 8,1
# 8,2
# ...
class SelectedCifar10(Dataset):
def __init__(self, cifar10_dir, selected_images_csv, transform=None):
super(SelectedCifar10, self).__init__()
self.cifar10_dir = cifar10_dir
self.data = []
self.targets = []
file_path = os.path.join(cifar10_dir, 'test_batch')
with open(file_path, 'rb') as f:
entry = pickle.load(f, encoding='latin1')
self.data.append(entry['data'])
if 'labels' in entry:
self.targets.extend(entry['labels'])
else:
self.targets.extend(entry['fine_labels'])
self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
self.transform = transform
self.selected_images_csv = selected_images_csv
self._load_csv()
def _load_csv(self):
reader = csv.reader(open(self.selected_images_csv, 'r'))
next(reader)
self.selected_list = list(reader)
def __getitem__(self, item):
t_class, t_ind = map(int, self.selected_list[item])
assert self.targets[t_ind] == t_class, 'Wrong targets in csv file.(line {})'.format(item+1)
img, target = self.data[int(self.selected_list[item][1])], self.targets[t_ind]
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return len(self.selected_list)
class Normalize(nn.Module):
def __init__(self,):
super(Normalize, self).__init__()
self.ms = [(0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)]
def forward(self, input):
x = input.clone()
for i in range(x.shape[1]):
x[:,i] = (x[:,i] - self.ms[0][i]) / self.ms[1][i]
return x
# input diversity for mdi2fgsm
def input_diversity(img):
gg = torch.randint(0, 2, (1,)).item()
if gg == 0:
return img
else:
rnd = torch.randint(32,41, (1,)).item()
rescaled = F.interpolate(img, (rnd, rnd), mode = 'nearest')
h_rem = 40 - rnd
        w_rem = 40 - rnd
        pad_top = torch.randint(0, h_rem + 1, (1,)).item()
        pad_bottom = h_rem - pad_top
        pad_left = torch.randint(0, w_rem + 1, (1,)).item()
        pad_right = w_rem - pad_left
padded = F.pad(rescaled, pad = (pad_left, pad_right, pad_top, pad_bottom))
padded = F.interpolate(padded, (32, 32), mode = 'nearest')
return padded
# vgg-19 forward
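# Runs the Normalize + VGG-19 pipeline; when linbp is True, every nn.ReLU in the feature extractor
# at module index >= linbp_layer is replaced by linbp_relu (forward value unchanged, linear gradient).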
def vgg19_forw(model, x, linbp, linbp_layer):
x = model[0](x)
for ind, mm in enumerate(model[1].features.module):
if linbp and isinstance(mm, nn.ReLU) and ind >= linbp_layer:
x = linbp_relu(x)
else:
x = mm(x)
x = x.view(x.size(0), -1)
x = model[1].classifier(x)
return x
def linbp_relu(x):
x_p = F.relu(-x)
x = x + x_p.data
return x
def vgg19_ila_forw(model, x, ila_layer):
x = model[0](x)
for ind, mm in enumerate(model[1].features.module):
x = mm(x)
if ind == ila_layer:
return x
class ILAProjLoss(torch.nn.Module):
def __init__(self):
super(ILAProjLoss, self).__init__()
def forward(self, old_attack_mid, new_mid, original_mid, coeff):
n = old_attack_mid.shape[0]
x = (old_attack_mid - original_mid).view(n, -1)
y = (new_mid - original_mid).view(n, -1)
# x_norm = x / torch.norm(x, dim = 1, keepdim = True)
proj_loss = torch.sum(y * x) / n
return proj_loss
| 3,961 | 32.016667 | 99 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/attack_vgg19.py
|
import os
import torch
import torchvision.transforms as T
import torch.nn as nn
import argparse
import models
from torch.backends import cudnn
import numpy as np
from utils import Normalize, input_diversity, vgg19_forw, vgg19_ila_forw, ILAProjLoss, SelectedCifar10
parser = argparse.ArgumentParser()
parser.add_argument('--epsilon', type=float, default = 0.03)
parser.add_argument('--niters', type=int, default = 100)
parser.add_argument('--ila_niters', type=int, default = 100)
parser.add_argument('--method', type=str, default = 'linbp_ila_pgd')
parser.add_argument('--linbp_layer', type=int, default = 23)
parser.add_argument('--ila_layer', type=int, default = 23)
parser.add_argument('--save_dir', type=str, default = '')
parser.add_argument('--batch_size', type=int, default = 500)
parser.add_argument('--target_attack', default=False, action='store_true')
args = parser.parse_args()
if __name__ == '__main__':
print(args)
cudnn.benchmark = False
cudnn.deterministic = True
SEED = 0
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
os.makedirs(args.save_dir, exist_ok=True)
epsilon = args.epsilon
batch_size = args.batch_size
method = args.method
ila_layer = args.ila_layer
linbp_layer = args.linbp_layer
save_dir = args.save_dir
niters = args.niters
ila_niters = args.ila_niters
target_attack = args.target_attack
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
model = models.__dict__['vgg19_bn'](num_classes=10)
model.features = nn.DataParallel(model.features)
model.load_state_dict(torch.load('attack/cifar10/models/ckpt/vgg19_bn/model_best.pth.tar', map_location=(device))['state_dict'])
model = nn.Sequential(
Normalize(),
model
)
model.to(device)
model.eval()
cifar10 = SelectedCifar10('data/cifar10/cifar-10-batches-py',
'data/cifar10/selected_cifar10.csv',
transform=T.ToTensor())
ori_loader = torch.utils.data.DataLoader(cifar10, batch_size=batch_size, shuffle=False, num_workers = 8)
if target_attack:
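        # targeted attack: map each label y to (y + 1) mod 10 as the target class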
label_switch = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).long()
label_ls = []
    for ind, (ori_img, label) in enumerate(ori_loader):
label_ls.append(label)
if target_attack:
label = label_switch[label]
ori_img = ori_img.to(device)
img = ori_img.clone()
m = 0
for i in range(niters):
            # In our implementation of PGD, we incorporate randomness at each iteration to further enhance transferability
            if 'pgd' in method:
img_x = img + img.new(img.size()).uniform_(-epsilon, epsilon)
else:
img_x = img
img_x.requires_grad_(True)
if 'linbp' in method:
output = vgg19_forw(model, input_diversity(img_x) if method == 'mdi2fgsm' or method == 'linbp_mdi2fgsm' else img_x, True, linbp_layer)
else:
output = vgg19_forw(model, input_diversity(img_x) if method == 'mdi2fgsm' or method == 'linbp_mdi2fgsm' else img_x, False, None)
loss = nn.CrossEntropyLoss()(output, label.to(device))
model.zero_grad()
loss.backward()
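            # MI-FGSM-style momentum: accumulate L1-normalized gradients across iterations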
if 'mdi2fgsm' in method or 'mifgsm' in method:
g = img_x.grad.data
input_grad = 1 * m + g / torch.norm(g, dim=(1, 2, 3), p=1, keepdim=True)
m = input_grad
else:
input_grad = img_x.grad.data
if target_attack:
input_grad = -input_grad
if method == 'fgsm' or '_fgsm' in method:
img = img.data + 2 * epsilon * torch.sign(input_grad)
else:
img = img.data + 1./255 * torch.sign(input_grad)
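            # project back into the L-infinity epsilon-ball around ori_img and clip to [0, 1]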
img = torch.where(img > ori_img + epsilon, ori_img + epsilon, img)
img = torch.where(img < ori_img - epsilon, ori_img - epsilon, img)
img = torch.clamp(img, min=0, max=1)
if 'ila' in method:
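            # ILA fine-tuning: restart from the clean image and maximize the projection of the
            # mid-layer feature shift onto the shift of the baseline attack computed above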
m = 0
attack_img = img.clone()
img = ori_img.clone().to(device)
with torch.no_grad():
mid_output = vgg19_ila_forw(model, ori_img, ila_layer)
mid_original = torch.zeros(mid_output.size()).to(device)
mid_original.copy_(mid_output)
mid_output = vgg19_ila_forw(model, attack_img, ila_layer)
mid_attack_original = torch.zeros(mid_output.size()).to(device)
mid_attack_original.copy_(mid_output)
for _ in range(ila_niters):
img.requires_grad_(True)
mid_output = vgg19_ila_forw(model, img, ila_layer)
loss = ILAProjLoss()(
mid_attack_original.detach(), mid_output, mid_original.detach(), 1.0
)
model.zero_grad()
loss.backward()
input_grad = img.grad.data
if method == 'ila_fgsm':
img = img.data + 2 * epsilon * torch.sign(input_grad)
else:
img = img.data + 1./255 * torch.sign(input_grad)
img = torch.where(img > ori_img + epsilon, ori_img + epsilon, img)
img = torch.where(img < ori_img - epsilon, ori_img - epsilon, img)
img = torch.clamp(img, min=0, max=1)
np.save(save_dir + '/batch_{}.npy'.format(ind), torch.round(img.data*255).cpu().numpy())
print('batch_{}.npy saved'.format(ind))
label_ls = torch.cat(label_ls)
np.save(save_dir + '/labels.npy', label_ls.numpy())
print('all batches saved')
| 5,763 | 39.307692 | 150 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/vgg.py
|
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
import torch
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
class VGG(nn.Module):
def __init__(self, features, num_classes=1000):
super(VGG, self).__init__()
self.features = features
self.classifier = nn.Linear(512, num_classes)
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
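# build the feature extractor from a cfg list: integers are 3x3 conv output channels,
# 'M' inserts a 2x2 max-pooling layer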
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=False)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg11(**kwargs):
"""VGG 11-layer model (configuration "A")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['A']), **kwargs)
return model
def vgg11_bn(**kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization"""
model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
return model
def vgg13(**kwargs):
"""VGG 13-layer model (configuration "B")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['B']), **kwargs)
return model
def vgg13_bn(**kwargs):
"""VGG 13-layer model (configuration "B") with batch normalization"""
model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
return model
def vgg16(**kwargs):
"""VGG 16-layer model (configuration "D")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['D']), **kwargs)
return model
def vgg16_bn(**kwargs):
"""VGG 16-layer model (configuration "D") with batch normalization"""
model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
return model
def vgg19(**kwargs):
"""VGG 19-layer model (configuration "E")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['E']), **kwargs)
return model
def vgg19_bn(**kwargs):
"""VGG 19-layer model (configuration 'E') with batch normalization"""
model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
return model
| 3,731 | 27.707692 | 113 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/pyramidnet.py
|
import torch
import torch.nn as nn
import math
__all__ = ['pyramidnet272']
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def calc_prob(curr_layer, total_layers, p_l):
"""Calculates drop prob depending on the current layer."""
return 1 - (float(curr_layer) / total_layers) * p_l
class Bottleneck(nn.Module):
outchannel_ratio = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, prob=1.):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
if stride == 1:
self.conv2 = nn.Conv2d(planes, (planes * 1), kernel_size=3, stride=stride,
padding=1, bias=False)
else:
self.conv2 = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)),
nn.Conv2d(planes, (planes * 1), kernel_size=3, stride=stride,
padding=0, bias=False))
self.bn3 = nn.BatchNorm2d((planes * 1))
self.conv3 = nn.Conv2d((planes * 1), planes * Bottleneck.outchannel_ratio, kernel_size=1, bias=False)
self.bn4 = nn.BatchNorm2d(planes * Bottleneck.outchannel_ratio)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.prob = prob
self.padding = None
def forward(self, x):
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn4(out)
# shake drop inference
# we may support shake drop training in a future version
assert not self.training
out = out * self.prob
if self.downsample is not None:
shortcut = self.downsample(x)
featuremap_size = shortcut.size()[2:4]
else:
shortcut = x
featuremap_size = out.size()[2:4]
batch_size = out.size()[0]
residual_channel = out.size()[1]
shortcut_channel = shortcut.size()[1]
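        # PyramidNet widens channels every block, so the identity shortcut is padded with
        # zero channels (cached on the first call, which assumes a fixed batch size)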
if residual_channel != shortcut_channel:
if self.padding is None:
self.padding = torch.zeros(batch_size, residual_channel - shortcut_channel,
featuremap_size[0], featuremap_size[1])
self.padding = self.padding.to(x.device)
out += torch.cat((shortcut, self.padding), 1)
else:
out += shortcut
return out
class PyramidNet(nn.Module):
def __init__(self, depth, alpha, num_classes):
super(PyramidNet, self).__init__()
self.inplanes = 16
n = int((depth - 2) / 9)
block = Bottleneck
self.addrate = alpha / (3 * n * 1.0)
self.input_featuremap_dim = self.inplanes
self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim)
self.featuremap_dim = self.input_featuremap_dim
self.p_l = 0.5
self.layer_num = 1
self.total_layers = n * 3
self.layer1 = self.pyramidal_make_layer(block, n)
self.layer2 = self.pyramidal_make_layer(block, n, stride=2)
self.layer3 = self.pyramidal_make_layer(block, n, stride=2)
self.final_featuremap_dim = self.input_featuremap_dim
self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim)
self.relu_final = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(self.final_featuremap_dim, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def pyramidal_make_layer(self, block, block_depth, stride=1):
downsample = None
if stride != 1: # or self.inplanes != int(round(featuremap_dim_1st)) * block.outchannel_ratio:
downsample = nn.AvgPool2d((2, 2), stride=(2, 2), ceil_mode=True)
layers = []
self.featuremap_dim = self.featuremap_dim + self.addrate
prob = calc_prob(self.layer_num, self.total_layers, self.p_l)
layers.append(block(self.input_featuremap_dim, int(round(self.featuremap_dim)), stride, downsample, prob))
self.layer_num += 1
for i in range(1, block_depth):
temp_featuremap_dim = self.featuremap_dim + self.addrate
prob = calc_prob(self.layer_num, self.total_layers, self.p_l)
layers.append(
block(int(round(self.featuremap_dim)) * block.outchannel_ratio, int(round(temp_featuremap_dim)), 1,
prob=prob))
self.layer_num += 1
self.featuremap_dim = temp_featuremap_dim
self.input_featuremap_dim = int(round(self.featuremap_dim)) * block.outchannel_ratio
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.bn_final(x)
x = self.relu_final(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def pyramidnet272(**kwargs):
return PyramidNet(depth=272, alpha=200, **kwargs)
| 5,819 | 34.487805 | 115 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/densenet.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = ['densenet']
from torch.autograd import Variable
class Bottleneck(nn.Module):
def __init__(self, inplanes, expansion=4, growthRate=12, dropRate=0):
super(Bottleneck, self).__init__()
planes = expansion * growthRate
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, growthRate, kernel_size=3,
padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.dropRate = dropRate
def forward(self, x):
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
if self.dropRate > 0:
out = F.dropout(out, p=self.dropRate, training=self.training)
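        # dense connectivity: concatenate the block input with the newly produced feature maps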
out = torch.cat((x, out), 1)
return out
class BasicBlock(nn.Module):
def __init__(self, inplanes, expansion=1, growthRate=12, dropRate=0):
super(BasicBlock, self).__init__()
planes = expansion * growthRate
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, growthRate, kernel_size=3,
padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.dropRate = dropRate
def forward(self, x):
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
if self.dropRate > 0:
out = F.dropout(out, p=self.dropRate, training=self.training)
out = torch.cat((x, out), 1)
return out
class Transition(nn.Module):
def __init__(self, inplanes, outplanes):
super(Transition, self).__init__()
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, outplanes, kernel_size=1,
bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = F.avg_pool2d(out, 2)
return out
class DenseNet(nn.Module):
def __init__(self, depth=22, block=Bottleneck,
dropRate=0, num_classes=10, growthRate=12, compressionRate=2):
super(DenseNet, self).__init__()
assert (depth - 4) % 3 == 0, 'depth should be 3n+4'
        n = (depth - 4) // 3 if block == BasicBlock else (depth - 4) // 6
self.growthRate = growthRate
self.dropRate = dropRate
# self.inplanes is a global variable used across multiple
# helper functions
self.inplanes = growthRate * 2
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, padding=1,
bias=False)
self.dense1 = self._make_denseblock(block, n)
self.trans1 = self._make_transition(compressionRate)
self.dense2 = self._make_denseblock(block, n)
self.trans2 = self._make_transition(compressionRate)
self.dense3 = self._make_denseblock(block, n)
self.bn = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(self.inplanes, num_classes)
# Weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_denseblock(self, block, blocks):
layers = []
for i in range(blocks):
# Currently we fix the expansion ratio as the default value
layers.append(block(self.inplanes, growthRate=self.growthRate, dropRate=self.dropRate))
self.inplanes += self.growthRate
return nn.Sequential(*layers)
def _make_transition(self, compressionRate):
inplanes = self.inplanes
outplanes = int(math.floor(self.inplanes // compressionRate))
self.inplanes = outplanes
return Transition(inplanes, outplanes)
def forward(self, x):
x = self.conv1(x)
x = self.trans1(self.dense1(x))
x = self.trans2(self.dense2(x))
x = self.dense3(x)
x = self.bn(x)
x = self.relu(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def densenet(**kwargs):
"""
Constructs a ResNet model.
"""
return DenseNet(**kwargs)
| 4,724 | 30.711409 | 99 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/resnext.py
|
from __future__ import division
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
__all__ = ['resnext']
class ResNeXtBottleneck(nn.Module):
def __init__(self, in_channels, out_channels, stride, cardinality, widen_factor):
""" Constructor
Args:
in_channels: input channel dimensionality
out_channels: output channel dimensionality
stride: conv stride. Replaces pooling layer.
cardinality: num of convolution groups.
widen_factor: factor to reduce the input dimensionality before convolution.
"""
super(ResNeXtBottleneck, self).__init__()
D = cardinality * out_channels // widen_factor
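        # D: total width of the grouped 3x3 convolution (split across `cardinality` groups)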
self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D)
self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
self.bn = nn.BatchNorm2d(D)
self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(out_channels)
self.shortcut = nn.Sequential()
if in_channels != out_channels:
self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0, bias=False))
self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))
def forward(self, x):
bottleneck = self.conv_reduce.forward(x)
bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)
bottleneck = self.conv_conv.forward(bottleneck)
bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)
bottleneck = self.conv_expand.forward(bottleneck)
bottleneck = self.bn_expand.forward(bottleneck)
residual = self.shortcut.forward(x)
return F.relu(residual + bottleneck, inplace=True)
class CifarResNeXt(nn.Module):
def __init__(self, cardinality, depth, num_classes, widen_factor=4, dropRate=0):
""" Constructor
Args:
cardinality: number of convolution groups.
depth: number of layers.
num_classes: number of classes
widen_factor: factor to adjust the channel dimensionality
"""
super(CifarResNeXt, self).__init__()
self.cardinality = cardinality
self.depth = depth
self.block_depth = (self.depth - 2) // 9
self.widen_factor = widen_factor
self.num_classes = num_classes
self.output_size = 64
self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
self.classifier = nn.Linear(1024, num_classes)
init.kaiming_normal(self.classifier.weight)
for key in self.state_dict():
if key.split('.')[-1] == 'weight':
if 'conv' in key:
init.kaiming_normal(self.state_dict()[key], mode='fan_out')
if 'bn' in key:
self.state_dict()[key][...] = 1
elif key.split('.')[-1] == 'bias':
self.state_dict()[key][...] = 0
def block(self, name, in_channels, out_channels, pool_stride=2):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
name: string name of the current block.
in_channels: number of input channels
out_channels: number of output channels
pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
block = nn.Sequential()
for bottleneck in range(self.block_depth):
name_ = '%s_bottleneck_%d' % (name, bottleneck)
if bottleneck == 0:
block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.cardinality,
self.widen_factor))
else:
block.add_module(name_,
ResNeXtBottleneck(out_channels, out_channels, 1, self.cardinality, self.widen_factor))
return block
def forward(self, x):
x = self.conv_1_3x3.forward(x)
x = F.relu(self.bn_1.forward(x), inplace=True)
x = self.stage_1.forward(x)
x = self.stage_2.forward(x)
x = self.stage_3.forward(x)
x = F.avg_pool2d(x, 8, 1)
x = x.view(-1, 1024)
return self.classifier(x)
def resnext(**kwargs):
"""Constructs a ResNeXt.
"""
model = CifarResNeXt(**kwargs)
return model
| 5,072 | 43.113043 | 144 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/__init__.py
|
from __future__ import absolute_import
from .vgg import *
from .resnext import *
from .wrn import *
from .densenet import *
from .pyramidnet import *
from .gdas import *
| 172 | 16.3 | 38 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/wrn.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['wrn']
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
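    # pre-activation block: BN-ReLU precede each conv; a 1x1 conv shortcut is used only
    # when the number of input and output channels differs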
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(nb_layers):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
return self.fc(out)
def wrn(**kwargs):
"""
Constructs a Wide Residual Networks.
"""
model = WideResNet(**kwargs)
return model
| 3,896 | 40.457447 | 116 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/__init__.py
|
import os
import os.path as osp
import torch
from models.gdas.lib.scheduler import load_config
from models.gdas.lib.scheduler import load_config
from models.gdas.lib.nas import model_types
from models.gdas.lib.nas import NetworkCIFAR as Network
__all__ = ['gdas']
def gdas(checkpoint_fname):
checkpoint = torch.load(checkpoint_fname, map_location='cpu')
xargs = checkpoint['args']
config = load_config(os.path.join(osp.dirname(__file__), xargs.model_config))
genotype = model_types[xargs.arch]
class_num = 10
model = Network(xargs.init_channels, class_num, xargs.layers, config.auxiliary, genotype)
model.load_state_dict(checkpoint['state_dict'])
return model
| 697 | 29.347826 | 93 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/__init__.py
| 0 | 0 | 0 |
py
|
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/scheduler/utils.py
|
import os, sys, json
from pathlib import Path
from collections import namedtuple
support_types = ('str', 'int', 'bool', 'float')
def convert_param(original_lists):
assert isinstance(original_lists, list), 'The type is not right : {:}'.format(original_lists)
ctype, value = original_lists[0], original_lists[1]
assert ctype in support_types, 'Ctype={:}, support={:}'.format(ctype, support_types)
is_list = isinstance(value, list)
if not is_list: value = [value]
outs = []
for x in value:
if ctype == 'int':
x = int(x)
elif ctype == 'str':
x = str(x)
elif ctype == 'bool':
x = bool(int(x))
elif ctype == 'float':
x = float(x)
else:
      raise TypeError('Unknown type : {:}'.format(ctype))
outs.append(x)
if not is_list: outs = outs[0]
return outs
def load_config(path):
path = str(path)
assert os.path.exists(path), 'Can not find {:}'.format(path)
# Reading data back
with open(path, 'r') as f:
data = json.load(f)
f.close()
content = { k: convert_param(v) for k,v in data.items()}
Arguments = namedtuple('Configure', ' '.join(content.keys()))
content = Arguments(**content)
return content
| 1,192 | 28.097561 | 95 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/scheduler/scheduler.py
|
import torch
from bisect import bisect_right
class MultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(self, optimizer, milestones, gammas, last_epoch=-1):
if not list(milestones) == sorted(milestones):
      raise ValueError('Milestones should be a list of'
                       ' increasing integers. Got {:}'.format(milestones))
assert len(milestones) == len(gammas), '{:} vs {:}'.format(milestones, gammas)
self.milestones = milestones
self.gammas = gammas
super(MultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
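    # multiply each base_lr by every gamma whose milestone has already been reached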
LR = 1
for x in self.gammas[:bisect_right(self.milestones, self.last_epoch)]: LR = LR * x
return [base_lr * LR for base_lr in self.base_lrs]
def obtain_scheduler(config, optimizer):
if config.type == 'multistep':
scheduler = MultiStepLR(optimizer, milestones=config.milestones, gammas=config.gammas)
elif config.type == 'cosine':
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, config.epochs)
else:
raise ValueError('Unknown learning rate scheduler type : {:}'.format(config.type))
return scheduler
| 1,124 | 35.290323 | 90 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/scheduler/__init__.py
|
from .utils import load_config
from .scheduler import MultiStepLR, obtain_scheduler
| 85 | 20.5 | 52 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/nas/ImageNet.py
|
import torch
import torch.nn as nn
from .construct_utils import Cell, Transition
class AuxiliaryHeadImageNet(nn.Module):
def __init__(self, C, num_classes):
"""assuming input size 14x14"""
super(AuxiliaryHeadImageNet, self).__init__()
self.features = nn.Sequential(
nn.ReLU(inplace=True),
nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),
nn.Conv2d(C, 128, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 768, 2, bias=False),
# NOTE: This batchnorm was omitted in my earlier implementation due to a typo.
# Commenting it out for consistency with the experiments in the paper.
# nn.BatchNorm2d(768),
nn.ReLU(inplace=True)
)
self.classifier = nn.Linear(768, num_classes)
def forward(self, x):
x = self.features(x)
x = self.classifier(x.view(x.size(0),-1))
return x
class NetworkImageNet(nn.Module):
def __init__(self, C, num_classes, layers, auxiliary, genotype):
super(NetworkImageNet, self).__init__()
self._layers = layers
self.stem0 = nn.Sequential(
nn.Conv2d(3, C // 2, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(C // 2),
nn.ReLU(inplace=True),
nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(C),
)
self.stem1 = nn.Sequential(
nn.ReLU(inplace=True),
nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(C),
)
C_prev_prev, C_prev, C_curr = C, C, C
self.cells = nn.ModuleList()
reduction_prev = True
for i in range(layers):
if i in [layers // 3, 2 * layers // 3]:
C_curr *= 2
reduction = True
else:
reduction = False
if reduction and genotype.reduce is None:
cell = Transition(C_prev_prev, C_prev, C_curr, reduction_prev)
else:
cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
reduction_prev = reduction
self.cells += [cell]
C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
if i == 2 * layers // 3:
C_to_auxiliary = C_prev
if auxiliary:
self.auxiliary_head = AuxiliaryHeadImageNet(C_to_auxiliary, num_classes)
else:
self.auxiliary_head = None
self.global_pooling = nn.AvgPool2d(7)
self.classifier = nn.Linear(C_prev, num_classes)
self.drop_path_prob = -1
def update_drop_path(self, drop_path_prob):
self.drop_path_prob = drop_path_prob
def get_drop_path(self):
return self.drop_path_prob
def auxiliary_param(self):
if self.auxiliary_head is None: return []
else: return list( self.auxiliary_head.parameters() )
def forward(self, input):
s0 = self.stem0(input)
s1 = self.stem1(s0)
for i, cell in enumerate(self.cells):
s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
#print ('{:} : {:} - {:}'.format(i, s0.size(), s1.size()))
if i == 2 * self._layers // 3:
if self.auxiliary_head and self.training:
logits_aux = self.auxiliary_head(s1)
out = self.global_pooling(s1)
logits = self.classifier(out.view(out.size(0), -1))
if self.auxiliary_head and self.training:
return logits, logits_aux
else:
return logits
| 3,272 | 30.171429 | 85 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/nas/CifarNet.py
|
import torch
import torch.nn as nn
from .construct_utils import Cell, Transition
class AuxiliaryHeadCIFAR(nn.Module):
def __init__(self, C, num_classes):
"""assuming input size 8x8"""
super(AuxiliaryHeadCIFAR, self).__init__()
self.features = nn.Sequential(
nn.ReLU(inplace=True),
nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2
nn.Conv2d(C, 128, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 768, 2, bias=False),
nn.BatchNorm2d(768),
nn.ReLU(inplace=True)
)
self.classifier = nn.Linear(768, num_classes)
def forward(self, x):
x = self.features(x)
x = self.classifier(x.view(x.size(0),-1))
return x
class NetworkCIFAR(nn.Module):
def __init__(self, C, num_classes, layers, auxiliary, genotype):
super(NetworkCIFAR, self).__init__()
self._layers = layers
stem_multiplier = 3
C_curr = stem_multiplier*C
self.stem = nn.Sequential(
nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
nn.BatchNorm2d(C_curr)
)
C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
self.cells = nn.ModuleList()
reduction_prev = False
for i in range(layers):
if i in [layers//3, 2*layers//3]:
C_curr *= 2
reduction = True
else:
reduction = False
if reduction and genotype.reduce is None:
cell = Transition(C_prev_prev, C_prev, C_curr, reduction_prev)
else:
cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
reduction_prev = reduction
self.cells.append( cell )
C_prev_prev, C_prev = C_prev, cell.multiplier*C_curr
if i == 2*layers//3:
C_to_auxiliary = C_prev
if auxiliary:
self.auxiliary_head = AuxiliaryHeadCIFAR(C_to_auxiliary, num_classes)
else:
self.auxiliary_head = None
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, num_classes)
self.drop_path_prob = -1
def update_drop_path(self, drop_path_prob):
self.drop_path_prob = drop_path_prob
def auxiliary_param(self):
if self.auxiliary_head is None: return []
else: return list( self.auxiliary_head.parameters() )
def forward(self, inputs):
s0 = s1 = self.stem(inputs)
for i, cell in enumerate(self.cells):
s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
if i == 2*self._layers//3:
if self.auxiliary_head and self.training:
logits_aux = self.auxiliary_head(s1)
out = self.global_pooling(s1)
out = out.view(out.size(0), -1)
logits = self.classifier(out)
if self.auxiliary_head and self.training:
return logits, logits_aux
else:
return logits
| 2,755 | 29.622222 | 89 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/nas/model_search.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from .head_utils import CifarHEAD, ImageNetHEAD
from .operations import OPS, FactorizedReduce, ReLUConvBN
from .genotypes import PRIMITIVES, Genotype
class MixedOp(nn.Module):
def __init__(self, C, stride):
super(MixedOp, self).__init__()
self._ops = nn.ModuleList()
for primitive in PRIMITIVES:
op = OPS[primitive](C, stride, False)
self._ops.append(op)
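  # DARTS-style mixed op: the output is the weighted sum of every candidate operation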
def forward(self, x, weights):
return sum(w * op(x) for w, op in zip(weights, self._ops))
class Cell(nn.Module):
def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev):
super(Cell, self).__init__()
self.reduction = reduction
if reduction_prev:
self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
else:
self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
self._steps = steps
self._multiplier = multiplier
self._ops = nn.ModuleList()
for i in range(self._steps):
for j in range(2+i):
stride = 2 if reduction and j < 2 else 1
op = MixedOp(C, stride)
self._ops.append(op)
def forward(self, s0, s1, weights):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
offset = 0
for i in range(self._steps):
clist = []
for j, h in enumerate(states):
x = self._ops[offset+j](h, weights[offset+j])
clist.append( x )
s = sum(clist)
offset += len(states)
states.append(s)
return torch.cat(states[-self._multiplier:], dim=1)
class Network(nn.Module):
def __init__(self, C, num_classes, layers, steps=4, multiplier=4, stem_multiplier=3, head='cifar'):
super(Network, self).__init__()
self._C = C
self._num_classes = num_classes
self._layers = layers
self._steps = steps
self._multiplier = multiplier
C_curr = stem_multiplier*C
if head == 'cifar':
self.stem = nn.Sequential(
nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
nn.BatchNorm2d(C_curr)
)
elif head == 'imagenet':
self.stem = ImageNetHEAD(C_curr, stride=1)
else:
raise ValueError('Invalid head : {:}'.format(head))
C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
reduction_prev, cells = False, []
for i in range(layers):
if i in [layers//3, 2*layers//3]:
C_curr *= 2
reduction = True
else:
reduction = False
cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
reduction_prev = reduction
cells.append( cell )
C_prev_prev, C_prev = C_prev, multiplier*C_curr
self.cells = nn.ModuleList(cells)
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, num_classes)
# initialize architecture parameters
k = sum(1 for i in range(self._steps) for n in range(2+i))
num_ops = len(PRIMITIVES)
self.alphas_normal = Parameter(torch.Tensor(k, num_ops))
self.alphas_reduce = Parameter(torch.Tensor(k, num_ops))
nn.init.normal_(self.alphas_normal, 0, 0.001)
nn.init.normal_(self.alphas_reduce, 0, 0.001)
def set_tau(self, tau):
return -1
def get_tau(self):
return -1
def arch_parameters(self):
return [self.alphas_normal, self.alphas_reduce]
def base_parameters(self):
lists = list(self.stem.parameters()) + list(self.cells.parameters())
lists += list(self.global_pooling.parameters())
lists += list(self.classifier.parameters())
return lists
def forward(self, inputs):
batch, C, H, W = inputs.size()
s0 = s1 = self.stem(inputs)
for i, cell in enumerate(self.cells):
if cell.reduction:
weights = F.softmax(self.alphas_reduce, dim=-1)
else:
weights = F.softmax(self.alphas_normal, dim=-1)
s0, s1 = s1, cell(s0, s1, weights)
out = self.global_pooling(s1)
out = out.view(batch, -1)
logits = self.classifier(out)
return logits
def genotype(self):
def _parse(weights):
gene, n, start = [], 2, 0
for i in range(self._steps):
end = start + n
W = weights[start:end].copy()
edges = sorted(range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[:2]
for j in edges:
k_best = None
for k in range(len(W[j])):
if k != PRIMITIVES.index('none'):
if k_best is None or W[j][k] > W[j][k_best]:
k_best = k
gene.append((PRIMITIVES[k_best], j, float(W[j][k_best])))
start = end
n += 1
return gene
with torch.no_grad():
gene_normal = _parse(F.softmax(self.alphas_normal, dim=-1).cpu().numpy())
gene_reduce = _parse(F.softmax(self.alphas_reduce, dim=-1).cpu().numpy())
concat = range(2+self._steps-self._multiplier, self._steps+2)
genotype = Genotype(
normal=gene_normal, normal_concat=concat,
reduce=gene_reduce, reduce_concat=concat
)
return genotype
| 5,177 | 30.005988 | 128 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/nas/head_utils.py
|
import torch
import torch.nn as nn
class ImageNetHEAD(nn.Sequential):
def __init__(self, C, stride=2):
super(ImageNetHEAD, self).__init__()
self.add_module('conv1', nn.Conv2d(3, C // 2, kernel_size=3, stride=2, padding=1, bias=False))
self.add_module('bn1' , nn.BatchNorm2d(C // 2))
self.add_module('relu1', nn.ReLU(inplace=True))
self.add_module('conv2', nn.Conv2d(C // 2, C, kernel_size=3, stride=stride, padding=1, bias=False))
self.add_module('bn2' , nn.BatchNorm2d(C))
class CifarHEAD(nn.Sequential):
def __init__(self, C):
super(CifarHEAD, self).__init__()
self.add_module('conv', nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False))
self.add_module('bn', nn.BatchNorm2d(C))
| 729 | 35.5 | 103 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/nas/construct_utils.py
|
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from .operations import OPS, FactorizedReduce, ReLUConvBN, Identity
def random_select(length, ratio):
clist = []
index = random.randint(0, length-1)
for i in range(length):
if i == index or random.random() < ratio:
clist.append( 1 )
else:
clist.append( 0 )
return clist
def all_select(length):
return [1 for i in range(length)]
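# drop_path: per-sample stochastic depth; drop a sample's path with probability drop_prob
# and rescale the survivors by 1 / keep_prob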
def drop_path(x, drop_prob):
if drop_prob > 0.:
keep_prob = 1. - drop_prob
mask = x.new_zeros(x.size(0), 1, 1, 1)
mask = mask.bernoulli_(keep_prob)
x.div_(keep_prob)
x.mul_(mask)
return x
def return_alphas_str(basemodel):
string = 'normal : {:}'.format( F.softmax(basemodel.alphas_normal, dim=-1) )
if hasattr(basemodel, 'alphas_reduce'):
string = string + '\nreduce : {:}'.format( F.softmax(basemodel.alphas_reduce, dim=-1) )
return string
class Cell(nn.Module):
def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
super(Cell, self).__init__()
# print(C_prev_prev, C_prev, C)
if reduction_prev:
self.preprocess0 = FactorizedReduce(C_prev_prev, C)
else:
self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)
if reduction:
op_names, indices, values = zip(*genotype.reduce)
concat = genotype.reduce_concat
else:
op_names, indices, values = zip(*genotype.normal)
concat = genotype.normal_concat
self._compile(C, op_names, indices, values, concat, reduction)
def _compile(self, C, op_names, indices, values, concat, reduction):
assert len(op_names) == len(indices)
self._steps = len(op_names) // 2
self._concat = concat
self.multiplier = len(concat)
self._ops = nn.ModuleList()
for name, index in zip(op_names, indices):
stride = 2 if reduction and index < 2 else 1
op = OPS[name](C, stride, True)
self._ops.append( op )
self._indices = indices
self._values = values
def forward(self, s0, s1, drop_prob):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
for i in range(self._steps):
h1 = states[self._indices[2*i]]
h2 = states[self._indices[2*i+1]]
op1 = self._ops[2*i]
op2 = self._ops[2*i+1]
h1 = op1(h1)
h2 = op2(h2)
if self.training and drop_prob > 0.:
if not isinstance(op1, Identity):
h1 = drop_path(h1, drop_prob)
if not isinstance(op2, Identity):
h2 = drop_path(h2, drop_prob)
s = h1 + h2
states += [s]
return torch.cat([states[i] for i in self._concat], dim=1)
class Transition(nn.Module):
def __init__(self, C_prev_prev, C_prev, C, reduction_prev, multiplier=4):
super(Transition, self).__init__()
if reduction_prev:
self.preprocess0 = FactorizedReduce(C_prev_prev, C)
else:
self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)
self.multiplier = multiplier
self.reduction = True
self.ops1 = nn.ModuleList(
[nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C, C, (1, 3), stride=(1, 2), padding=(0, 1), groups=8, bias=False),
nn.Conv2d(C, C, (3, 1), stride=(2, 1), padding=(1, 0), groups=8, bias=False),
nn.BatchNorm2d(C, affine=True),
nn.ReLU(inplace=False),
nn.Conv2d(C, C, 1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(C, affine=True)),
nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C, C, (1, 3), stride=(1, 2), padding=(0, 1), groups=8, bias=False),
nn.Conv2d(C, C, (3, 1), stride=(2, 1), padding=(1, 0), groups=8, bias=False),
nn.BatchNorm2d(C, affine=True),
nn.ReLU(inplace=False),
nn.Conv2d(C, C, 1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(C, affine=True))])
self.ops2 = nn.ModuleList(
[nn.Sequential(
nn.MaxPool2d(3, stride=2, padding=1),
nn.BatchNorm2d(C, affine=True)),
nn.Sequential(
nn.MaxPool2d(3, stride=2, padding=1),
nn.BatchNorm2d(C, affine=True))])
def forward(self, s0, s1, drop_prob = -1):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
X0 = self.ops1[0] (s0)
X1 = self.ops1[1] (s1)
if self.training and drop_prob > 0.:
X0, X1 = drop_path(X0, drop_prob), drop_path(X1, drop_prob)
#X2 = self.ops2[0] (X0+X1)
X2 = self.ops2[0] (s0)
X3 = self.ops2[1] (s1)
if self.training and drop_prob > 0.:
X2, X3 = drop_path(X2, drop_prob), drop_path(X3, drop_prob)
return torch.cat([X0, X1, X2, X3], dim=1)
| 5,003 | 31.705882 | 99 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/nas/__init__.py
|
from .model_search import Network
from .CifarNet import NetworkCIFAR
from .ImageNet import NetworkImageNet
# genotypes
from .genotypes import model_types
from .construct_utils import return_alphas_str
| 227 | 21.8 | 46 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/nas/SE_Module.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# Squeeze and Excitation module
class SqEx(nn.Module):
def __init__(self, n_features, reduction=16):
super(SqEx, self).__init__()
if n_features % reduction != 0:
raise ValueError('n_features must be divisible by reduction (default = 16)')
self.linear1 = nn.Linear(n_features, n_features // reduction, bias=True)
self.nonlin1 = nn.ReLU(inplace=True)
self.linear2 = nn.Linear(n_features // reduction, n_features, bias=True)
self.nonlin2 = nn.Sigmoid()
def forward(self, x):
y = F.avg_pool2d(x, kernel_size=x.size()[2:4])
y = y.permute(0, 2, 3, 1)
y = self.nonlin1(self.linear1(y))
y = self.nonlin2(self.linear2(y))
y = y.permute(0, 3, 1, 2)
y = x * y
return y
| 762 | 26.25 | 82 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/nas/genotypes.py
|
from collections import namedtuple
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
PRIMITIVES = [
'none',
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
NASNet = Genotype(
normal = [
('sep_conv_5x5', 1, 1.0),
('sep_conv_3x3', 0, 1.0),
('sep_conv_5x5', 0, 1.0),
('sep_conv_3x3', 0, 1.0),
('avg_pool_3x3', 1, 1.0),
('skip_connect', 0, 1.0),
('avg_pool_3x3', 0, 1.0),
('avg_pool_3x3', 0, 1.0),
('sep_conv_3x3', 1, 1.0),
('skip_connect', 1, 1.0),
],
normal_concat = [2, 3, 4, 5, 6],
reduce = [
('sep_conv_5x5', 1, 1.0),
('sep_conv_7x7', 0, 1.0),
('max_pool_3x3', 1, 1.0),
('sep_conv_7x7', 0, 1.0),
('avg_pool_3x3', 1, 1.0),
('sep_conv_5x5', 0, 1.0),
('skip_connect', 3, 1.0),
('avg_pool_3x3', 2, 1.0),
('sep_conv_3x3', 2, 1.0),
('max_pool_3x3', 1, 1.0),
],
reduce_concat = [4, 5, 6],
)
AmoebaNet = Genotype(
normal = [
('avg_pool_3x3', 0, 1.0),
('max_pool_3x3', 1, 1.0),
('sep_conv_3x3', 0, 1.0),
('sep_conv_5x5', 2, 1.0),
('sep_conv_3x3', 0, 1.0),
('avg_pool_3x3', 3, 1.0),
('sep_conv_3x3', 1, 1.0),
('skip_connect', 1, 1.0),
('skip_connect', 0, 1.0),
('avg_pool_3x3', 1, 1.0),
],
normal_concat = [4, 5, 6],
reduce = [
('avg_pool_3x3', 0, 1.0),
('sep_conv_3x3', 1, 1.0),
('max_pool_3x3', 0, 1.0),
('sep_conv_7x7', 2, 1.0),
('sep_conv_7x7', 0, 1.0),
('avg_pool_3x3', 1, 1.0),
('max_pool_3x3', 0, 1.0),
('max_pool_3x3', 1, 1.0),
('conv_7x1_1x7', 0, 1.0),
('sep_conv_3x3', 5, 1.0),
],
reduce_concat = [3, 4, 6]
)
DARTS_V1 = Genotype(
normal=[
('sep_conv_3x3', 1, 1.0),
('sep_conv_3x3', 0, 1.0),
('skip_connect', 0, 1.0),
('sep_conv_3x3', 1, 1.0),
('skip_connect', 0, 1.0),
('sep_conv_3x3', 1, 1.0),
('sep_conv_3x3', 0, 1.0),
('skip_connect', 2, 1.0)],
normal_concat=[2, 3, 4, 5],
reduce=[
('max_pool_3x3', 0, 1.0),
('max_pool_3x3', 1, 1.0),
('skip_connect', 2, 1.0),
('max_pool_3x3', 0, 1.0),
('max_pool_3x3', 0, 1.0),
('skip_connect', 2, 1.0),
('skip_connect', 2, 1.0),
('avg_pool_3x3', 0, 1.0)],
reduce_concat=[2, 3, 4, 5]
)
DARTS_V2 = Genotype(
normal=[
('sep_conv_3x3', 0, 1.0),
('sep_conv_3x3', 1, 1.0),
('sep_conv_3x3', 0, 1.0),
('sep_conv_3x3', 1, 1.0),
('sep_conv_3x3', 1, 1.0),
('skip_connect', 0, 1.0),
('skip_connect', 0, 1.0),
('dil_conv_3x3', 2, 1.0)],
normal_concat=[2, 3, 4, 5],
reduce=[
('max_pool_3x3', 0, 1.0),
('max_pool_3x3', 1, 1.0),
('skip_connect', 2, 1.0),
('max_pool_3x3', 1, 1.0),
('max_pool_3x3', 0, 1.0),
('skip_connect', 2, 1.0),
('skip_connect', 2, 1.0),
('max_pool_3x3', 1, 1.0)],
reduce_concat=[2, 3, 4, 5]
)
PNASNet = Genotype(
normal = [
('sep_conv_5x5', 0, 1.0),
('max_pool_3x3', 0, 1.0),
('sep_conv_7x7', 1, 1.0),
('max_pool_3x3', 1, 1.0),
('sep_conv_5x5', 1, 1.0),
('sep_conv_3x3', 1, 1.0),
('sep_conv_3x3', 4, 1.0),
('max_pool_3x3', 1, 1.0),
('sep_conv_3x3', 0, 1.0),
('skip_connect', 1, 1.0),
],
normal_concat = [2, 3, 4, 5, 6],
reduce = [
('sep_conv_5x5', 0, 1.0),
('max_pool_3x3', 0, 1.0),
('sep_conv_7x7', 1, 1.0),
('max_pool_3x3', 1, 1.0),
('sep_conv_5x5', 1, 1.0),
('sep_conv_3x3', 1, 1.0),
('sep_conv_3x3', 4, 1.0),
('max_pool_3x3', 1, 1.0),
('sep_conv_3x3', 0, 1.0),
('skip_connect', 1, 1.0),
],
reduce_concat = [2, 3, 4, 5, 6],
)
# https://arxiv.org/pdf/1802.03268.pdf
ENASNet = Genotype(
normal = [
('sep_conv_3x3', 1, 1.0),
('skip_connect', 1, 1.0),
('sep_conv_5x5', 1, 1.0),
('skip_connect', 0, 1.0),
('avg_pool_3x3', 0, 1.0),
('sep_conv_3x3', 1, 1.0),
('sep_conv_3x3', 0, 1.0),
('avg_pool_3x3', 1, 1.0),
('sep_conv_5x5', 1, 1.0),
('avg_pool_3x3', 0, 1.0),
],
normal_concat = [2, 3, 4, 5, 6],
reduce = [
('sep_conv_5x5', 0, 1.0),
('sep_conv_3x3', 1, 1.0), # 2
('sep_conv_3x3', 1, 1.0),
('avg_pool_3x3', 1, 1.0), # 3
('sep_conv_3x3', 1, 1.0),
('avg_pool_3x3', 1, 1.0), # 4
('avg_pool_3x3', 1, 1.0),
('sep_conv_5x5', 4, 1.0), # 5
('sep_conv_3x3', 5, 1.0),
('sep_conv_5x5', 0, 1.0),
],
reduce_concat = [2, 3, 4, 5, 6],
)
DARTS = DARTS_V2
# Search by normal and reduce
GDAS_V1 = Genotype(
normal=[('skip_connect', 0, 0.13017432391643524), ('skip_connect', 1, 0.12947972118854523), ('skip_connect', 0, 0.13062666356563568), ('sep_conv_5x5', 2, 0.12980839610099792), ('sep_conv_3x3', 3, 0.12923765182495117), ('skip_connect', 0, 0.12901571393013), ('sep_conv_5x5', 4, 0.12938997149467468), ('sep_conv_3x3', 3, 0.1289220005273819)],
normal_concat=range(2, 6),
reduce=[('sep_conv_5x5', 0, 0.12862831354141235), ('sep_conv_3x3', 1, 0.12783904373645782), ('sep_conv_5x5', 2, 0.12725995481014252), ('sep_conv_5x5', 1, 0.12705285847187042), ('dil_conv_5x5', 2, 0.12797553837299347), ('sep_conv_3x3', 1, 0.12737272679805756), ('sep_conv_5x5', 0, 0.12833961844444275), ('sep_conv_5x5', 1, 0.12758426368236542)],
reduce_concat=range(2, 6)
)
# Search by normal and fixing reduction
GDAS_F1 = Genotype(
normal=[('skip_connect', 0, 0.16), ('skip_connect', 1, 0.13), ('skip_connect', 0, 0.17), ('sep_conv_3x3', 2, 0.15), ('skip_connect', 0, 0.17), ('sep_conv_3x3', 2, 0.15), ('skip_connect', 0, 0.16), ('sep_conv_3x3', 2, 0.15)],
normal_concat=[2, 3, 4, 5],
reduce=None,
reduce_concat=[2, 3, 4, 5],
)
# Combine DMS_V1 and DMS_F1
GDAS_GF = Genotype(
normal=[('skip_connect', 0, 0.13017432391643524), ('skip_connect', 1, 0.12947972118854523), ('skip_connect', 0, 0.13062666356563568), ('sep_conv_5x5', 2, 0.12980839610099792), ('sep_conv_3x3', 3, 0.12923765182495117), ('skip_connect', 0, 0.12901571393013), ('sep_conv_5x5', 4, 0.12938997149467468), ('sep_conv_3x3', 3, 0.1289220005273819)],
normal_concat=range(2, 6),
reduce=None,
reduce_concat=range(2, 6)
)
GDAS_FG = Genotype(
normal=[('skip_connect', 0, 0.16), ('skip_connect', 1, 0.13), ('skip_connect', 0, 0.17), ('sep_conv_3x3', 2, 0.15), ('skip_connect', 0, 0.17), ('sep_conv_3x3', 2, 0.15), ('skip_connect', 0, 0.16), ('sep_conv_3x3', 2, 0.15)],
normal_concat=range(2, 6),
reduce=[('sep_conv_5x5', 0, 0.12862831354141235), ('sep_conv_3x3', 1, 0.12783904373645782), ('sep_conv_5x5', 2, 0.12725995481014252), ('sep_conv_5x5', 1, 0.12705285847187042), ('dil_conv_5x5', 2, 0.12797553837299347), ('sep_conv_3x3', 1, 0.12737272679805756), ('sep_conv_5x5', 0, 0.12833961844444275), ('sep_conv_5x5', 1, 0.12758426368236542)],
reduce_concat=range(2, 6)
)
model_types = {'DARTS_V1': DARTS_V1,
'DARTS_V2': DARTS_V2,
'NASNet' : NASNet,
'PNASNet' : PNASNet,
'AmoebaNet': AmoebaNet,
'ENASNet' : ENASNet,
'GDAS_V1' : GDAS_V1,
'GDAS_F1' : GDAS_F1,
'GDAS_GF' : GDAS_GF,
'GDAS_FG' : GDAS_FG}
| 7,085 | 31.063348 | 346 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/nas/operations.py
|
import torch
import torch.nn as nn
OPS = {
'none' : lambda C, stride, affine: Zero(stride),
'avg_pool_3x3' : lambda C, stride, affine: nn.Sequential(
nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
nn.BatchNorm2d(C, affine=False) ),
'max_pool_3x3' : lambda C, stride, affine: nn.Sequential(
nn.MaxPool2d(3, stride=stride, padding=1),
nn.BatchNorm2d(C, affine=False) ),
'skip_connect' : lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
'sep_conv_3x3' : lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),
'sep_conv_5x5' : lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),
'sep_conv_7x7' : lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),
'dil_conv_3x3' : lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),
'dil_conv_5x5' : lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),
'conv_7x1_1x7' : lambda C, stride, affine: Conv717(C, C, stride, affine),
}
class Conv717(nn.Module):
def __init__(self, C_in, C_out, stride, affine):
super(Conv717, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in , C_out, (1,7), stride=(1, stride), padding=(0, 3), bias=False),
nn.Conv2d(C_out, C_out, (7,1), stride=(stride, 1), padding=(3, 0), bias=False),
nn.BatchNorm2d(C_out, affine=affine)
)
def forward(self, x):
return self.op(x)
class ReLUConvBN(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
super(ReLUConvBN, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),
nn.BatchNorm2d(C_out, affine=affine)
)
def forward(self, x):
return self.op(x)
class DilConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
super(DilConv, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=C_in, bias=False),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_out, affine=affine),
)
def forward(self, x):
return self.op(x)
class SepConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
super(SepConv, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=False),
nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_in, affine=affine),
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1, padding=padding, groups=C_in, bias=False),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_out, affine=affine),
)
def forward(self, x):
return self.op(x)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class Zero(nn.Module):
def __init__(self, stride):
super(Zero, self).__init__()
self.stride = stride
def forward(self, x):
if self.stride == 1:
return x.mul(0.)
return x[:,:,::self.stride,::self.stride].mul(0.)
class FactorizedReduce(nn.Module):
def __init__(self, C_in, C_out, affine=True):
super(FactorizedReduce, self).__init__()
assert C_out % 2 == 0
self.relu = nn.ReLU(inplace=False)
self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
self.bn = nn.BatchNorm2d(C_out, affine=affine)
self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0)
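  # halve the spatial resolution: two stride-2 1x1 convs sample pixel grids offset by one,
  # and their outputs are concatenated along the channel dimension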
def forward(self, x):
x = self.relu(x)
y = self.pad(x)
out = torch.cat([self.conv_1(x), self.conv_2(y[:,:,1:,1:])], dim=1)
out = self.bn(out)
return out
| 4,318 | 34.113821 | 129 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/datasets/test_dataset.py
|
import os, sys, torch
import torchvision.transforms as transforms
from .TieredImageNet import TieredImageNet
from .MetaBatchSampler import MetaBatchSampler
root_dir = os.environ['TORCH_HOME'] + '/tiered-imagenet'
print ('root : {:}'.format(root_dir))
means, stds = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
lists = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(84, padding=8), transforms.ToTensor(), transforms.Normalize(means, stds)]
transform = transforms.Compose(lists)
dataset = TieredImageNet(root_dir, 'val-test', transform)
image, label = dataset[111]
print ('image shape = {:}, label = {:}'.format(image.size(), label))
print ('image : min = {:}, max = {:} ||| label : {:}'.format(image.min(), image.max(), label))
sampler = MetaBatchSampler(dataset.labels, 250, 100, 10)
dataloader = torch.utils.data.DataLoader(dataset, batch_sampler=sampler)
print ('the length of dataset : {:}'.format( len(dataset) ))
print ('the length of loader : {:}'.format( len(dataloader) ))
for images, labels in dataloader:
print ('images : {:}'.format( images.size() ))
print ('labels : {:}'.format( labels.size() ))
for i in range(3):
print ('image-value-[{:}] : {:} ~ {:}, mean={:}, std={:}'.format(i, images[:,i].min(), images[:,i].max(), images[:,i].mean(), images[:,i].std()))
print('-----')
| 1,324 | 37.970588 | 149 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/datasets/test_NLP.py
|
import os, sys, torch
from .LanguageDataset import SentCorpus, BatchSentLoader
if __name__ == '__main__':
path = '../../data/data/penn'
corpus = SentCorpus( path )
loader = BatchSentLoader(corpus.test, 10)
for i, d in enumerate(loader):
print('{:} :: {:}'.format(i, d.size()))
| 291 | 25.545455 | 56 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/datasets/get_dataset_with_transform.py
|
import os, sys, torch
import os.path as osp
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from ..utils import Cutout
from .TieredImageNet import TieredImageNet
Dataset2Class = {'cifar10' : 10,
'cifar100': 100,
'tiered' : -1,
'imagenet-1k' : 1000,
'imagenet-100': 100}
def get_datasets(name, root, cutout):
# Mean + Std
if name == 'cifar10':
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
elif name == 'cifar100':
mean = [x / 255 for x in [129.3, 124.1, 112.4]]
std = [x / 255 for x in [68.2, 65.4, 70.4]]
elif name == 'tiered':
mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
elif name == 'imagenet-1k' or name == 'imagenet-100':
mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
else: raise TypeError("Unknow dataset : {:}".format(name))
# Data Argumentation
if name == 'cifar10' or name == 'cifar100':
lists = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),
transforms.Normalize(mean, std)]
if cutout > 0 : lists += [Cutout(cutout)]
train_transform = transforms.Compose(lists)
test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
elif name == 'tiered':
lists = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(80, padding=4), transforms.ToTensor(), transforms.Normalize(mean, std)]
if cutout > 0 : lists += [Cutout(cutout)]
train_transform = transforms.Compose(lists)
test_transform = transforms.Compose([transforms.CenterCrop(80), transforms.ToTensor(), transforms.Normalize(mean, std)])
elif name == 'imagenet-1k' or name == 'imagenet-100':
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.2),
transforms.ToTensor(),
normalize,
])
test_transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])
else: raise TypeError("Unknow dataset : {:}".format(name))
if name == 'cifar10':
train_data = dset.CIFAR10 (root, train=True , transform=train_transform, download=True)
test_data = dset.CIFAR10 (root, train=False, transform=test_transform , download=True)
elif name == 'cifar100':
train_data = dset.CIFAR100(root, train=True , transform=train_transform, download=True)
test_data = dset.CIFAR100(root, train=False, transform=test_transform , download=True)
elif name == 'imagenet-1k' or name == 'imagenet-100':
train_data = dset.ImageFolder(osp.join(root, 'train'), train_transform)
test_data = dset.ImageFolder(osp.join(root, 'val'), test_transform)
else: raise TypeError("Unknow dataset : {:}".format(name))
class_num = Dataset2Class[name]
return train_data, test_data, class_num
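# Illustrative usage sketch (an assumption, not part of the original file): get_datasets returns
# the train/test datasets plus the class count. The root path below is a placeholder -- point it
# at the directory where the torchvision data should be downloaded.
if __name__ == '__main__':
  train_data, test_data, class_num = get_datasets('cifar10', './data/cifar10', cutout=16)
  print ('train = {:}, test = {:}, classes = {:}'.format(len(train_data), len(test_data), class_num))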
| 3,189 | 40.973684 | 141 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/datasets/TieredImageNet.py
|
from __future__ import print_function
import numpy as np
from PIL import Image
import pickle as pkl
import os, cv2, csv, glob
import torch
import torch.utils.data as data
class TieredImageNet(data.Dataset):
def __init__(self, root_dir, split, transform=None):
self.split = split
self.root_dir = root_dir
self.transform = transform
splits = split.split('-')
images, labels, last = [], [], 0
for split in splits:
labels_name = '{:}/{:}_labels.pkl'.format(self.root_dir, split)
images_name = '{:}/{:}_images.npz'.format(self.root_dir, split)
      # decompress the images if the npz file does not exist
if not os.path.exists(images_name):
png_pkl = images_name[:-4] + '_png.pkl'
if os.path.exists(png_pkl):
decompress(images_name, png_pkl)
else:
          raise ValueError('png_pkl {:} does not exist'.format( png_pkl ))
assert os.path.exists(images_name) and os.path.exists(labels_name), '{:} & {:}'.format(images_name, labels_name)
print ("Prepare {:} done".format(images_name))
try:
with open(labels_name) as f:
data = pkl.load(f)
label_specific = data["label_specific"]
except:
with open(labels_name, 'rb') as f:
data = pkl.load(f, encoding='bytes')
label_specific = data[b'label_specific']
with np.load(images_name, mmap_mode="r", encoding='latin1') as data:
image_data = data["images"]
images.append( image_data )
label_specific = label_specific + last
labels.append( label_specific )
last = np.max(label_specific) + 1
print ("Load {:} done, with image shape = {:}, label shape = {:}, [{:} ~ {:}]".format(images_name, image_data.shape, label_specific.shape, np.min(label_specific), np.max(label_specific)))
images, labels = np.concatenate(images), np.concatenate(labels)
self.images = images
self.labels = labels
self.n_classes = int( np.max(labels) + 1 )
self.dict_index_label = {}
for cls in range(self.n_classes):
idxs = np.where(labels==cls)[0]
self.dict_index_label[cls] = idxs
self.length = len(labels)
print ("There are {:} images, {:} labels [{:} ~ {:}]".format(images.shape, labels.shape, np.min(labels), np.max(labels)))
def __repr__(self):
return ('{name}(length={length}, classes={n_classes})'.format(name=self.__class__.__name__, **self.__dict__))
def __len__(self):
return self.length
def __getitem__(self, index):
assert index >= 0 and index < self.length, 'invalid index = {:}'.format(index)
image = self.images[index].copy()
label = int(self.labels[index])
image = Image.fromarray(image[:,:,::-1].astype('uint8'), 'RGB')
if self.transform is not None:
image = self.transform( image )
return image, label
def decompress(path, output):
with open(output, 'rb') as f:
array = pkl.load(f, encoding='bytes')
images = np.zeros([len(array), 84, 84, 3], dtype=np.uint8)
for ii, item in enumerate(array):
im = cv2.imdecode(item, 1)
images[ii] = im
np.savez(path, images=images)
| 3,090 | 35.364706 | 193 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/datasets/LanguageDataset.py
|
import os
import torch
from collections import Counter
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
self.counter = Counter()
self.total = 0
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
token_id = self.word2idx[word]
self.counter[token_id] += 1
self.total += 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
class Corpus(object):
def __init__(self, path):
self.dictionary = Dictionary()
self.train = self.tokenize(os.path.join(path, 'train.txt'))
self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
self.test = self.tokenize(os.path.join(path, 'test.txt'))
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r', encoding='utf-8') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r', encoding='utf-8') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return ids
class SentCorpus(object):
def __init__(self, path):
self.dictionary = Dictionary()
self.train = self.tokenize(os.path.join(path, 'train.txt'))
self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
self.test = self.tokenize(os.path.join(path, 'test.txt'))
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r', encoding='utf-8') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
sents = []
with open(path, 'r', encoding='utf-8') as f:
for line in f:
if not line:
continue
words = line.split() + ['<eos>']
sent = torch.LongTensor(len(words))
for i, word in enumerate(words):
sent[i] = self.dictionary.word2idx[word]
sents.append(sent)
return sents
class BatchSentLoader(object):
def __init__(self, sents, batch_size, pad_id=0, cuda=False, volatile=False):
self.sents = sents
self.batch_size = batch_size
self.sort_sents = sorted(sents, key=lambda x: x.size(0))
self.cuda = cuda
self.volatile = volatile
self.pad_id = pad_id
def __next__(self):
if self.idx >= len(self.sort_sents):
raise StopIteration
batch_size = min(self.batch_size, len(self.sort_sents)-self.idx)
batch = self.sort_sents[self.idx:self.idx+batch_size]
max_len = max([s.size(0) for s in batch])
tensor = torch.LongTensor(max_len, batch_size).fill_(self.pad_id)
for i in range(len(batch)):
s = batch[i]
tensor[:s.size(0),i].copy_(s)
if self.cuda:
tensor = tensor.cuda()
self.idx += batch_size
return tensor
next = __next__
def __iter__(self):
self.idx = 0
return self
| 3,362 | 26.341463 | 78 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/datasets/__init__.py
|
from .MetaBatchSampler import MetaBatchSampler
from .TieredImageNet import TieredImageNet
from .LanguageDataset import Corpus
from .get_dataset_with_transform import get_datasets
| 179 | 35 | 52 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/datasets/MetaBatchSampler.py
|
# coding=utf-8
import numpy as np
import torch
class MetaBatchSampler(object):
def __init__(self, labels, classes_per_it, num_samples, iterations):
'''
Initialize MetaBatchSampler
Args:
- labels: an iterable containing all the labels for the current dataset
    sample indexes will be inferred from this iterable.
- classes_per_it: number of random classes for each iteration
- num_samples: number of samples for each iteration for each class (support + query)
- iterations: number of iterations (episodes) per epoch
'''
super(MetaBatchSampler, self).__init__()
self.labels = labels.copy()
self.classes_per_it = classes_per_it
self.sample_per_class = num_samples
self.iterations = iterations
self.classes, self.counts = np.unique(self.labels, return_counts=True)
assert len(self.classes) == np.max(self.classes) + 1 and np.min(self.classes) == 0
assert classes_per_it < len(self.classes), '{:} vs. {:}'.format(classes_per_it, len(self.classes))
self.classes = torch.LongTensor(self.classes)
# create a matrix, indexes, of dim: classes X max(elements per class)
# fill it with nans
# for every class c, fill the relative row with the indices samples belonging to c
# in numel_per_class we store the number of samples for each class/row
self.indexes = { x.item() : [] for x in self.classes }
indexes = { x.item() : [] for x in self.classes }
for idx, label in enumerate(self.labels):
indexes[ label.item() ].append( idx )
for key, value in indexes.items():
self.indexes[ key ] = torch.LongTensor( value )
def __iter__(self):
# yield a batch of indexes
spc = self.sample_per_class
cpi = self.classes_per_it
for it in range(self.iterations):
batch_size = spc * cpi
batch = torch.LongTensor(batch_size)
assert cpi < len(self.classes), '{:} vs. {:}'.format(cpi, len(self.classes))
c_idxs = torch.randperm(len(self.classes))[:cpi]
for i, cls in enumerate(self.classes[c_idxs]):
s = slice(i * spc, (i + 1) * spc)
num = self.indexes[ cls.item() ].nelement()
assert spc < num, '{:} vs. {:}'.format(spc, num)
sample_idxs = torch.randperm( num )[:spc]
batch[s] = self.indexes[ cls.item() ][sample_idxs]
batch = batch[torch.randperm(len(batch))]
yield batch
def __len__(self):
# returns the number of iterations (episodes) per epoch
return self.iterations
| 2,497 | 36.848485 | 102 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/nas_rnn/utils.py
|
import torch
import torch.nn as nn
import os, shutil
import numpy as np
def repackage_hidden(h):
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
def batchify(data, bsz, use_cuda):
nbatch = data.size(0) // bsz
data = data.narrow(0, 0, nbatch * bsz)
data = data.view(bsz, -1).t().contiguous()
if use_cuda: return data.cuda()
else : return data
def get_batch(source, i, seq_len):
seq_len = min(seq_len, len(source) - 1 - i)
data = source[i:i+seq_len].clone()
target = source[i+1:i+1+seq_len].clone()
return data, target
def embedded_dropout(embed, words, dropout=0.1, scale=None):
if dropout:
mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout).expand_as(embed.weight) / (1 - dropout)
mask.requires_grad_(True)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
if scale:
masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight
padding_idx = embed.padding_idx
if padding_idx is None:
padding_idx = -1
X = torch.nn.functional.embedding(
words, masked_embed_weight,
padding_idx, embed.max_norm, embed.norm_type,
embed.scale_grad_by_freq, embed.sparse)
return X
class LockedDropout(nn.Module):
def __init__(self):
super(LockedDropout, self).__init__()
def forward(self, x, dropout=0.5):
if not self.training or not dropout:
return x
m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout)
mask = m.div_(1 - dropout).detach()
mask = mask.expand_as(x)
return mask * x
def mask2d(B, D, keep_prob, cuda=True):
m = torch.floor(torch.rand(B, D) + keep_prob) / keep_prob
if cuda: return m.cuda()
else : return m
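# Illustrative sketch (an assumption, not part of the original file): how batchify, get_batch and
# repackage_hidden are typically combined; the random tensor stands in for Corpus(...).train.
if __name__ == '__main__':
  tokens = torch.randint(0, 100, (1000,))
  data = batchify(tokens, 20, use_cuda=False)          # shape (50, 20): each column is one stream
  seq_len, hidden = 35, None
  for i in range(0, data.size(0) - 1, seq_len):
    inputs, targets = get_batch(data, i, seq_len)      # targets are the inputs shifted by one step
    if hidden is not None:
      hidden = repackage_hidden(hidden)                # detach the hidden state between chunks
    print (inputs.size(), targets.size())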
| 1,812 | 26.059701 | 133 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/nas_rnn/basemodel.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .genotypes import STEPS
from .utils import mask2d, LockedDropout, embedded_dropout
INITRANGE = 0.04
def none_func(x):
return x * 0
class DARTSCell(nn.Module):
def __init__(self, ninp, nhid, dropouth, dropoutx, genotype):
super(DARTSCell, self).__init__()
self.nhid = nhid
self.dropouth = dropouth
self.dropoutx = dropoutx
self.genotype = genotype
# genotype is None when doing arch search
steps = len(self.genotype.recurrent) if self.genotype is not None else STEPS
self._W0 = nn.Parameter(torch.Tensor(ninp+nhid, 2*nhid).uniform_(-INITRANGE, INITRANGE))
self._Ws = nn.ParameterList([
nn.Parameter(torch.Tensor(nhid, 2*nhid).uniform_(-INITRANGE, INITRANGE)) for i in range(steps)
])
def forward(self, inputs, hidden, arch_probs):
T, B = inputs.size(0), inputs.size(1)
if self.training:
x_mask = mask2d(B, inputs.size(2), keep_prob=1.-self.dropoutx)
h_mask = mask2d(B, hidden.size(2), keep_prob=1.-self.dropouth)
else:
x_mask = h_mask = None
hidden = hidden[0]
hiddens = []
for t in range(T):
hidden = self.cell(inputs[t], hidden, x_mask, h_mask, arch_probs)
hiddens.append(hidden)
hiddens = torch.stack(hiddens)
return hiddens, hiddens[-1].unsqueeze(0)
def _compute_init_state(self, x, h_prev, x_mask, h_mask):
if self.training:
xh_prev = torch.cat([x * x_mask, h_prev * h_mask], dim=-1)
else:
xh_prev = torch.cat([x, h_prev], dim=-1)
c0, h0 = torch.split(xh_prev.mm(self._W0), self.nhid, dim=-1)
c0 = c0.sigmoid()
h0 = h0.tanh()
s0 = h_prev + c0 * (h0-h_prev)
return s0
def _get_activation(self, name):
if name == 'tanh':
f = torch.tanh
elif name == 'relu':
f = torch.relu
elif name == 'sigmoid':
f = torch.sigmoid
elif name == 'identity':
f = lambda x: x
elif name == 'none':
f = none_func
else:
raise NotImplementedError
return f
def cell(self, x, h_prev, x_mask, h_mask, _):
s0 = self._compute_init_state(x, h_prev, x_mask, h_mask)
states = [s0]
for i, (name, pred) in enumerate(self.genotype.recurrent):
s_prev = states[pred]
if self.training:
ch = (s_prev * h_mask).mm(self._Ws[i])
else:
ch = s_prev.mm(self._Ws[i])
c, h = torch.split(ch, self.nhid, dim=-1)
c = c.sigmoid()
fn = self._get_activation(name)
h = fn(h)
s = s_prev + c * (h-s_prev)
states += [s]
output = torch.mean(torch.stack([states[i] for i in self.genotype.concat], -1), -1)
return output
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, ntoken, ninp, nhid, nhidlast,
dropout=0.5, dropouth=0.5, dropoutx=0.5, dropouti=0.5, dropoute=0.1,
cell_cls=None, genotype=None):
super(RNNModel, self).__init__()
self.lockdrop = LockedDropout()
self.encoder = nn.Embedding(ntoken, ninp)
assert ninp == nhid == nhidlast
if cell_cls == DARTSCell:
assert genotype is not None
rnns = [cell_cls(ninp, nhid, dropouth, dropoutx, genotype)]
else:
assert genotype is None
rnns = [cell_cls(ninp, nhid, dropouth, dropoutx)]
self.rnns = torch.nn.ModuleList(rnns)
self.decoder = nn.Linear(ninp, ntoken)
self.decoder.weight = self.encoder.weight
self.init_weights()
self.arch_weights = None
self.ninp = ninp
self.nhid = nhid
self.nhidlast = nhidlast
self.dropout = dropout
self.dropouti = dropouti
self.dropoute = dropoute
self.ntoken = ntoken
self.cell_cls = cell_cls
# acceleration
self.tau = None
self.use_gumbel = False
def set_gumbel(self, use_gumbel, set_check):
self.use_gumbel = use_gumbel
for i, rnn in enumerate(self.rnns):
rnn.set_check(set_check)
def set_tau(self, tau):
self.tau = tau
def get_tau(self):
return self.tau
def init_weights(self):
self.encoder.weight.data.uniform_(-INITRANGE, INITRANGE)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-INITRANGE, INITRANGE)
def forward(self, input, hidden, return_h=False):
batch_size = input.size(1)
emb = embedded_dropout(self.encoder, input, dropout=self.dropoute if self.training else 0)
emb = self.lockdrop(emb, self.dropouti)
raw_output = emb
new_hidden = []
raw_outputs = []
outputs = []
if self.arch_weights is None:
arch_probs = None
else:
if self.use_gumbel: arch_probs = F.gumbel_softmax(self.arch_weights, self.tau, False)
else : arch_probs = F.softmax(self.arch_weights, dim=-1)
for l, rnn in enumerate(self.rnns):
current_input = raw_output
raw_output, new_h = rnn(raw_output, hidden[l], arch_probs)
new_hidden.append(new_h)
raw_outputs.append(raw_output)
hidden = new_hidden
output = self.lockdrop(raw_output, self.dropout)
outputs.append(output)
logit = self.decoder(output.view(-1, self.ninp))
log_prob = nn.functional.log_softmax(logit, dim=-1)
model_output = log_prob
model_output = model_output.view(-1, batch_size, self.ntoken)
if return_h: return model_output, hidden, raw_outputs, outputs
else : return model_output, hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).clone()
return [weight.new(1, bsz, self.nhid).zero_()]
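# Illustrative sketch (an assumption, not part of the original file): building the fixed-genotype
# RNN and running one forward pass in eval mode (training mode would build CUDA-only dropout masks
# via mask2d). ninp, nhid and nhidlast must be equal, see the assert in RNNModel.__init__.
if __name__ == '__main__':
  from .genotypes import DARTS_V1
  ntoken, ninp, bsz, seq_len = 1000, 256, 8, 35
  model = RNNModel(ntoken, ninp, ninp, ninp, cell_cls=DARTSCell, genotype=DARTS_V1)
  model.eval()
  hidden = model.init_hidden(bsz)
  inputs = torch.randint(0, ntoken, (seq_len, bsz))    # (sequence length, batch)
  log_prob, hidden = model(inputs, hidden)
  print (log_prob.size())                              # -> (seq_len, bsz, ntoken)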
| 5,547 | 29.483516 | 102 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/nas_rnn/model_search.py
|
import copy, torch
import torch.nn as nn
import torch.nn.functional as F
from collections import namedtuple
from .genotypes import PRIMITIVES, STEPS, CONCAT, Genotype
from .basemodel import DARTSCell, RNNModel
class DARTSCellSearch(DARTSCell):
def __init__(self, ninp, nhid, dropouth, dropoutx):
super(DARTSCellSearch, self).__init__(ninp, nhid, dropouth, dropoutx, genotype=None)
self.bn = nn.BatchNorm1d(nhid, affine=False)
self.check_zero = False
def set_check(self, check_zero):
self.check_zero = check_zero
def cell(self, x, h_prev, x_mask, h_mask, arch_probs):
s0 = self._compute_init_state(x, h_prev, x_mask, h_mask)
s0 = self.bn(s0)
if self.check_zero:
arch_probs_cpu = arch_probs.cpu().tolist()
#arch_probs = F.softmax(self.weights, dim=-1)
offset = 0
states = s0.unsqueeze(0)
for i in range(STEPS):
if self.training:
masked_states = states * h_mask.unsqueeze(0)
else:
masked_states = states
ch = masked_states.view(-1, self.nhid).mm(self._Ws[i]).view(i+1, -1, 2*self.nhid)
c, h = torch.split(ch, self.nhid, dim=-1)
c = c.sigmoid()
s = torch.zeros_like(s0)
for k, name in enumerate(PRIMITIVES):
if name == 'none':
continue
fn = self._get_activation(name)
unweighted = states + c * (fn(h) - states)
if self.check_zero:
INDEX, INDDX = [], []
for jj in range(offset, offset+i+1):
if arch_probs_cpu[jj][k] > 0:
INDEX.append(jj)
INDDX.append(jj-offset)
if len(INDEX) == 0: continue
s += torch.sum(arch_probs[INDEX, k].unsqueeze(-1).unsqueeze(-1) * unweighted[INDDX, :, :], dim=0)
else:
s += torch.sum(arch_probs[offset:offset+i+1, k].unsqueeze(-1).unsqueeze(-1) * unweighted, dim=0)
s = self.bn(s)
states = torch.cat([states, s.unsqueeze(0)], 0)
offset += i+1
output = torch.mean(states[-CONCAT:], dim=0)
return output
class RNNModelSearch(RNNModel):
def __init__(self, *args):
super(RNNModelSearch, self).__init__(*args)
self._args = copy.deepcopy( args )
k = sum(i for i in range(1, STEPS+1))
self.arch_weights = nn.Parameter(torch.Tensor(k, len(PRIMITIVES)))
nn.init.normal_(self.arch_weights, 0, 0.001)
def base_parameters(self):
lists = list(self.lockdrop.parameters())
lists += list(self.encoder.parameters())
lists += list(self.rnns.parameters())
lists += list(self.decoder.parameters())
return lists
def arch_parameters(self):
return [self.arch_weights]
def genotype(self):
def _parse(probs):
gene = []
start = 0
for i in range(STEPS):
end = start + i + 1
W = probs[start:end].copy()
#j = sorted(range(i + 1), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[0]
j = sorted(range(i + 1), key=lambda x: -max(W[x][k] for k in range(len(W[x])) ))[0]
k_best = None
for k in range(len(W[j])):
#if k != PRIMITIVES.index('none'):
# if k_best is None or W[j][k] > W[j][k_best]:
# k_best = k
if k_best is None or W[j][k] > W[j][k_best]:
k_best = k
gene.append((PRIMITIVES[k_best], j))
start = end
return gene
with torch.no_grad():
gene = _parse(F.softmax(self.arch_weights, dim=-1).cpu().numpy())
genotype = Genotype(recurrent=gene, concat=list(range(STEPS+1)[-CONCAT:]))
return genotype
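# Illustrative sketch (an assumption, not part of the original file): the search model takes the
# same positional arguments as RNNModel but with DARTSCellSearch and genotype=None; in practice
# arch_parameters() and base_parameters() feed two separate optimizers, and genotype() reads the
# current architecture weights back into a discrete Genotype.
if __name__ == '__main__':
  ntoken, ninp = 1000, 256
  search_model = RNNModelSearch(ntoken, ninp, ninp, ninp,
                                0.5, 0.5, 0.5, 0.5, 0.1,
                                DARTSCellSearch, None)
  print (len(search_model.arch_parameters()), len(search_model.base_parameters()))
  print (search_model.genotype())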
| 3,544 | 32.761905 | 124 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/nas_rnn/__init__.py
|
# utils
from .utils import batchify, get_batch, repackage_hidden
# models
from .model_search import RNNModelSearch
from .model_search import DARTSCellSearch
from .basemodel import DARTSCell, RNNModel
# architecture
from .genotypes import DARTS_V1, DARTS_V2
from .genotypes import GDAS
| 285 | 27.6 | 56 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/nas_rnn/genotypes.py
|
from collections import namedtuple
Genotype = namedtuple('Genotype', 'recurrent concat')
PRIMITIVES = [
'none',
'tanh',
'relu',
'sigmoid',
'identity'
]
STEPS = 8
CONCAT = 8
ENAS = Genotype(
recurrent = [
('tanh', 0),
('tanh', 1),
('relu', 1),
('tanh', 3),
('tanh', 3),
('relu', 3),
('relu', 4),
('relu', 7),
('relu', 8),
('relu', 8),
('relu', 8),
],
concat = [2, 5, 6, 9, 10, 11]
)
DARTS_V1 = Genotype(
recurrent = [
('relu', 0),
('relu', 1),
('tanh', 2),
('relu', 3), ('relu', 4), ('identity', 1), ('relu', 5), ('relu', 1)
],
concat=range(1, 9)
)
DARTS_V2 = Genotype(
recurrent = [
('sigmoid', 0), ('relu', 1), ('relu', 1),
('identity', 1), ('tanh', 2), ('sigmoid', 5),
('tanh', 3), ('relu', 5)
],
concat=range(1, 9)
)
GDAS = Genotype(
recurrent=[('relu', 0), ('relu', 0), ('identity', 1), ('relu', 1), ('tanh', 0), ('relu', 2), ('identity', 4), ('identity', 2)],
concat=range(1, 9)
)
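# Illustrative note (an assumption, not part of the original file): each entry of `recurrent` is
# (activation name, index of the predecessor state it reads), and `concat` lists the intermediate
# states that are averaged into the cell output -- see DARTSCell.cell() in basemodel.py.
if __name__ == '__main__':
  assert len(GDAS.recurrent) == STEPS and len(list(GDAS.concat)) == CONCAT
  for op_name, pred_index in GDAS.recurrent:
    print ('{:10s} <- state {:}'.format(op_name, pred_index))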
| 1,057 | 17.892857 | 129 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/utils/save_meta.py
|
import torch
import os, sys
import os.path as osp
import numpy as np
def tensor2np(x):
if isinstance(x, np.ndarray): return x
if x.is_cuda: x = x.cpu()
return x.numpy()
class Save_Meta():
def __init__(self):
self.reset()
def __repr__(self):
return ('{name}'.format(name=self.__class__.__name__)+'(number of data = {})'.format(len(self)))
def reset(self):
self.predictions = []
self.groundtruth = []
def __len__(self):
return len(self.predictions)
def append(self, _pred, _ground):
_pred, _ground = tensor2np(_pred), tensor2np(_ground)
assert _ground.shape[0] == _pred.shape[0] and len(_pred.shape) == 2 and len(_ground.shape) == 1, 'The shapes are wrong : {} & {}'.format(_pred.shape, _ground.shape)
self.predictions.append(_pred)
self.groundtruth.append(_ground)
def save(self, save_dir, filename, test=True):
meta = {'predictions': self.predictions,
'groundtruth': self.groundtruth}
filename = osp.join(save_dir, filename)
torch.save(meta, filename)
if test:
predictions = np.concatenate(self.predictions)
groundtruth = np.concatenate(self.groundtruth)
predictions = np.argmax(predictions, axis=1)
accuracy = np.sum(groundtruth==predictions) * 100.0 / predictions.size
else:
accuracy = None
print ('save save_meta into {} with accuracy = {}'.format(filename, accuracy))
def load(self, filename):
assert os.path.isfile(filename), '{} is not a file'.format(filename)
checkpoint = torch.load(filename)
self.predictions = checkpoint['predictions']
self.groundtruth = checkpoint['groundtruth']
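# Illustrative sketch (an assumption, not part of the original file): Save_Meta accumulates
# (prediction, label) batches and dumps them together with an accuracy summary; the snapshot
# directory below is hypothetical.
if __name__ == '__main__':
  meta = Save_Meta()
  preds = torch.randn(4, 10)                  # 4 samples, 10 classes
  labels = torch.randint(0, 10, (4,))
  meta.append(preds, labels)
  os.makedirs('./snapshots', exist_ok=True)
  meta.save('./snapshots', 'meta-test.pth', test=True)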
| 1,649 | 31.352941 | 168 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/utils/model_utils.py
|
import torch
import torch.nn as nn
import numpy as np
def count_parameters_in_MB(model):
if isinstance(model, nn.Module):
return np.sum(np.prod(v.size()) for v in model.parameters())/1e6
else:
return np.sum(np.prod(v.size()) for v in model)/1e6
class Cutout(object):
def __init__(self, length):
self.length = length
def __repr__(self):
return ('{name}(length={length})'.format(name=self.__class__.__name__, **self.__dict__))
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
| 923 | 24.666667 | 92 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/utils/flop_benchmark.py
|
import copy, torch
def print_FLOPs(model, shape, logs):
print_log, log = logs
model = copy.deepcopy( model )
model = add_flops_counting_methods(model)
model = model.cuda()
model.eval()
cache_inputs = torch.zeros(*shape).cuda()
#print_log('In the calculating function : cache input size : {:}'.format(cache_inputs.size()), log)
_ = model(cache_inputs)
FLOPs = compute_average_flops_cost( model ) / 1e6
print_log('FLOPs : {:} MB'.format(FLOPs), log)
torch.cuda.empty_cache()
# ---- Public functions
def add_flops_counting_methods( model ):
model.__batch_counter__ = 0
add_batch_counter_hook_function( model )
model.apply( add_flops_counter_variable_or_reset )
model.apply( add_flops_counter_hook_function )
return model
def compute_average_flops_cost(model):
"""
A method that will be available after add_flops_counting_methods() is called on a desired net object.
Returns current mean flops consumption per image.
"""
batches_count = model.__batch_counter__
flops_sum = 0
for module in model.modules():
if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear):
flops_sum += module.__flops__
return flops_sum / batches_count
# ---- Internal functions
def pool_flops_counter_hook(pool_module, inputs, output):
batch_size = inputs[0].size(0)
kernel_size = pool_module.kernel_size
out_C, output_height, output_width = output.shape[1:]
assert out_C == inputs[0].size(1), '{:} vs. {:}'.format(out_C, inputs[0].size())
overall_flops = batch_size * out_C * output_height * output_width * kernel_size * kernel_size
pool_module.__flops__ += overall_flops
def fc_flops_counter_hook(fc_module, inputs, output):
batch_size = inputs[0].size(0)
xin, xout = fc_module.in_features, fc_module.out_features
assert xin == inputs[0].size(1) and xout == output.size(1), 'IO=({:}, {:})'.format(xin, xout)
overall_flops = batch_size * xin * xout
if fc_module.bias is not None:
overall_flops += batch_size * xout
fc_module.__flops__ += overall_flops
def conv_flops_counter_hook(conv_module, inputs, output):
batch_size = inputs[0].size(0)
output_height, output_width = output.shape[2:]
kernel_height, kernel_width = conv_module.kernel_size
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
groups = conv_module.groups
conv_per_position_flops = kernel_height * kernel_width * in_channels * out_channels / groups
active_elements_count = batch_size * output_height * output_width
overall_flops = conv_per_position_flops * active_elements_count
if conv_module.bias is not None:
overall_flops += out_channels * active_elements_count
conv_module.__flops__ += overall_flops
def batch_counter_hook(module, inputs, output):
# Can have multiple inputs, getting the first one
inputs = inputs[0]
batch_size = inputs.shape[0]
module.__batch_counter__ += batch_size
def add_batch_counter_hook_function(module):
if not hasattr(module, '__batch_counter_handle__'):
handle = module.register_forward_hook(batch_counter_hook)
module.__batch_counter_handle__ = handle
def add_flops_counter_variable_or_reset(module):
if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear) \
or isinstance(module, torch.nn.AvgPool2d) or isinstance(module, torch.nn.MaxPool2d):
module.__flops__ = 0
def add_flops_counter_hook_function(module):
if isinstance(module, torch.nn.Conv2d):
if not hasattr(module, '__flops_handle__'):
handle = module.register_forward_hook(conv_flops_counter_hook)
module.__flops_handle__ = handle
elif isinstance(module, torch.nn.Linear):
if not hasattr(module, '__flops_handle__'):
handle = module.register_forward_hook(fc_flops_counter_hook)
module.__flops_handle__ = handle
elif isinstance(module, torch.nn.AvgPool2d) or isinstance(module, torch.nn.MaxPool2d):
if not hasattr(module, '__flops_handle__'):
handle = module.register_forward_hook(pool_flops_counter_hook)
module.__flops_handle__ = handle
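# Illustrative sketch (an assumption, not part of the original file): attach the counters, push a
# dummy batch through the network, then read the per-image FLOPs; this CPU-only variant skips the
# .cuda() calls that print_FLOPs uses.
if __name__ == '__main__':
  net = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3, padding=1),
                            torch.nn.AvgPool2d(2),
                            torch.nn.Conv2d(8, 16, 3, padding=1))
  net = add_flops_counting_methods(net)
  net.eval()
  with torch.no_grad():
    net(torch.zeros(2, 3, 32, 32))
  print ('{:.3f} MFLOPs per image'.format(compute_average_flops_cost(net) / 1e6))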
| 4,077 | 35.088496 | 103 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/utils/draw_pts.py
|
import os, sys, time
import numpy as np
import matplotlib
import random
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def draw_points(points, labels, save_path):
title = 'the visualized features'
dpi = 100
width, height = 1000, 1000
legend_fontsize = 10
figsize = width / float(dpi), height / float(dpi)
fig = plt.figure(figsize=figsize)
classes = np.unique(labels).tolist()
colors = cm.rainbow(np.linspace(0, 1, len(classes)))
legends = []
legendnames = []
for cls, c in zip(classes, colors):
indexes = labels == cls
ptss = points[indexes, :]
x = ptss[:,0]
y = ptss[:,1]
if cls % 2 == 0: marker = 'x'
else: marker = 'o'
legend = plt.scatter(x, y, color=c, s=1, marker=marker)
legendname = '{:02d}'.format(cls+1)
legends.append( legend )
legendnames.append( legendname )
plt.legend(legends, legendnames, scatterpoints=1, ncol=5, fontsize=8)
if save_path is not None:
fig.savefig(save_path, dpi=dpi, bbox_inches='tight')
print ('---- save figure {} into {}'.format(title, save_path))
plt.close(fig)
| 1,132 | 25.97619 | 71 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/utils/utils.py
|
import os, sys, time
import numpy as np
import random
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class RecorderMeter(object):
"""Computes and stores the minimum loss value and its epoch index"""
def __init__(self, total_epoch):
self.reset(total_epoch)
def reset(self, total_epoch):
assert total_epoch > 0
self.total_epoch = total_epoch
self.current_epoch = 0
self.epoch_losses = np.zeros((self.total_epoch, 2), dtype=np.float32) # [epoch, train/val]
self.epoch_losses = self.epoch_losses - 1
self.epoch_accuracy= np.zeros((self.total_epoch, 2), dtype=np.float32) # [epoch, train/val]
self.epoch_accuracy= self.epoch_accuracy
def update(self, idx, train_loss, train_acc, val_loss, val_acc):
assert idx >= 0 and idx < self.total_epoch, 'total_epoch : {} , but update with the {} index'.format(self.total_epoch, idx)
self.epoch_losses [idx, 0] = train_loss
self.epoch_losses [idx, 1] = val_loss
self.epoch_accuracy[idx, 0] = train_acc
self.epoch_accuracy[idx, 1] = val_acc
self.current_epoch = idx + 1
return self.max_accuracy(False) == self.epoch_accuracy[idx, 1]
def max_accuracy(self, istrain):
if self.current_epoch <= 0: return 0
if istrain: return self.epoch_accuracy[:self.current_epoch, 0].max()
else: return self.epoch_accuracy[:self.current_epoch, 1].max()
def plot_curve(self, save_path):
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
title = 'the accuracy/loss curve of train/val'
dpi = 100
width, height = 1600, 1000
legend_fontsize = 10
figsize = width / float(dpi), height / float(dpi)
fig = plt.figure(figsize=figsize)
x_axis = np.array([i for i in range(self.total_epoch)]) # epochs
y_axis = np.zeros(self.total_epoch)
plt.xlim(0, self.total_epoch)
plt.ylim(0, 100)
interval_y = 5
interval_x = 5
plt.xticks(np.arange(0, self.total_epoch + interval_x, interval_x))
plt.yticks(np.arange(0, 100 + interval_y, interval_y))
plt.grid()
plt.title(title, fontsize=20)
plt.xlabel('the training epoch', fontsize=16)
plt.ylabel('accuracy', fontsize=16)
y_axis[:] = self.epoch_accuracy[:, 0]
plt.plot(x_axis, y_axis, color='g', linestyle='-', label='train-accuracy', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_accuracy[:, 1]
plt.plot(x_axis, y_axis, color='y', linestyle='-', label='valid-accuracy', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_losses[:, 0]
plt.plot(x_axis, y_axis*50, color='g', linestyle=':', label='train-loss-x50', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_losses[:, 1]
plt.plot(x_axis, y_axis*50, color='y', linestyle=':', label='valid-loss-x50', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
if save_path is not None:
fig.savefig(save_path, dpi=dpi, bbox_inches='tight')
print ('---- save figure {} into {}'.format(title, save_path))
plt.close(fig)
def print_log(print_string, log):
print ("{:}".format(print_string))
if log is not None:
log.write('{}\n'.format(print_string))
log.flush()
def time_file_str():
ISOTIMEFORMAT='%Y-%m-%d'
string = '{}'.format(time.strftime( ISOTIMEFORMAT, time.gmtime(time.time()) ))
return string + '-{}'.format(random.randint(1, 10000))
def time_string():
ISOTIMEFORMAT='%Y-%m-%d-%X'
string = '[{}]'.format(time.strftime( ISOTIMEFORMAT, time.gmtime(time.time()) ))
return string
def convert_secs2time(epoch_time, return_str=False):
need_hour = int(epoch_time / 3600)
need_mins = int((epoch_time - 3600*need_hour) / 60)
need_secs = int(epoch_time - 3600*need_hour - 60*need_mins)
if return_str == False:
return need_hour, need_mins, need_secs
else:
return '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
def test_imagenet_data(imagenet):
total_length = len(imagenet)
assert total_length == 1281166 or total_length == 50000, 'The length of ImageNet is wrong : {}'.format(total_length)
map_id = {}
for index in range(total_length):
path, target = imagenet.imgs[index]
folder, image_name = os.path.split(path)
_, folder = os.path.split(folder)
if folder not in map_id:
map_id[folder] = target
else:
assert map_id[folder] == target, 'Class : {} is not {}'.format(folder, target)
assert image_name.find(folder) == 0, '{} is wrong.'.format(path)
print ('Check ImageNet Dataset OK')
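# Illustrative sketch (an assumption, not part of the original file): AverageMeter tracks a
# running (weighted) mean such as the per-batch loss, RecorderMeter stores per-epoch train/val
# statistics, and convert_secs2time formats an ETA string.
if __name__ == '__main__':
  losses = AverageMeter()
  for batch_loss in [0.9, 0.7, 0.5]:
    losses.update(batch_loss, n=32)           # n acts as the weight (e.g. the batch size)
  print ('average loss = {:.3f}'.format(losses.avg))
  recorder = RecorderMeter(total_epoch=2)
  is_best = recorder.update(0, train_loss=0.7, train_acc=75.0, val_loss=0.8, val_acc=70.0)
  print ('best-so-far = {:}, max val acc = {:}'.format(is_best, recorder.max_accuracy(False)))
  print (convert_secs2time(3725, return_str=True))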
| 4,845 | 34.115942 | 127 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/utils/evaluation_utils.py
|
import torch
def obtain_accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
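# Illustrative sketch (an assumption, not part of the original file): obtain_accuracy expects raw
# logits of shape (batch, classes) and integer targets, and returns one percentage per top-k value.
if __name__ == '__main__':
  logits = torch.randn(8, 5)
  target = torch.randint(0, 5, (8,))
  top1, top5 = obtain_accuracy(logits, target, topk=(1, 5))
  print ('top-1 = {:.2f}% | top-5 = {:.2f}%'.format(top1.item(), top5.item()))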
| 453 | 25.705882 | 65 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/utils/gpu_manager.py
|
import os
class GPUManager():
queries = ('index', 'gpu_name', 'memory.free', 'memory.used', 'memory.total', 'power.draw', 'power.limit')
def __init__(self):
all_gpus = self.query_gpu(False)
def get_info(self, ctype):
cmd = 'nvidia-smi --query-gpu={} --format=csv,noheader'.format(ctype)
lines = os.popen(cmd).readlines()
lines = [line.strip('\n') for line in lines]
return lines
def query_gpu(self, show=True):
num_gpus = len( self.get_info('index') )
all_gpus = [ {} for i in range(num_gpus) ]
for query in self.queries:
infos = self.get_info(query)
for idx, info in enumerate(infos):
all_gpus[idx][query] = info
if 'CUDA_VISIBLE_DEVICES' in os.environ:
CUDA_VISIBLE_DEVICES = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
selected_gpus = []
for idx, CUDA_VISIBLE_DEVICE in enumerate(CUDA_VISIBLE_DEVICES):
find = False
for gpu in all_gpus:
if gpu['index'] == CUDA_VISIBLE_DEVICE:
assert find==False, 'Duplicate cuda device index : {}'.format(CUDA_VISIBLE_DEVICE)
find = True
selected_gpus.append( gpu.copy() )
selected_gpus[-1]['index'] = '{}'.format(idx)
        assert find, 'Could not find the device : {}'.format(CUDA_VISIBLE_DEVICE)
all_gpus = selected_gpus
if show:
allstrings = ''
for gpu in all_gpus:
string = '| '
for query in self.queries:
if query.find('memory') == 0: xinfo = '{:>9}'.format(gpu[query])
else: xinfo = gpu[query]
string = string + query + ' : ' + xinfo + ' | '
allstrings = allstrings + string + '\n'
return allstrings
else:
return all_gpus
def select_by_memory(self, numbers=1):
all_gpus = self.query_gpu(False)
    assert numbers <= len(all_gpus), 'Requested {} gpus, but only {} are available'.format(numbers, len(all_gpus))
alls = []
for idx, gpu in enumerate(all_gpus):
free_memory = gpu['memory.free']
free_memory = free_memory.split(' ')[0]
free_memory = int(free_memory)
index = gpu['index']
alls.append((free_memory, index))
alls.sort(reverse = True)
alls = [ int(alls[i][1]) for i in range(numbers) ]
return sorted(alls)
"""
if __name__ == '__main__':
manager = GPUManager()
manager.query_gpu(True)
indexes = manager.select_by_memory(3)
print (indexes)
"""
| 2,418 | 33.070423 | 108 |
py
|
linbp-attack
|
linbp-attack-master/attack/cifar10/models/gdas/lib/utils/__init__.py
|
from .utils import AverageMeter, RecorderMeter, convert_secs2time
from .utils import time_file_str, time_string
from .utils import test_imagenet_data
from .utils import print_log
from .evaluation_utils import obtain_accuracy
#from .draw_pts import draw_points
from .gpu_manager import GPUManager
from .save_meta import Save_Meta
from .model_utils import count_parameters_in_MB
from .model_utils import Cutout
from .flop_benchmark import print_FLOPs
| 452 | 29.2 | 65 |
py
|
s2anet
|
s2anet-master/setup.py
|
import os
import platform
import subprocess
import time
from setuptools import Extension, dist, find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
dist.Distribution().fetch_build_eggs(['Cython', 'numpy>=1.11.1'])
import numpy as np # noqa: E402, isort:skip
from Cython.Build import cythonize # noqa: E402, isort:skip
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
MAJOR = 1
MINOR = 0
PATCH = ''
SUFFIX = 'rc1'
if PATCH:
SHORT_VERSION = '{}.{}.{}{}'.format(MAJOR, MINOR, PATCH, SUFFIX)
else:
SHORT_VERSION = '{}.{}{}'.format(MAJOR, MINOR, SUFFIX)
version_file = 'mmdet/version.py'
def get_git_hash():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
sha = out.strip().decode('ascii')
except OSError:
sha = 'unknown'
return sha
def get_hash():
if os.path.exists('.git'):
sha = get_git_hash()[:7]
elif os.path.exists(version_file):
try:
from mmdet.version import __version__
sha = __version__.split('+')[-1]
except ImportError:
raise ImportError('Unable to get git version')
else:
sha = 'unknown'
return sha
def write_version_py():
content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
short_version = '{}'
"""
sha = get_hash()
VERSION = SHORT_VERSION + '+' + sha
with open(version_file, 'w') as f:
f.write(content.format(time.asctime(), VERSION, SHORT_VERSION))
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def make_cuda_ext(name, module, sources):
define_macros = []
define_macros += [("WITH_CUDA", None)]
return CUDAExtension(
name='{}.{}'.format(module, name),
sources=[os.path.join(*module.split('.'), p) for p in sources],
define_macros=define_macros,
extra_compile_args={
'cxx': [],
'nvcc': [
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
]
})
def make_cython_ext(name, module, sources):
extra_compile_args = None
if platform.system() != 'Windows':
extra_compile_args = {
'cxx': ['-Wno-unused-function', '-Wno-write-strings']
}
extension = Extension(
'{}.{}'.format(module, name),
[os.path.join(*module.split('.'), p) for p in sources],
include_dirs=[np.get_include()],
language='c++',
extra_compile_args=extra_compile_args)
extension, = cythonize(extension)
return extension
def get_requirements(filename='requirements.txt'):
here = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(here, filename), 'r') as f:
requires = [line.replace('\n', '') for line in f.readlines()]
return requires
if __name__ == '__main__':
write_version_py()
setup(
name='mmdet',
version=get_version(),
description='Open MMLab Detection Toolbox and Benchmark',
long_description=readme(),
author='OpenMMLab',
author_email='[email protected]',
keywords='computer vision, object detection',
url='https://github.com/open-mmlab/mmdetection',
packages=find_packages(exclude=('configs', 'tools', 'demo')),
package_data={'mmdet.ops': ['*/*.so']},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
license='Apache License 2.0',
setup_requires=['pytest-runner', 'cython', 'numpy'],
tests_require=['pytest', 'xdoctest'],
install_requires=get_requirements(),
ext_modules=[
make_cython_ext(
name='soft_nms_cpu',
module='mmdet.ops.nms',
sources=['src/soft_nms_cpu.pyx']),
make_cuda_ext(
name='nms_cpu',
module='mmdet.ops.nms',
sources=['src/nms_cpu.cpp']),
make_cuda_ext(
name='nms_cuda',
module='mmdet.ops.nms',
sources=['src/nms_cuda.cpp', 'src/nms_kernel.cu']),
make_cuda_ext(
name='roi_align_cuda',
module='mmdet.ops.roi_align',
sources=['src/roi_align_cuda.cpp', 'src/roi_align_kernel.cu']),
make_cuda_ext(
name='roi_pool_cuda',
module='mmdet.ops.roi_pool',
sources=['src/roi_pool_cuda.cpp', 'src/roi_pool_kernel.cu']),
make_cuda_ext(
name='deform_conv_cuda',
module='mmdet.ops.dcn',
sources=[
'src/deform_conv_cuda.cpp',
'src/deform_conv_cuda_kernel.cu'
]),
make_cuda_ext(
name='deform_pool_cuda',
module='mmdet.ops.dcn',
sources=[
'src/deform_pool_cuda.cpp',
'src/deform_pool_cuda_kernel.cu'
]),
make_cuda_ext(
name='sigmoid_focal_loss_cuda',
module='mmdet.ops.sigmoid_focal_loss',
sources=[
'src/sigmoid_focal_loss.cpp',
'src/sigmoid_focal_loss_cuda.cu'
]),
make_cuda_ext(
name='masked_conv2d_cuda',
module='mmdet.ops.masked_conv',
sources=[
'src/masked_conv2d_cuda.cpp', 'src/masked_conv2d_kernel.cu'
]),
make_cuda_ext(
name='box_iou_rotated_cuda',
module='mmdet.ops.box_iou_rotated',
sources=['src/box_iou_rotated_cpu.cpp', 'src/box_iou_rotated_cuda.cu']),
make_cuda_ext(
name='nms_rotated_cuda',
module='mmdet.ops.nms_rotated',
sources=['src/nms_rotated_cpu.cpp', 'src/nms_rotated_cuda.cu']),
make_cuda_ext(
name='ml_nms_rotated_cuda',
module='mmdet.ops.ml_nms_rotated',
sources=['src/nms_rotated_cpu.cpp', 'src/nms_rotated_cuda.cu']),
make_cuda_ext(
name='roi_align_rotated_cuda',
module='mmdet.ops.roi_align_rotated',
sources=['src/ROIAlignRotated_cpu.cpp', 'src/ROIAlignRotated_cuda.cu']),
make_cuda_ext(
name='orn_cuda',
module='mmdet.ops.orn',
sources=['src/vision.cpp',
'src/cpu/ActiveRotatingFilter_cpu.cpp', 'src/cpu/RotationInvariantEncoding_cpu.cpp',
'src/cuda/ActiveRotatingFilter_cuda.cu', 'src/cuda/RotationInvariantEncoding_cuda.cu',
]),
make_cuda_ext(
name='sort_vertices_cuda',
module='mmdet.ops.box_iou_rotated_diff',
sources=['src/sort_vert.cpp', 'src/sort_vert_kernel.cu',]),
],
cmdclass={'build_ext': BuildExtension},
zip_safe=False)
| 8,115 | 32.958159 | 111 |
py
|
s2anet
|
s2anet-master/tools/test.py
|
import argparse
import os
import os.path as osp
import shutil
import tempfile
import mmcv
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, load_checkpoint
from mmdet.apis import init_dist
from mmdet.core import coco_eval, results2json, wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
def single_gpu_test(model, data_loader, show=False):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=not show, **data)
results.append(result)
if show:
model.module.show_result(data, result)
batch_size = data['img'][0].size(0)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None):
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if rank == 0:
batch_size = data['img'][0].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
results = collect_results(results, len(dataset), tmpdir)
return results
def collect_results(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN,),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file')
parser.add_argument(
'--json_out',
help='output result file name without extension',
type=str)
parser.add_argument(
'--eval',
type=str,
nargs='+',
choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
help='eval types')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--tmpdir', help='tmp dir for writing some results')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
# add dataset type for more dataset eval other than coco
parser.add_argument(
'--data',
choices=['coco', 'dota', 'dota_large', 'dota_hbb', 'hrsc2016', 'voc'],
default='dota',
type=str,
help='eval dataset type')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
assert args.out or args.show or args.json_out, \
('Please specify at least one operation (save or show the results) '
'with the argument "--out" or "--show" or "--json_out"')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
if args.json_out is not None and args.json_out.endswith('.json'):
args.json_out = args.json_out[:-5]
cfg = mmcv.Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
# cfg.model.rpn_pretrained = None
# cfg.model.rcnn_pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
imgs_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show)
else:
model = MMDistributedDataParallel(model.cuda())
outputs = multi_gpu_test(model, data_loader, args.tmpdir)
rank, _ = get_dist_info()
if args.out and rank == 0:
print('\nwriting results to {}'.format(args.out))
mmcv.dump(outputs, args.out)
eval_types = args.eval
data_name = args.data
if data_name == 'coco':
if eval_types:
print('Starting evaluate {}'.format(' and '.join(eval_types)))
if eval_types == ['proposal_fast']:
result_file = args.out
coco_eval(result_file, eval_types, dataset.coco)
else:
if not isinstance(outputs[0], dict):
result_files = results2json(dataset, outputs, args.out)
coco_eval(result_files, eval_types, dataset.coco)
else:
for name in outputs[0]:
print('\nEvaluating {}'.format(name))
outputs_ = [out[name] for out in outputs]
result_file = args.out + '.{}'.format(name)
result_files = results2json(dataset, outputs_,
result_file)
coco_eval(result_files, eval_types, dataset.coco)
elif data_name in ['dota', 'hrsc2016']:
eval_kwargs = cfg.get('evaluation', {}).copy()
work_dir = osp.dirname(args.out)
dataset.evaluate(outputs, work_dir, **eval_kwargs)
# Save predictions in the COCO json format
if args.json_out and rank == 0:
if not isinstance(outputs[0], dict):
results2json(dataset, outputs, args.json_out)
else:
for name in outputs[0]:
outputs_ = [out[name] for out in outputs]
result_file = args.json_out + '.{}'.format(name)
results2json(dataset, outputs_, result_file)
if __name__ == '__main__':
main()
| 8,637 | 35.447257 | 79 |
py
|
s2anet
|
s2anet-master/tools/voc_eval.py
|
from argparse import ArgumentParser
import mmcv
import numpy as np
from mmdet import datasets
from mmdet.core import eval_map
def voc_eval(result_file, dataset, iou_thr=0.5):
det_results = mmcv.load(result_file)
gt_bboxes = []
gt_labels = []
gt_ignore = []
for i in range(len(dataset)):
ann = dataset.get_ann_info(i)
bboxes = ann['bboxes']
labels = ann['labels']
if 'bboxes_ignore' in ann:
ignore = np.concatenate([
np.zeros(bboxes.shape[0], dtype=np.bool),
np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
])
gt_ignore.append(ignore)
bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
labels = np.concatenate([labels, ann['labels_ignore']])
gt_bboxes.append(bboxes)
gt_labels.append(labels)
if not gt_ignore:
gt_ignore = None
if hasattr(dataset, 'year') and dataset.year == 2007:
dataset_name = 'voc07'
else:
dataset_name = dataset.CLASSES
eval_map(
det_results,
gt_bboxes,
gt_labels,
gt_ignore=gt_ignore,
scale_ranges=None,
iou_thr=iou_thr,
dataset=dataset_name,
print_summary=True)
def main():
parser = ArgumentParser(description='VOC Evaluation')
parser.add_argument('result', help='result file path')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--iou-thr',
type=float,
default=0.5,
help='IoU threshold for evaluation')
args = parser.parse_args()
cfg = mmcv.Config.fromfile(args.config)
test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets)
voc_eval(args.result, test_dataset, args.iou_thr)
if __name__ == '__main__':
main()
| 1,819 | 27.888889 | 69 |
py
|
s2anet
|
s2anet-master/tools/convert_model.py
|
import argparse
import subprocess
from collections import OrderedDict
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
in_state_dict = checkpoint.pop('state_dict')
out_state_dict = OrderedDict()
for key, val in in_state_dict.items():
if 'rbox_head' in key:
key = key.replace('rbox_head','bbox_head')
out_state_dict[key] = val
checkpoint['state_dict'] = out_state_dict
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
    # note: rstrip('.pth') strips trailing characters, not the suffix, so slice it off explicitly
    final_file = (out_file[:-4] if out_file.endswith('.pth') else out_file) + '-{}.pth'.format(sha[:8])
subprocess.Popen(['mv', out_file, final_file])
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()
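# Example invocation (illustrative; file names are placeholders): rewrites 'rbox_head.*' keys to
# 'bbox_head.*', drops the optimizer state and appends a sha256-derived suffix to the output name.
#   python tools/convert_model.py old_s2anet.pth converted.pth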
| 1,357 | 29.863636 | 77 |
py
|
s2anet
|
s2anet-master/tools/get_flops.py
|
import argparse
from mmcv import Config
from mmdet.models import build_detector
from mmdet.utils import get_model_complexity_info
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
input_shape = (3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (3, ) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
cfg = Config.fromfile(args.config)
model = build_detector(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg).cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
print('{0}\nInput shape: {1}\nFlops: {2}\nParams: {3}\n{0}'.format(
split_line, input_shape, flops, params))
if __name__ == '__main__':
main()
| 1,401 | 25.45283 | 73 |
py
|
s2anet
|
s2anet-master/tools/publish_model.py
|
import argparse
import subprocess
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8])
subprocess.Popen(['mv', out_file, final_file])
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()
| 1,028 | 27.583333 | 77 |
py
|
s2anet
|
s2anet-master/tools/analyze_logs.py
|
import argparse
import json
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def cal_train_time(log_dicts, args):
for i, log_dict in enumerate(log_dicts):
print('{}Analyze train time of {}{}'.format('-' * 5, args.json_logs[i],
'-' * 5))
all_times = []
for epoch in log_dict.keys():
if args.include_outliers:
all_times.append(log_dict[epoch]['time'])
else:
all_times.append(log_dict[epoch]['time'][1:])
all_times = np.array(all_times)
epoch_ave_time = all_times.mean(-1)
slowest_epoch = epoch_ave_time.argmax()
fastest_epoch = epoch_ave_time.argmin()
std_over_epoch = epoch_ave_time.std()
print('slowest epoch {}, average time is {:.4f}'.format(
slowest_epoch + 1, epoch_ave_time[slowest_epoch]))
print('fastest epoch {}, average time is {:.4f}'.format(
fastest_epoch + 1, epoch_ave_time[fastest_epoch]))
print('time std over epochs is {:.4f}'.format(std_over_epoch))
print('average iter time: {:.4f} s/iter'.format(np.mean(all_times)))
print()
def plot_curve(log_dicts, args):
if args.backend is not None:
plt.switch_backend(args.backend)
sns.set_style(args.style)
# if legend is None, use {filename}_{key} as legend
legend = args.legend
if legend is None:
legend = []
for json_log in args.json_logs:
for metric in args.keys:
legend.append('{}_{}'.format(json_log, metric))
assert len(legend) == (len(args.json_logs) * len(args.keys))
metrics = args.keys
num_metrics = len(metrics)
for i, log_dict in enumerate(log_dicts):
epochs = list(log_dict.keys())
for j, metric in enumerate(metrics):
print('plot curve of {}, metric is {}'.format(
args.json_logs[i], metric))
if metric not in log_dict[epochs[0]]:
raise KeyError('{} does not contain metric {}'.format(
args.json_logs[i], metric))
if 'mAP' in metric:
xs = np.arange(1, max(epochs) + 1)
ys = []
for epoch in epochs:
ys += log_dict[epoch][metric]
ax = plt.gca()
ax.set_xticks(xs)
plt.xlabel('epoch')
plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o')
else:
xs = []
ys = []
num_iters_per_epoch = log_dict[epochs[0]]['iter'][-1]
for epoch in epochs:
iters = log_dict[epoch]['iter']
if log_dict[epoch]['mode'][-1] == 'val':
iters = iters[:-1]
xs.append(
np.array(iters) + (epoch - 1) * num_iters_per_epoch)
ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
xs = np.concatenate(xs)
ys = np.concatenate(ys)
plt.xlabel('iter')
plt.plot(
xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)
plt.legend()
if args.title is not None:
plt.title(args.title)
if args.out is None:
plt.show()
else:
print('save curve to: {}'.format(args.out))
plt.savefig(args.out)
plt.cla()
def add_plot_parser(subparsers):
parser_plt = subparsers.add_parser(
'plot_curve', help='parser for plotting curves')
parser_plt.add_argument(
'json_logs',
type=str,
nargs='+',
help='path of train log in json format')
parser_plt.add_argument(
'--keys',
type=str,
nargs='+',
default=['bbox_mAP'],
help='the metric that you want to plot')
parser_plt.add_argument('--title', type=str, help='title of figure')
parser_plt.add_argument(
'--legend',
type=str,
nargs='+',
default=None,
help='legend of each plot')
parser_plt.add_argument(
'--backend', type=str, default=None, help='backend of plt')
parser_plt.add_argument(
'--style', type=str, default='dark', help='style of plt')
parser_plt.add_argument('--out', type=str, default=None)
def add_time_parser(subparsers):
parser_time = subparsers.add_parser(
'cal_train_time',
help='parser for computing the average time per training iteration')
parser_time.add_argument(
'json_logs',
type=str,
nargs='+',
help='path of train log in json format')
parser_time.add_argument(
'--include-outliers',
action='store_true',
help='include the first value of every epoch when computing '
'the average time')
def parse_args():
parser = argparse.ArgumentParser(description='Analyze Json Log')
# currently only support plot curve and calculate average train time
subparsers = parser.add_subparsers(dest='task', help='task parser')
add_plot_parser(subparsers)
add_time_parser(subparsers)
args = parser.parse_args()
return args
def load_json_logs(json_logs):
# load and convert json_logs to log_dict, key is epoch, value is a sub dict
# keys of sub dict is different metrics, e.g. memory, bbox_mAP
# value of sub dict is a list of corresponding values of all iterations
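    # a typical json log line looks roughly like this (assumption, for illustration only):
    # {"mode": "train", "epoch": 1, "iter": 50, "lr": 0.01, "loss": 0.8, "time": 0.3}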
log_dicts = [dict() for _ in json_logs]
for json_log, log_dict in zip(json_logs, log_dicts):
with open(json_log, 'r') as log_file:
for l in log_file:
log = json.loads(l.strip())
epoch = log.pop('epoch')
if epoch not in log_dict:
log_dict[epoch] = defaultdict(list)
for k, v in log.items():
log_dict[epoch][k].append(v)
return log_dicts
def main():
args = parse_args()
json_logs = args.json_logs
for json_log in json_logs:
assert json_log.endswith('.json')
log_dicts = load_json_logs(json_logs)
eval(args.task)(log_dicts, args)
if __name__ == '__main__':
main()
| 6,272 | 34.044693 | 79 |
py
|
s2anet
|
s2anet-master/tools/upgrade_model_version.py
|
import argparse
import re
from collections import OrderedDict
import torch
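# example usage (filenames are hypothetical):
#   python tools/upgrade_model_version.py retinanet_old.pth retinanet_upgraded.pth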
def convert(in_file, out_file):
"""Convert keys in checkpoints.
There can be some breaking changes during the development of mmdetection,
and this tool is used for upgrading checkpoints trained with old versions
to the latest one.
"""
checkpoint = torch.load(in_file)
in_state_dict = checkpoint.pop('state_dict')
out_state_dict = OrderedDict()
for key, val in in_state_dict.items():
# Use ConvModule instead of nn.Conv2d in RetinaNet
# cls_convs.0.weight -> cls_convs.0.conv.weight
m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key)
if m is not None:
param = m.groups()[1]
new_key = key.replace(param, 'conv.{}'.format(param))
out_state_dict[new_key] = val
continue
out_state_dict[key] = val
checkpoint['state_dict'] = out_state_dict
torch.save(checkpoint, out_file)
def main():
parser = argparse.ArgumentParser(description='Upgrade model version')
parser.add_argument('in_file', help='input checkpoint file')
parser.add_argument('out_file', help='output checkpoint file')
args = parser.parse_args()
convert(args.in_file, args.out_file)
if __name__ == '__main__':
main()
| 1,322 | 29.767442 | 77 |
py
|
s2anet
|
s2anet-master/tools/test_robustness.py
|
import argparse
import copy
import os
import os.path as osp
import shutil
import tempfile
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, load_checkpoint
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from robustness_eval import get_results
from mmdet import datasets
from mmdet.apis import init_dist, set_random_seed
from mmdet.core import (eval_map, fast_eval_recall, results2json,
wrap_fp16_model)
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
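# example usage (paths are hypothetical):
#   python tools/test_robustness.py configs/retinanet_r50_fpn_1x.py checkpoint.pth \
#       --out robustness.pkl --corruptions benchmark --eval bbox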
def coco_eval_with_return(result_files,
result_types,
coco,
max_dets=(100, 300, 1000)):
for res_type in result_types:
assert res_type in [
'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'
]
if mmcv.is_str(coco):
coco = COCO(coco)
assert isinstance(coco, COCO)
if result_types == ['proposal_fast']:
ar = fast_eval_recall(result_files, coco, np.array(max_dets))
for i, num in enumerate(max_dets):
print('AR@{}\t= {:.4f}'.format(num, ar[i]))
return
eval_results = {}
for res_type in result_types:
result_file = result_files[res_type]
assert result_file.endswith('.json')
coco_dets = coco.loadRes(result_file)
img_ids = coco.getImgIds()
iou_type = 'bbox' if res_type == 'proposal' else res_type
cocoEval = COCOeval(coco, coco_dets, iou_type)
cocoEval.params.imgIds = img_ids
if res_type == 'proposal':
cocoEval.params.useCats = 0
cocoEval.params.maxDets = list(max_dets)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if res_type == 'segm' or res_type == 'bbox':
metric_names = [
'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10',
'AR100', 'ARs', 'ARm', 'ARl'
]
eval_results[res_type] = {
metric_names[i]: cocoEval.stats[i]
for i in range(len(metric_names))
}
else:
eval_results[res_type] = cocoEval.stats
return eval_results
def voc_eval_with_return(result_file,
dataset,
iou_thr=0.5,
print_summary=True,
only_ap=True):
det_results = mmcv.load(result_file)
gt_bboxes = []
gt_labels = []
gt_ignore = []
for i in range(len(dataset)):
ann = dataset.get_ann_info(i)
bboxes = ann['bboxes']
labels = ann['labels']
if 'bboxes_ignore' in ann:
ignore = np.concatenate([
np.zeros(bboxes.shape[0], dtype=np.bool),
np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
])
gt_ignore.append(ignore)
bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
labels = np.concatenate([labels, ann['labels_ignore']])
gt_bboxes.append(bboxes)
gt_labels.append(labels)
if not gt_ignore:
        gt_ignore = None
if hasattr(dataset, 'year') and dataset.year == 2007:
dataset_name = 'voc07'
else:
dataset_name = dataset.CLASSES
mean_ap, eval_results = eval_map(
det_results,
gt_bboxes,
gt_labels,
gt_ignore=gt_ignore,
scale_ranges=None,
iou_thr=iou_thr,
dataset=dataset_name,
print_summary=print_summary)
if only_ap:
eval_results = [{
'ap': eval_results[i]['ap']
} for i in range(len(eval_results))]
return mean_ap, eval_results
def single_gpu_test(model, data_loader, show=False):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=not show, **data)
results.append(result)
if show:
model.module.show_result(data, result, dataset.img_norm_cfg)
batch_size = data['img'][0].size(0)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None):
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if rank == 0:
batch_size = data['img'][0].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
results = collect_results(results, len(dataset), tmpdir)
return results
def collect_results(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file')
parser.add_argument(
'--corruptions',
type=str,
nargs='+',
default='benchmark',
choices=[
'all', 'benchmark', 'noise', 'blur', 'weather', 'digital',
'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise',
'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow',
'frost', 'fog', 'brightness', 'contrast', 'elastic_transform',
'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur',
'spatter', 'saturate'
],
help='corruptions')
parser.add_argument(
'--severities',
type=int,
nargs='+',
default=[0, 1, 2, 3, 4, 5],
help='corruption severity levels')
parser.add_argument(
'--eval',
type=str,
nargs='+',
choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
help='eval types')
parser.add_argument(
'--iou-thr',
type=float,
default=0.5,
help='IoU threshold for pascal voc evaluation')
parser.add_argument(
'--summaries',
type=bool,
default=False,
help='Print summaries for every corruption and severity')
parser.add_argument(
'--workers', type=int, default=32, help='workers per gpu')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--tmpdir', help='tmp dir for writing some results')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--final-prints',
type=str,
nargs='+',
choices=['P', 'mPC', 'rPC'],
default='mPC',
help='corruption benchmark metric to print at the end')
parser.add_argument(
'--final-prints-aggregate',
type=str,
choices=['all', 'benchmark'],
default='benchmark',
help='aggregate all results or only those for benchmark corruptions')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save or show the results) '
'with the argument "--out" or "--show"')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
if args.workers == 0:
args.workers = cfg.data.workers_per_gpu
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# set random seeds
if args.seed is not None:
set_random_seed(args.seed)
if 'all' in args.corruptions:
corruptions = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
'saturate'
]
elif 'benchmark' in args.corruptions:
corruptions = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression'
]
elif 'noise' in args.corruptions:
corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
elif 'blur' in args.corruptions:
corruptions = [
'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'
]
elif 'weather' in args.corruptions:
corruptions = ['snow', 'frost', 'fog', 'brightness']
elif 'digital' in args.corruptions:
corruptions = [
'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
]
elif 'holdout' in args.corruptions:
corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
elif 'None' in args.corruptions:
corruptions = ['None']
args.severities = [0]
else:
corruptions = args.corruptions
aggregated_results = {}
for corr_i, corruption in enumerate(corruptions):
aggregated_results[corruption] = {}
for sev_i, corruption_severity in enumerate(args.severities):
# evaluate severity 0 (= no corruption) only once
if corr_i > 0 and corruption_severity == 0:
aggregated_results[corruption][0] = \
aggregated_results[corruptions[0]][0]
continue
test_data_cfg = copy.deepcopy(cfg.data.test)
# assign corruption and severity
if corruption_severity > 0:
corruption_trans = dict(
type='Corrupt',
corruption=corruption,
severity=corruption_severity)
# TODO: hard coded "1", we assume that the first step is
# loading images, which needs to be fixed in the future
test_data_cfg['pipeline'].insert(1, corruption_trans)
# print info
print('\nTesting {} at severity {}'.format(corruption,
corruption_severity))
# build the dataloader
# TODO: support multiple images per gpu
# (only minor changes are needed)
dataset = build_dataset(test_data_cfg)
data_loader = build_dataloader(
dataset,
imgs_per_gpu=1,
workers_per_gpu=args.workers,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
model = build_detector(
cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(
model, args.checkpoint, map_location='cpu')
# old versions did not save class info in checkpoints,
            # this workaround is for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show)
else:
model = MMDistributedDataParallel(model.cuda())
outputs = multi_gpu_test(model, data_loader, args.tmpdir)
rank, _ = get_dist_info()
if args.out and rank == 0:
eval_results_filename = (
osp.splitext(args.out)[0] + '_results' +
osp.splitext(args.out)[1])
mmcv.dump(outputs, args.out)
eval_types = args.eval
if cfg.dataset_type == 'VOCDataset':
if eval_types:
for eval_type in eval_types:
if eval_type == 'bbox':
test_dataset = mmcv.runner.obj_from_dict(
cfg.data.test, datasets)
mean_ap, eval_results = \
voc_eval_with_return(
args.out, test_dataset,
args.iou_thr, args.summaries)
aggregated_results[corruption][
corruption_severity] = eval_results
else:
print('\nOnly "bbox" evaluation \
is supported for pascal voc')
else:
if eval_types:
print('Starting evaluate {}'.format(
' and '.join(eval_types)))
if eval_types == ['proposal_fast']:
result_file = args.out
else:
if not isinstance(outputs[0], dict):
result_files = results2json(
dataset, outputs, args.out)
else:
for name in outputs[0]:
print('\nEvaluating {}'.format(name))
outputs_ = [out[name] for out in outputs]
                                    result_file = args.out + '.{}'.format(name)
result_files = results2json(
dataset, outputs_, result_file)
eval_results = coco_eval_with_return(
result_files, eval_types, dataset.coco)
aggregated_results[corruption][
corruption_severity] = eval_results
else:
print('\nNo task was selected for evaluation;'
'\nUse --eval to select a task')
# save results after each evaluation
mmcv.dump(aggregated_results, eval_results_filename)
    # print final results
print('\nAggregated results:')
prints = args.final_prints
aggregate = args.final_prints_aggregate
if cfg.dataset_type == 'VOCDataset':
get_results(
eval_results_filename,
dataset='voc',
prints=prints,
aggregate=aggregate)
else:
get_results(
eval_results_filename,
dataset='coco',
prints=prints,
aggregate=aggregate)
if __name__ == '__main__':
main()
| 17,478 | 35.953488 | 79 |
py
|
s2anet
|
s2anet-master/tools/coco_error_analysis.py
|
import copy
import os
from argparse import ArgumentParser
from multiprocessing import Pool
import matplotlib.pyplot as plt
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
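# example usage (paths are hypothetical):
#   python tools/coco_error_analysis.py results.bbox.json analysis_out \
#       --ann data/coco/annotations/instances_val2017.json --types bbox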
def makeplot(rs, ps, outDir, class_name, iou_type):
cs = np.vstack([
np.ones((2, 3)),
np.array([.31, .51, .74]),
np.array([.75, .31, .30]),
np.array([.36, .90, .38]),
np.array([.50, .39, .64]),
np.array([1, .6, 0])
])
areaNames = ['allarea', 'small', 'medium', 'large']
types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN']
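    # the seven precision slices stacked along the first axis of `ps` correspond,
    # following the standard COCO error analysis (assumed), to: C75/C50 (IoU .75/.50),
    # Loc (IoU .10), Sim (supercategory confusion ignored), Oth (all class confusion
    # ignored), BG (background false positives removed) and FN (false negatives removed)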
for i in range(len(areaNames)):
area_ps = ps[..., i, 0]
figure_tile = iou_type + '-' + class_name + '-' + areaNames[i]
aps = [ps_.mean() for ps_ in area_ps]
ps_curve = [
ps_.mean(axis=1) if ps_.ndim > 1 else ps_ for ps_ in area_ps
]
ps_curve.insert(0, np.zeros(ps_curve[0].shape))
fig = plt.figure()
ax = plt.subplot(111)
for k in range(len(types)):
ax.plot(rs, ps_curve[k + 1], color=[0, 0, 0], linewidth=0.5)
ax.fill_between(
rs,
ps_curve[k],
ps_curve[k + 1],
color=cs[k],
label=str('[{:.3f}'.format(aps[k]) + ']' + types[k]))
plt.xlabel('recall')
plt.ylabel('precision')
plt.xlim(0, 1.)
plt.ylim(0, 1.)
plt.title(figure_tile)
plt.legend()
# plt.show()
fig.savefig(outDir + '/{}.png'.format(figure_tile))
plt.close(fig)
def analyze_individual_category(k, cocoDt, cocoGt, catId, iou_type):
nm = cocoGt.loadCats(catId)[0]
print('--------------analyzing {}-{}---------------'.format(
k + 1, nm['name']))
ps_ = {}
dt = copy.deepcopy(cocoDt)
nm = cocoGt.loadCats(catId)[0]
imgIds = cocoGt.getImgIds()
dt_anns = dt.dataset['annotations']
select_dt_anns = []
for ann in dt_anns:
if ann['category_id'] == catId:
select_dt_anns.append(ann)
dt.dataset['annotations'] = select_dt_anns
dt.createIndex()
# compute precision but ignore superclass confusion
gt = copy.deepcopy(cocoGt)
child_catIds = gt.getCatIds(supNms=[nm['supercategory']])
for idx, ann in enumerate(gt.dataset['annotations']):
if (ann['category_id'] in child_catIds
and ann['category_id'] != catId):
gt.dataset['annotations'][idx]['ignore'] = 1
gt.dataset['annotations'][idx]['iscrowd'] = 1
gt.dataset['annotations'][idx]['category_id'] = catId
cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type)
cocoEval.params.imgIds = imgIds
cocoEval.params.maxDets = [100]
cocoEval.params.iouThrs = [.1]
cocoEval.params.useCats = 1
cocoEval.evaluate()
cocoEval.accumulate()
ps_supercategory = cocoEval.eval['precision'][0, :, k, :, :]
ps_['ps_supercategory'] = ps_supercategory
# compute precision but ignore any class confusion
gt = copy.deepcopy(cocoGt)
for idx, ann in enumerate(gt.dataset['annotations']):
if ann['category_id'] != catId:
gt.dataset['annotations'][idx]['ignore'] = 1
gt.dataset['annotations'][idx]['iscrowd'] = 1
gt.dataset['annotations'][idx]['category_id'] = catId
cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type)
cocoEval.params.imgIds = imgIds
cocoEval.params.maxDets = [100]
cocoEval.params.iouThrs = [.1]
cocoEval.params.useCats = 1
cocoEval.evaluate()
cocoEval.accumulate()
ps_allcategory = cocoEval.eval['precision'][0, :, k, :, :]
ps_['ps_allcategory'] = ps_allcategory
return k, ps_
def analyze_results(res_file, ann_file, res_types, out_dir):
for res_type in res_types:
assert res_type in ['bbox', 'segm']
directory = os.path.dirname(out_dir + '/')
if not os.path.exists(directory):
print('-------------create {}-----------------'.format(out_dir))
os.makedirs(directory)
cocoGt = COCO(ann_file)
cocoDt = cocoGt.loadRes(res_file)
imgIds = cocoGt.getImgIds()
for res_type in res_types:
res_out_dir = out_dir + '/' + res_type + '/'
res_directory = os.path.dirname(res_out_dir)
if not os.path.exists(res_directory):
print(
'-------------create {}-----------------'.format(res_out_dir))
os.makedirs(res_directory)
iou_type = res_type
cocoEval = COCOeval(
copy.deepcopy(cocoGt), copy.deepcopy(cocoDt), iou_type)
cocoEval.params.imgIds = imgIds
cocoEval.params.iouThrs = [.75, .5, .1]
cocoEval.params.maxDets = [100]
cocoEval.evaluate()
cocoEval.accumulate()
ps = cocoEval.eval['precision']
ps = np.vstack([ps, np.zeros((4, *ps.shape[1:]))])
catIds = cocoGt.getCatIds()
recThrs = cocoEval.params.recThrs
with Pool(processes=48) as pool:
args = [(k, cocoDt, cocoGt, catId, iou_type)
for k, catId in enumerate(catIds)]
analyze_results = pool.starmap(analyze_individual_category, args)
for k, catId in enumerate(catIds):
nm = cocoGt.loadCats(catId)[0]
print('--------------saving {}-{}---------------'.format(
k + 1, nm['name']))
analyze_result = analyze_results[k]
assert k == analyze_result[0]
ps_supercategory = analyze_result[1]['ps_supercategory']
ps_allcategory = analyze_result[1]['ps_allcategory']
# compute precision but ignore superclass confusion
ps[3, :, k, :, :] = ps_supercategory
# compute precision but ignore any class confusion
ps[4, :, k, :, :] = ps_allcategory
# fill in background and false negative errors and plot
ps[ps == -1] = 0
ps[5, :, k, :, :] = (ps[4, :, k, :, :] > 0)
ps[6, :, k, :, :] = 1.0
makeplot(recThrs, ps[:, :, k], res_out_dir, nm['name'], iou_type)
makeplot(recThrs, ps, res_out_dir, 'allclass', iou_type)
def main():
parser = ArgumentParser(description='COCO Error Analysis Tool')
parser.add_argument('result', help='result file (json format) path')
parser.add_argument('out_dir', help='dir to save analyze result images')
parser.add_argument(
'--ann',
default='data/coco/annotations/instances_val2017.json',
help='annotation file path')
parser.add_argument(
'--types', type=str, nargs='+', default=['bbox'], help='result types')
args = parser.parse_args()
analyze_results(args.result, args.ann, args.types, out_dir=args.out_dir)
if __name__ == '__main__':
main()
| 6,784 | 37.771429 | 78 |
py
|
s2anet
|
s2anet-master/tools/robustness_eval.py
|
import os.path as osp
from argparse import ArgumentParser
import mmcv
import numpy as np
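# example usage (path is hypothetical):
#   python tools/robustness_eval.py robustness_results.pkl --dataset coco --prints P mPC rPC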
def print_coco_results(results):
def _print(result, ap=1, iouThr=None, areaRng='all', maxDets=100):
iStr = ' {:<18} {} @[ IoU={:<9} | \
area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap == 1 else '(AR)'
iouStr = '{:0.2f}:{:0.2f}'.format(.5, .95) \
if iouThr is None else '{:0.2f}'.format(iouThr)
print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, result))
stats = np.zeros((12, ))
stats[0] = _print(results[0], 1)
stats[1] = _print(results[1], 1, iouThr=.5)
stats[2] = _print(results[2], 1, iouThr=.75)
stats[3] = _print(results[3], 1, areaRng='small')
stats[4] = _print(results[4], 1, areaRng='medium')
stats[5] = _print(results[5], 1, areaRng='large')
stats[6] = _print(results[6], 0, maxDets=1)
stats[7] = _print(results[7], 0, maxDets=10)
stats[8] = _print(results[8], 0)
stats[9] = _print(results[9], 0, areaRng='small')
stats[10] = _print(results[10], 0, areaRng='medium')
stats[11] = _print(results[11], 0, areaRng='large')
def get_coco_style_results(filename,
task='bbox',
metric=None,
prints='mPC',
aggregate='benchmark'):
assert aggregate in ['benchmark', 'all']
if prints == 'all':
prints = ['P', 'mPC', 'rPC']
elif isinstance(prints, str):
prints = [prints]
for p in prints:
assert p in ['P', 'mPC', 'rPC']
if metric is None:
metrics = [
'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100',
'ARs', 'ARm', 'ARl'
]
elif isinstance(metric, list):
metrics = metric
else:
metrics = [metric]
for metric_name in metrics:
assert metric_name in [
'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100',
'ARs', 'ARm', 'ARl'
]
eval_output = mmcv.load(filename)
num_distortions = len(list(eval_output.keys()))
results = np.zeros((num_distortions, 6, len(metrics)), dtype='float32')
for corr_i, distortion in enumerate(eval_output):
for severity in eval_output[distortion]:
for metric_j, metric_name in enumerate(metrics):
mAP = eval_output[distortion][severity][task][metric_name]
results[corr_i, severity, metric_j] = mAP
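    # P: performance on clean data (severity 0); mPC: mean performance over the
    # corruptions at severities 1-5; rPC: mPC relative to the clean performance P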
P = results[0, 0, :]
if aggregate == 'benchmark':
mPC = np.mean(results[:15, 1:, :], axis=(0, 1))
else:
mPC = np.mean(results[:, 1:, :], axis=(0, 1))
rPC = mPC / P
print('\nmodel: {}'.format(osp.basename(filename)))
if metric is None:
if 'P' in prints:
print('Performance on Clean Data [P] ({})'.format(task))
print_coco_results(P)
if 'mPC' in prints:
print('Mean Performance under Corruption [mPC] ({})'.format(task))
print_coco_results(mPC)
if 'rPC' in prints:
            print('Relative Performance under Corruption [rPC] ({})'.format(
task))
print_coco_results(rPC)
else:
if 'P' in prints:
print('Performance on Clean Data [P] ({})'.format(task))
for metric_i, metric_name in enumerate(metrics):
print('{:5} = {:0.3f}'.format(metric_name, P[metric_i]))
if 'mPC' in prints:
print('Mean Performance under Corruption [mPC] ({})'.format(task))
for metric_i, metric_name in enumerate(metrics):
print('{:5} = {:0.3f}'.format(metric_name, mPC[metric_i]))
if 'rPC' in prints:
print('Relative Performance under Corruption [rPC] ({})'.format(
task))
for metric_i, metric_name in enumerate(metrics):
print('{:5} => {:0.1f} %'.format(metric_name,
rPC[metric_i] * 100))
return results
def get_voc_style_results(filename, prints='mPC', aggregate='benchmark'):
assert aggregate in ['benchmark', 'all']
if prints == 'all':
prints = ['P', 'mPC', 'rPC']
elif isinstance(prints, str):
prints = [prints]
for p in prints:
assert p in ['P', 'mPC', 'rPC']
eval_output = mmcv.load(filename)
num_distortions = len(list(eval_output.keys()))
results = np.zeros((num_distortions, 6, 20), dtype='float32')
for i, distortion in enumerate(eval_output):
for severity in eval_output[distortion]:
mAP = [
eval_output[distortion][severity][j]['ap']
for j in range(len(eval_output[distortion][severity]))
]
results[i, severity, :] = mAP
P = results[0, 0, :]
if aggregate == 'benchmark':
mPC = np.mean(results[:15, 1:, :], axis=(0, 1))
else:
mPC = np.mean(results[:, 1:, :], axis=(0, 1))
rPC = mPC / P
print('\nmodel: {}'.format(osp.basename(filename)))
if 'P' in prints:
print('{:48} = {:0.3f}'.format('Performance on Clean Data [P] in AP50',
np.mean(P)))
if 'mPC' in prints:
print('{:48} = {:0.3f}'.format(
'Mean Performance under Corruption [mPC] in AP50', np.mean(mPC)))
if 'rPC' in prints:
print('{:48} = {:0.1f}'.format(
            'Relative Performance under Corruption [rPC] in %',
np.mean(rPC) * 100))
return np.mean(results, axis=2, keepdims=True)
def get_results(filename,
dataset='coco',
task='bbox',
metric=None,
prints='mPC',
aggregate='benchmark'):
assert dataset in ['coco', 'voc', 'cityscapes']
if dataset in ['coco', 'cityscapes']:
results = get_coco_style_results(
filename,
task=task,
metric=metric,
prints=prints,
aggregate=aggregate)
elif dataset == 'voc':
if task != 'bbox':
print('Only bbox analysis is supported for Pascal VOC')
print('Will report bbox results\n')
if metric not in [None, ['AP'], ['AP50']]:
print('Only the AP50 metric is supported for Pascal VOC')
print('Will report AP50 metric\n')
results = get_voc_style_results(
filename, prints=prints, aggregate=aggregate)
return results
def get_distortions_from_file(filename):
eval_output = mmcv.load(filename)
return get_distortions_from_results(eval_output)
def get_distortions_from_results(eval_output):
distortions = []
for i, distortion in enumerate(eval_output):
distortions.append(distortion.replace("_", " "))
return distortions
def main():
parser = ArgumentParser(description='Corruption Result Analysis')
parser.add_argument('filename', help='result file path')
parser.add_argument(
'--dataset',
type=str,
choices=['coco', 'voc', 'cityscapes'],
default='coco',
help='dataset type')
parser.add_argument(
'--task',
type=str,
nargs='+',
choices=['bbox', 'segm'],
default=['bbox'],
help='task to report')
parser.add_argument(
'--metric',
nargs='+',
choices=[
None, 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10',
'AR100', 'ARs', 'ARm', 'ARl'
],
default=None,
help='metric to report')
parser.add_argument(
'--prints',
type=str,
nargs='+',
choices=['P', 'mPC', 'rPC'],
default='mPC',
help='corruption benchmark metric to print')
parser.add_argument(
'--aggregate',
type=str,
choices=['all', 'benchmark'],
default='benchmark',
help='aggregate all results or only those \
for benchmark corruptions')
args = parser.parse_args()
for task in args.task:
get_results(
args.filename,
dataset=args.dataset,
task=task,
metric=args.metric,
prints=args.prints,
aggregate=args.aggregate)
if __name__ == '__main__':
main()
| 8,374 | 31.587549 | 79 |
py
|
s2anet
|
s2anet-master/tools/train.py
|
from __future__ import division
import argparse
import os
import os.path as osp
import warnings
import torch
from mmcv import Config
from mmdet import __version__
from mmdet.apis import (get_root_logger, init_dist, set_random_seed,
train_detector)
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
warnings.filterwarnings("ignore")
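# example usage (config path and work dir are hypothetical):
#   python tools/train.py configs/dota/s2anet_r50_fpn_1x_dota.py --work_dir work_dirs/s2anet --validate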
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work_dir', help='the dir to save logs and models')
parser.add_argument(
'--resume_from', help='the checkpoint file to resume from')
parser.add_argument(
'--validate',
action='store_true',
help='whether to evaluate the checkpoint during training')
parser.add_argument(
'--gpus',
type=int,
default=1,
help='number of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--autoscale-lr',
action='store_true',
help='automatically scale lr with the number of gpus')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
cfg.gpus = args.gpus
if args.autoscale_lr:
# apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# init logger before other steps
logger = get_root_logger(cfg.log_level)
logger.info('Distributed training: {}'.format(distributed))
# set random seeds
if args.seed is not None:
logger.info('Set random seed to {}'.format(args.seed))
set_random_seed(args.seed)
model = build_detector(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
datasets.append(build_dataset(cfg.data.val))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__,
config=cfg.text,
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=args.validate,
logger=logger)
if __name__ == '__main__':
main()
| 3,887 | 31.672269 | 83 |
py
|
s2anet
|
s2anet-master/tools/coco_eval.py
|
from argparse import ArgumentParser
from mmdet.core import coco_eval
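# example usage (paths are hypothetical):
#   python tools/coco_eval.py results.bbox.json --ann annotations/instances_val2017.json --types bbox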
def main():
parser = ArgumentParser(description='COCO Evaluation')
parser.add_argument('result', help='result file path')
parser.add_argument('--ann', help='annotation file path')
parser.add_argument(
'--types',
type=str,
nargs='+',
choices=['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint'],
default=['bbox'],
help='result types')
parser.add_argument(
'--max-dets',
type=int,
nargs='+',
default=[100, 300, 1000],
help='proposal numbers, only used for recall evaluation')
parser.add_argument(
'--classwise', action='store_true', help='whether eval class wise ap')
args = parser.parse_args()
coco_eval(args.result, args.types, args.ann, args.max_dets, args.classwise)
if __name__ == '__main__':
main()
| 914 | 28.516129 | 79 |
py
|
s2anet
|
s2anet-master/tools/detectron2pytorch.py
|
import argparse
from collections import OrderedDict
import mmcv
import torch
arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
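# example usage (filenames are hypothetical):
#   python tools/detectron2pytorch.py R-50.pkl resnet50_from_detectron.pth 50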
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
    # detectron replaces bn with an affine channel layer
state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name +
'_b'])
state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name +
'_s'])
bn_size = state_dict[torch_name + '.weight'].size()
state_dict[torch_name + '.running_mean'] = torch.zeros(bn_size)
state_dict[torch_name + '.running_var'] = torch.ones(bn_size)
converted_names.add(caffe_name + '_b')
converted_names.add(caffe_name + '_s')
def convert_conv_fc(blobs, state_dict, caffe_name, torch_name,
converted_names):
state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name +
'_w'])
converted_names.add(caffe_name + '_w')
if caffe_name + '_b' in blobs:
state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name +
'_b'])
converted_names.add(caffe_name + '_b')
def convert(src, dst, depth):
"""Convert keys in detectron pretrained ResNet models to pytorch style."""
# load arch_settings
if depth not in arch_settings:
raise ValueError('Only support ResNet-50 and ResNet-101 currently')
block_nums = arch_settings[depth]
# load caffe model
caffe_model = mmcv.load(src, encoding='latin1')
blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model
# convert to pytorch style
state_dict = OrderedDict()
converted_names = set()
convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
for i in range(1, len(block_nums) + 1):
for j in range(block_nums[i - 1]):
if j == 0:
convert_conv_fc(blobs, state_dict,
'res{}_{}_branch1'.format(i + 1, j),
'layer{}.{}.downsample.0'.format(i, j),
converted_names)
convert_bn(blobs, state_dict,
'res{}_{}_branch1_bn'.format(i + 1, j),
'layer{}.{}.downsample.1'.format(i, j),
converted_names)
for k, letter in enumerate(['a', 'b', 'c']):
convert_conv_fc(blobs, state_dict,
'res{}_{}_branch2{}'.format(i + 1, j, letter),
'layer{}.{}.conv{}'.format(i, j, k + 1),
converted_names)
convert_bn(blobs, state_dict,
'res{}_{}_branch2{}_bn'.format(i + 1, j, letter),
'layer{}.{}.bn{}'.format(i, j,
k + 1), converted_names)
# check if all layers are converted
for key in blobs:
if key not in converted_names:
print('Not Convert: {}'.format(key))
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument('src', help='src detectron model path')
parser.add_argument('dst', help='save path')
parser.add_argument('depth', type=int, help='ResNet model depth')
args = parser.parse_args()
convert(args.src, args.dst, args.depth)
if __name__ == '__main__':
main()
| 3,830 | 42.044944 | 78 |
py
|
s2anet
|
s2anet-master/tools/convert_datasets/pascal_voc.py
|
import argparse
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
import numpy as np
from mmdet.core import voc_classes
label_ids = {name: i + 1 for i, name in enumerate(voc_classes())}
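# example usage (paths are hypothetical):
#   python tools/convert_datasets/pascal_voc.py data/VOCdevkit -o data/voc_annotations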
def parse_xml(args):
xml_path, img_path = args
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
bboxes = []
labels = []
bboxes_ignore = []
labels_ignore = []
for obj in root.findall('object'):
name = obj.find('name').text
label = label_ids[name]
difficult = int(obj.find('difficult').text)
bnd_box = obj.find('bndbox')
bbox = [
int(bnd_box.find('xmin').text),
int(bnd_box.find('ymin').text),
int(bnd_box.find('xmax').text),
int(bnd_box.find('ymax').text)
]
if difficult:
bboxes_ignore.append(bbox)
labels_ignore.append(label)
else:
bboxes.append(bbox)
labels.append(label)
if not bboxes:
bboxes = np.zeros((0, 4))
labels = np.zeros((0, ))
else:
bboxes = np.array(bboxes, ndmin=2) - 1
labels = np.array(labels)
if not bboxes_ignore:
bboxes_ignore = np.zeros((0, 4))
labels_ignore = np.zeros((0, ))
else:
bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
labels_ignore = np.array(labels_ignore)
annotation = {
'filename': img_path,
'width': w,
'height': h,
'ann': {
'bboxes': bboxes.astype(np.float32),
'labels': labels.astype(np.int64),
'bboxes_ignore': bboxes_ignore.astype(np.float32),
'labels_ignore': labels_ignore.astype(np.int64)
}
}
return annotation
def cvt_annotations(devkit_path, years, split, out_file):
if not isinstance(years, list):
years = [years]
annotations = []
for year in years:
filelist = osp.join(devkit_path,
'VOC{}/ImageSets/Main/{}.txt'.format(year, split))
if not osp.isfile(filelist):
print('filelist does not exist: {}, skip voc{} {}'.format(
filelist, year, split))
return
img_names = mmcv.list_from_file(filelist)
xml_paths = [
osp.join(devkit_path,
'VOC{}/Annotations/{}.xml'.format(year, img_name))
for img_name in img_names
]
img_paths = [
'VOC{}/JPEGImages/{}.jpg'.format(year, img_name)
for img_name in img_names
]
part_annotations = mmcv.track_progress(parse_xml,
list(zip(xml_paths, img_paths)))
annotations.extend(part_annotations)
mmcv.dump(annotations, out_file)
return annotations
def parse_args():
parser = argparse.ArgumentParser(
description='Convert PASCAL VOC annotations to mmdetection format')
parser.add_argument('devkit_path', help='pascal voc devkit path')
parser.add_argument('-o', '--out-dir', help='output path')
args = parser.parse_args()
return args
def main():
args = parse_args()
devkit_path = args.devkit_path
out_dir = args.out_dir if args.out_dir else devkit_path
mmcv.mkdir_or_exist(out_dir)
years = []
if osp.isdir(osp.join(devkit_path, 'VOC2007')):
years.append('2007')
if osp.isdir(osp.join(devkit_path, 'VOC2012')):
years.append('2012')
if '2007' in years and '2012' in years:
years.append(['2007', '2012'])
if not years:
raise IOError('The devkit path {} contains neither "VOC2007" nor '
'"VOC2012" subfolder'.format(devkit_path))
for year in years:
if year == '2007':
prefix = 'voc07'
elif year == '2012':
prefix = 'voc12'
elif year == ['2007', '2012']:
prefix = 'voc0712'
for split in ['train', 'val', 'trainval']:
dataset_name = prefix + '_' + split
print('processing {} ...'.format(dataset_name))
cvt_annotations(devkit_path, year, split,
osp.join(out_dir, dataset_name + '.pkl'))
if not isinstance(year, list):
dataset_name = prefix + '_test'
print('processing {} ...'.format(dataset_name))
cvt_annotations(devkit_path, year, 'test',
osp.join(out_dir, dataset_name + '.pkl'))
print('Done!')
if __name__ == '__main__':
main()
| 4,612 | 31.485915 | 79 |
py
|
s2anet
|
s2anet-master/DOTA_devkit/dota_evaluation_task1.py
|
# --------------------------------------------------------
# dota_evaluation_task1
# Licensed under The MIT License [see LICENSE for details]
# Written by Jian Ding, based on code from Bharath Hariharan
# --------------------------------------------------------
"""
To use the code, users should configure detpath, annopath and imagesetfile
detpath is the path for 15 result files, for the format, you can refer to "http://captain.whu.edu.cn/DOTAweb/tasks.html"
search for PATH_TO_BE_CONFIGURED to config the paths
Note, the evaluation is on the large scale images
"""
# import cPickle
import numpy as np
try:
from polyiou import polyiou
except:
from DOTA_devkit.polyiou import polyiou
def parse_gt(filename):
"""
:param filename: ground truth file to parse
:return: all instances in a picture
"""
objects = []
with open(filename, 'r') as f:
while True:
line = f.readline()
if line:
splitlines = line.strip().split(' ')
object_struct = {}
if (len(splitlines) < 9):
continue
object_struct['name'] = splitlines[8]
if (len(splitlines) == 9):
object_struct['difficult'] = 0
elif (len(splitlines) == 10):
object_struct['difficult'] = int(splitlines[9])
object_struct['bbox'] = [float(splitlines[0]),
float(splitlines[1]),
float(splitlines[2]),
float(splitlines[3]),
float(splitlines[4]),
float(splitlines[5]),
float(splitlines[6]),
float(splitlines[7])]
objects.append(object_struct)
else:
break
return objects
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
# cachedir,
ovthresh=0.5,
use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
# if not os.path.isdir(cachedir):
# os.mkdir(cachedir)
# cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
recs = {}
for i, imagename in enumerate(imagenames):
# print('parse_files name: ', annopath.format(imagename))
recs[imagename] = parse_gt(annopath.format(imagename))
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets from Task1* files
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
    # note: this fancy indexing only works on numpy arrays, not on python lists
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
## compute det bb with each BBGT
if BBGT.size > 0:
# compute overlaps
# intersection
            # 1. calculate the overlaps between hbbs; if the iou between hbbs is 0, the iou between obbs is 0, too.
# pdb.set_trace()
BBGT_xmin = np.min(BBGT[:, 0::2], axis=1)
BBGT_ymin = np.min(BBGT[:, 1::2], axis=1)
BBGT_xmax = np.max(BBGT[:, 0::2], axis=1)
BBGT_ymax = np.max(BBGT[:, 1::2], axis=1)
bb_xmin = np.min(bb[0::2])
bb_ymin = np.min(bb[1::2])
bb_xmax = np.max(bb[0::2])
bb_ymax = np.max(bb[1::2])
ixmin = np.maximum(BBGT_xmin, bb_xmin)
iymin = np.maximum(BBGT_ymin, bb_ymin)
ixmax = np.minimum(BBGT_xmax, bb_xmax)
iymax = np.minimum(BBGT_ymax, bb_ymax)
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb_xmax - bb_xmin + 1.) * (bb_ymax - bb_ymin + 1.) +
(BBGT_xmax - BBGT_xmin + 1.) *
(BBGT_ymax - BBGT_ymin + 1.) - inters)
overlaps = inters / uni
BBGT_keep_mask = overlaps > 0
BBGT_keep = BBGT[BBGT_keep_mask, :]
BBGT_keep_index = np.where(overlaps > 0)[0]
def calcoverlaps(BBGT_keep, bb):
overlaps = []
for index, GT in enumerate(BBGT_keep):
overlap = polyiou.iou_poly(polyiou.VectorDouble(BBGT_keep[index]), polyiou.VectorDouble(bb))
overlaps.append(overlap)
return overlaps
if len(BBGT_keep) > 0:
overlaps = calcoverlaps(BBGT_keep, bb)
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
# pdb.set_trace()
jmax = BBGT_keep_index[jmax]
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
print('check fp:', fp)
print('check tp', tp)
print('npos num:', npos)
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
def main():
detpath = r'/home/hjm/mmdetection/work_dirs/cascade_s2anet_r50_fpn_1x_dota/results_after_nms/{:s}.txt'
annopath = r'data/dota/test/labelTxt/{:s}.txt' # change the directory to the path of val/labelTxt, if you want to do evaluation on the valset
imagesetfile = r'data/dota/test/test.txt'
# For DOTA-v1.5
# classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
# 'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool', 'helicopter', 'container-crane']
# For DOTA-v1.0
classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship',
'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool',
'helicopter']
classaps = []
map = 0
for classname in classnames:
print('classname:', classname)
rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
ovthresh=0.5,
use_07_metric=True)
map = map + ap
# print('rec: ', rec, 'prec: ', prec, 'ap: ', ap)
print('ap: ', ap)
classaps.append(ap)
    # # uncomment to show p-r curve of each category
# plt.figure(figsize=(8,4))
# plt.xlabel('Recall')
# plt.ylabel('Precision')
# plt.xticks(fontsize=11)
# plt.yticks(fontsize=11)
# plt.xlim(0, 1)
# plt.ylim(0, 1)
# ax = plt.gca()
# ax.spines['top'].set_color('none')
# ax.spines['right'].set_color('none')
# plt.plot(rec, prec)
# # plt.show()
# plt.savefig('pr_curve/{}.png'.format(classname))
map = map / len(classnames)
print('map:', map)
classaps = 100 * np.array(classaps)
print('classaps: ', classaps)
if __name__ == '__main__':
main()
| 11,008 | 35.333333 | 149 |
py
|
s2anet
|
s2anet-master/DOTA_devkit/SplitOnlyImage_multi_process.py
|
import copy
import os
from functools import partial
from multiprocessing import Pool
import cv2
import dota_utils as util
import numpy as np
def split_single_warp(name, split_base, rate, extent):
split_base.SplitSingle(name, rate, extent)
class splitbase():
def __init__(self,
srcpath,
dstpath,
gap=100,
subsize=1024,
ext='.png',
padding=True,
num_process=32):
self.srcpath = srcpath
self.outpath = dstpath
self.gap = gap
self.subsize = subsize
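        # the crop window advances by subsize - gap pixels, so neighbouring
        # patches overlap by `gap` pixels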
self.slide = self.subsize - self.gap
self.srcpath = srcpath
self.dstpath = dstpath
self.ext = ext
self.padding = padding
self.pool = Pool(num_process)
if not os.path.isdir(self.outpath):
os.mkdir(self.outpath)
def saveimagepatches(self, img, subimgname, left, up, ext='.png'):
subimg = copy.deepcopy(
img[up: (up + self.subsize), left: (left + self.subsize)])
outdir = os.path.join(self.dstpath, subimgname + ext)
h, w, c = np.shape(subimg)
if (self.padding):
outimg = np.zeros((self.subsize, self.subsize, 3))
outimg[0:h, 0:w, :] = subimg
cv2.imwrite(outdir, outimg)
else:
cv2.imwrite(outdir, subimg)
def SplitSingle(self, name, rate, extent):
img = cv2.imread(os.path.join(self.srcpath, name + extent))
assert np.shape(img) != ()
if (rate != 1):
resizeimg = cv2.resize(
img, None, fx=rate, fy=rate, interpolation=cv2.INTER_CUBIC)
else:
resizeimg = img
outbasename = name + '__' + str(rate) + '__'
weight = np.shape(resizeimg)[1]
height = np.shape(resizeimg)[0]
# if (max(weight, height) < self.subsize/2):
# return
left, up = 0, 0
while (left < weight):
if (left + self.subsize >= weight):
left = max(weight - self.subsize, 0)
up = 0
while (up < height):
if (up + self.subsize >= height):
up = max(height - self.subsize, 0)
subimgname = outbasename + str(left) + '___' + str(up)
self.saveimagepatches(resizeimg, subimgname, left, up)
if (up + self.subsize >= height):
break
else:
up = up + self.slide
if (left + self.subsize >= weight):
break
else:
left = left + self.slide
def splitdata(self, rate):
imagelist = util.GetFileFromThisRootDir(self.srcpath)
imagenames = [util.custombasename(x) for x in imagelist if (
util.custombasename(x) != 'Thumbs')]
# worker = partial(self.SplitSingle, rate=rate, extent=self.ext)
worker = partial(split_single_warp, split_base=self,
rate=rate, extent=self.ext)
self.pool.map(worker, imagenames)
#
# for name in imagenames:
# self.SplitSingle(name, rate, self.ext)
def __getstate__(self):
self_dict = self.__dict__.copy()
del self_dict['pool']
return self_dict
def __setstate__(self, state):
self.__dict__.update(state)
if __name__ == '__main__':
split = splitbase(r'data/dota/test/images', r'/workfs/jmhan/dota_ms_1024/test_split/images',
gap=200, subsize=1024, num_process=32)
split.splitdata(1)
split.splitdata(0.5)
split.splitdata(1.5)
| 3,649 | 30.73913 | 96 |
py
|
s2anet
|
s2anet-master/DOTA_devkit/dota-v1.5_evaluation_task2.py
|
# --------------------------------------------------------
# dota-v1.5_evaluation_task2
# Licensed under The MIT License [see LICENSE for details]
# Written by Jian Ding, based on code from Bharath Hariharan
# --------------------------------------------------------
"""
To use the code, users should configure detpath, annopath and imagesetfile
detpath is the path for 15 result files, for the format, you can refer to "http://captain.whu.edu.cn/DOTAweb/tasks.html"
search for PATH_TO_BE_CONFIGURED to config the paths
Note, the evaluation is on the large scale images
"""
import numpy as np
def parse_gt(filename):
objects = []
with open(filename, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
for splitline in splitlines:
object_struct = {}
object_struct['name'] = splitline[8]
object_struct['difficult'] = 0
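            # for the HBB task the 8 coordinates are assumed to describe an
            # axis-aligned box, so corners 1 and 3 give xmin/ymin and xmax/ymax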
object_struct['bbox'] = [int(float(splitline[0])),
int(float(splitline[1])),
int(float(splitline[4])),
int(float(splitline[5]))]
w = int(float(splitline[4])) - int(float(splitline[0]))
h = int(float(splitline[5])) - int(float(splitline[1]))
object_struct['area'] = w * h
objects.append(object_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
# cachedir,
ovthresh=0.5,
use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
# print('imagenames: ', imagenames)
# if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
# print('parse_files name: ', annopath.format(imagename))
recs[imagename] = parse_gt(annopath.format(imagename))
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(BBGT[:, 2] - BBGT[:, 0] + 1.) *
(BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
# if there exist 2
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
print('check fp:', fp)
print('check tp', tp)
print('npos num:', npos)
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
def main():
detpath = r'PATH_TO_BE_CONFIGURED/Task2_{:s}.txt'
annopath = r'PATH_TO_BE_CONFIGURED/{:s}.txt' # change the directory to the path of val/labelTxt, if you want to do evaluation on the valset
imagesetfile = r'PATH_TO_BE_CONFIGURED/valset.txt'
# For DOTA v1.5
classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship',
'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool',
'helicopter', 'container-crane']
# For DOTA v1.0
# classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
# 'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool', 'helicopter']
classaps = []
map = 0
for classname in classnames:
print('classname:', classname)
rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
ovthresh=0.5,
use_07_metric=True)
map = map + ap
# print('rec: ', rec, 'prec: ', prec, 'ap: ', ap)
print('ap: ', ap)
classaps.append(ap)
# uncomment to plot p-r curve for each category
# plt.figure(figsize=(8,4))
# plt.xlabel('recall')
# plt.ylabel('precision')
# plt.plot(rec, prec)
# plt.show()
map = map / len(classnames)
print('map:', map)
classaps = 100 * np.array(classaps)
print('classaps: ', classaps)
if __name__ == '__main__':
main()
| 8,788 | 35.774059 | 144 |
py
|
s2anet
|
s2anet-master/DOTA_devkit/dota-v1.5_evaluation_task1.py
|
# --------------------------------------------------------
# dota_evaluation_task1
# Licensed under The MIT License [see LICENSE for details]
# Written by Jian Ding, based on code from Bharath Hariharan
# --------------------------------------------------------
"""
To use the code, users should configure detpath, annopath and imagesetfile
detpath is the path template for the per-class result files; for the format, you can refer to "http://captain.whu.edu.cn/DOTAweb/tasks.html"
search for PATH_TO_BE_CONFIGURED to configure the paths
Note, the evaluation is done on the original large-scale images
"""
import numpy as np
try:
from polyiou import polyiou
except:
from DOTA_devkit.polyiou import polyiou
def parse_gt(filename):
"""
:param filename: ground truth file to parse
:return: all instances in a picture
"""
objects = []
with open(filename, 'r') as f:
while True:
line = f.readline()
if line:
splitlines = line.strip().split(' ')
object_struct = {}
if (len(splitlines) < 9):
continue
object_struct['name'] = splitlines[8]
# if (len(splitlines) == 9):
# object_struct['difficult'] = 0
# elif (len(splitlines) == 10):
# object_struct['difficult'] = int(splitlines[9])
object_struct['difficult'] = 0
object_struct['bbox'] = [float(splitlines[0]),
float(splitlines[1]),
float(splitlines[2]),
float(splitlines[3]),
float(splitlines[4]),
float(splitlines[5]),
float(splitlines[6]),
float(splitlines[7])]
objects.append(object_struct)
else:
break
return objects
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
# cachedir,
ovthresh=0.5,
use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
# if not os.path.isdir(cachedir):
# os.mkdir(cachedir)
# cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
# print('imagenames: ', imagenames)
# if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
# print('parse_files name: ', annopath.format(imagename))
recs[imagename] = parse_gt(annopath.format(imagename))
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
        difficult = np.array([x['difficult'] for x in R]).astype(bool)  # np.bool is deprecated/removed in recent NumPy
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets from Task1* files
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
# print('check confidence: ', confidence)
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
# print('check sorted_scores: ', sorted_scores)
# print('check sorted_ind: ', sorted_ind)
    # note: this fancy indexing works on numpy arrays, not on Python lists
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# print('check imge_ids: ', image_ids)
# print('imge_ids len:', len(image_ids))
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
## compute det bb with each BBGT
if BBGT.size > 0:
# compute overlaps
# intersection
            # 1. compute the overlaps between the horizontal bounding boxes (HBBs); if the IoU between two HBBs is 0, the IoU between the corresponding OBBs is 0 as well.
# pdb.set_trace()
BBGT_xmin = np.min(BBGT[:, 0::2], axis=1)
BBGT_ymin = np.min(BBGT[:, 1::2], axis=1)
BBGT_xmax = np.max(BBGT[:, 0::2], axis=1)
BBGT_ymax = np.max(BBGT[:, 1::2], axis=1)
bb_xmin = np.min(bb[0::2])
bb_ymin = np.min(bb[1::2])
bb_xmax = np.max(bb[0::2])
bb_ymax = np.max(bb[1::2])
ixmin = np.maximum(BBGT_xmin, bb_xmin)
iymin = np.maximum(BBGT_ymin, bb_ymin)
ixmax = np.minimum(BBGT_xmax, bb_xmax)
iymax = np.minimum(BBGT_ymax, bb_ymax)
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb_xmax - bb_xmin + 1.) * (bb_ymax - bb_ymin + 1.) +
(BBGT_xmax - BBGT_xmin + 1.) *
(BBGT_ymax - BBGT_ymin + 1.) - inters)
overlaps = inters / uni
BBGT_keep_mask = overlaps > 0
BBGT_keep = BBGT[BBGT_keep_mask, :]
BBGT_keep_index = np.where(overlaps > 0)[0]
# pdb.set_trace()
def calcoverlaps(BBGT_keep, bb):
overlaps = []
for index, GT in enumerate(BBGT_keep):
overlap = polyiou.iou_poly(polyiou.VectorDouble(BBGT_keep[index]), polyiou.VectorDouble(bb))
overlaps.append(overlap)
return overlaps
if len(BBGT_keep) > 0:
overlaps = calcoverlaps(BBGT_keep, bb)
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
# pdb.set_trace()
jmax = BBGT_keep_index[jmax]
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
print('check fp:', fp)
print('check tp', tp)
print('npos num:', npos)
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
def main():
detpath = r'work_dirs/temp/result_merge/Task1_{:s}.txt'
annopath = r'data/dota15/test/dota1_5_labelTxt/{:s}.txt'
imagesetfile = r'data/dota15/test/testset.txt'
# For DOTA-v1.5
classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship',
'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool',
'helicopter', 'container-crane']
classaps = []
map = 0
for classname in classnames:
print('classname:', classname)
rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
ovthresh=0.5,
use_07_metric=True)
map = map + ap
# print('rec: ', rec, 'prec: ', prec, 'ap: ', ap)
print('ap: ', ap)
classaps.append(ap)
        # uncomment to plot the p-r curve for each category
# plt.figure(figsize=(8,4))
# plt.xlabel('recall')
# plt.ylabel('precision')
# plt.plot(rec, prec)
# plt.show()
map = map / len(classnames)
print('map:', map)
classaps = 100 * np.array(classaps)
print('classaps: ', classaps)
if __name__ == '__main__':
main()
| 10,688 | 34.277228 | 124 |
py
|
s2anet
|
s2anet-master/DOTA_devkit/hrsc2016_evaluation.py
|
# --------------------------------------------------------
# dota_evaluation_task1
# Licensed under The MIT License [see LICENSE for details]
# Written by Jian Ding, based on code from Bharath Hariharan
# --------------------------------------------------------
"""
To use the code, users should configure detpath, annopath and imagesetfile
detpath is the path template for the per-class result files; for the format, you can refer to "http://captain.whu.edu.cn/DOTAweb/tasks.html"
search for PATH_TO_BE_CONFIGURED to configure the paths
Note, the evaluation is done on the original large-scale images
"""
import xml.etree.ElementTree as ET
import numpy as np
try:
from polyiou import polyiou
except:
from DOTA_devkit.polyiou import polyiou
from mmdet.core import rotated_box_to_poly_single
def parse_gt(filename):
objects = []
tree = ET.parse(filename)
root = tree.getroot()
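    # Each HRSC_Object node stores a rotated box as (mbox_cx, mbox_cy, mbox_w,
    # mbox_h, mbox_ang); it is converted below to an 8-point polygon so the same
    # polygon-IoU based evaluation as for DOTA can be reused.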
for obj in root.findall('HRSC_Objects')[0].findall('HRSC_Object'):
object_struct = {}
object_struct['name'] = 'ship'
object_struct['difficult'] = int(obj.find('difficult').text)
bbox = []
for key in ['mbox_cx', 'mbox_cy', 'mbox_w', 'mbox_h', 'mbox_ang']:
bbox.append(obj.find(key).text)
# Coordinates may be float type
cx, cy, w, h, a = list(map(float, bbox))
bbox = [cx, cy, w, h, a]
poly = rotated_box_to_poly_single(bbox)
object_struct['bbox'] = poly.tolist()
objects.append(object_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
# cachedir,
ovthresh=0.5,
use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
# if not os.path.isdir(cachedir):
# os.mkdir(cachedir)
# cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
# print('imagenames: ', imagenames)
# if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
# print('parse_files name: ', annopath.format(imagename))
recs[imagename] = parse_gt(annopath.format(imagename))
# if i % 100 == 0:
# print ('Reading annotation for {:d}/{:d}'.format(
# i + 1, len(imagenames)) )
# save
# print ('Saving cached annotations to {:s}'.format(cachefile))
# with open(cachefile, 'w') as f:
# cPickle.dump(recs, f)
# else:
# load
# with open(cachefile, 'r') as f:
# recs = cPickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
        difficult = np.array([x['difficult'] for x in R]).astype(bool)  # np.bool is deprecated/removed in recent NumPy
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets from Task1* files
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
# print('check confidence: ', confidence)
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
# print('check sorted_scores: ', sorted_scores)
# print('check sorted_ind: ', sorted_ind)
    # note: this fancy indexing works on numpy arrays, not on Python lists
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# print('check imge_ids: ', image_ids)
# print('imge_ids len:', len(image_ids))
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
## compute det bb with each BBGT
if BBGT.size > 0:
# compute overlaps
# intersection
            # 1. compute the overlaps between the horizontal bounding boxes (HBBs); if the IoU between two HBBs is 0, the IoU between the corresponding OBBs is 0 as well.
# pdb.set_trace()
BBGT_xmin = np.min(BBGT[:, 0::2], axis=1)
BBGT_ymin = np.min(BBGT[:, 1::2], axis=1)
BBGT_xmax = np.max(BBGT[:, 0::2], axis=1)
BBGT_ymax = np.max(BBGT[:, 1::2], axis=1)
bb_xmin = np.min(bb[0::2])
bb_ymin = np.min(bb[1::2])
bb_xmax = np.max(bb[0::2])
bb_ymax = np.max(bb[1::2])
ixmin = np.maximum(BBGT_xmin, bb_xmin)
iymin = np.maximum(BBGT_ymin, bb_ymin)
ixmax = np.minimum(BBGT_xmax, bb_xmax)
iymax = np.minimum(BBGT_ymax, bb_ymax)
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb_xmax - bb_xmin + 1.) * (bb_ymax - bb_ymin + 1.) +
(BBGT_xmax - BBGT_xmin + 1.) *
(BBGT_ymax - BBGT_ymin + 1.) - inters)
overlaps = inters / uni
BBGT_keep_mask = overlaps > 0
BBGT_keep = BBGT[BBGT_keep_mask, :]
BBGT_keep_index = np.where(overlaps > 0)[0]
# pdb.set_trace()
def calcoverlaps(BBGT_keep, bb):
overlaps = []
for index, GT in enumerate(BBGT_keep):
overlap = polyiou.iou_poly(polyiou.VectorDouble(BBGT_keep[index]), polyiou.VectorDouble(bb))
overlaps.append(overlap)
return overlaps
if len(BBGT_keep) > 0:
overlaps = calcoverlaps(BBGT_keep, bb)
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
# pdb.set_trace()
jmax = BBGT_keep_index[jmax]
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
print('check fp:', fp)
print('check tp', tp)
print('npos num:', npos)
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
def main():
detpath = r'work_dirs/s2anet_r50_fpn_3x_hrsc2016/result_raw/Task1_{:s}.txt'
annopath = r'data/HRSC2016/Test/labelTxt/{:s}.txt' # change the directory to the path of val/labelTxt, if you want to do evaluation on the valset
imagesetfile = r'data/HRSC2016/Test/test.txt'
# For HRSC2016
classnames = ['ship']
classaps = []
map = 0
for classname in classnames:
print('classname:', classname)
rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
ovthresh=0.5,
use_07_metric=True)
map = map + ap
# print('rec: ', rec, 'prec: ', prec, 'ap: ', ap)
print('ap: ', ap)
classaps.append(ap)
        # uncomment to plot the p-r curve for each category
# plt.figure(figsize=(8,4))
# plt.xlabel('recall')
# plt.ylabel('precision')
# plt.plot(rec, prec)
# plt.show()
map = map / len(classnames)
print('map:', map)
classaps = 100 * np.array(classaps)
print('classaps: ', classaps)
if __name__ == '__main__':
main()
| 10,349 | 33.5 | 150 |
py
|
s2anet
|
s2anet-master/DOTA_devkit/dota_evaluation_task2.py
|
# --------------------------------------------------------
# dota_evaluation_task1
# Licensed under The MIT License [see LICENSE for details]
# Written by Jian Ding, based on code from Bharath Hariharan
# --------------------------------------------------------
"""
To use the code, users should configure detpath, annopath and imagesetfile
detpath is the path template for the per-class result files; for the format, you can refer to "http://captain.whu.edu.cn/DOTAweb/tasks.html"
search for PATH_TO_BE_CONFIGURED to configure the paths
Note, the evaluation is done on the original large-scale images
"""
import numpy as np
def parse_gt(filename):
objects = []
with open(filename, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
for splitline in splitlines:
object_struct = {}
object_struct['name'] = splitline[8]
if (len(splitline) == 9):
object_struct['difficult'] = 0
elif (len(splitline) == 10):
object_struct['difficult'] = int(splitline[9])
# object_struct['difficult'] = 0
object_struct['bbox'] = [int(float(splitline[0])),
int(float(splitline[1])),
int(float(splitline[4])),
int(float(splitline[5]))]
w = int(float(splitline[4])) - int(float(splitline[0]))
h = int(float(splitline[5])) - int(float(splitline[1]))
object_struct['area'] = w * h
objects.append(object_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
# cachedir,
ovthresh=0.5,
use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_gt(annopath.format(imagename))
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
        difficult = np.array([x['difficult'] for x in R]).astype(bool)  # np.bool is deprecated/removed in recent NumPy
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(BBGT[:, 2] - BBGT[:, 0] + 1.) *
(BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
            # index of the ground-truth box with the highest overlap
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
print('check fp:', fp)
print('check tp', tp)
print('npos num:', npos)
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
def main():
# detpath = r'E:\documentation\OneDrive\documentation\DotaEvaluation\evluation_task2\evluation_task2\faster-rcnn-nms_0.3_task2\nms_0.3_task\Task2_{:s}.txt'
# annopath = r'I:\dota\testset\ReclabelTxt-utf-8\{:s}.txt'
# imagesetfile = r'I:\dota\testset\va.txt'
detpath = r'work_dirs/temp/result_merge/Task2_{:s}.txt'
annopath = r'data/dota/test/hbb_label_txt/{:s}.txt' # change the directory to the path of val/labelTxt, if you want to do evaluation on the valset
imagesetfile = r'data/dota/test/testset.txt'
classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship',
'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool',
'helicopter']
classaps = []
map = 0
for classname in classnames:
print('classname:', classname)
rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
ovthresh=0.5,
use_07_metric=True)
map = map + ap
# print('rec: ', rec, 'prec: ', prec, 'ap: ', ap)
print('ap: ', ap)
classaps.append(ap)
## uncomment to plot p-r curve for each category
# plt.figure(figsize=(8,4))
# plt.xlabel('recall')
# plt.ylabel('precision')
# plt.plot(rec, prec)
# plt.show()
map = map / len(classnames)
print('map:', map)
classaps = 100 * np.array(classaps)
print('classaps: ', classaps)
if __name__ == '__main__':
main()
| 8,788 | 35.020492 | 159 |
py
|
s2anet
|
s2anet-master/DOTA_devkit/ResultMerge_multi_process.py
|
"""
To use the code, users should configure detpath, annopath and imagesetfile
detpath is the path template for the per-class result files; for the format, you can refer to "http://captain.whu.edu.cn/DOTAweb/tasks.html"
search for PATH_TO_BE_CONFIGURED to configure the paths
Note, the evaluation is done on the original large-scale images
"""
import os
import re
import sys
import numpy as np
sys.path.insert(0, '..')
import DOTA_devkit.dota_utils as util
import DOTA_devkit.polyiou.polyiou as polyiou
import pdb
import math
from multiprocessing import Pool
from functools import partial
# IoU threshold for NMS when merging detections from the split sub-images
nms_thresh = 0.1
def py_cpu_nms_poly(dets, thresh):
scores = dets[:, 8]
polys = []
areas = []
for i in range(len(dets)):
tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1],
dets[i][2], dets[i][3],
dets[i][4], dets[i][5],
dets[i][6], dets[i][7]])
polys.append(tm_polygon)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
ovr = []
i = order[0]
keep.append(i)
for j in range(order.size - 1):
iou = polyiou.iou_poly(polys[i], polys[order[j + 1]])
ovr.append(iou)
ovr = np.array(ovr)
# print('ovr: ', ovr)
# print('thresh: ', thresh)
try:
if math.isnan(ovr[0]):
pdb.set_trace()
except:
pass
inds = np.where(ovr <= thresh)[0]
# print('inds: ', inds)
order = order[inds + 1]
return keep
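# The "fast" variant below first computes the IoU between the axis-aligned
# bounding boxes of the polygons; the exact (and much slower) polygon IoU from
# polyiou is only evaluated for pairs whose horizontal-box IoU is larger than 0.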
def py_cpu_nms_poly_fast(dets, thresh):
obbs = dets[:, 0:-1]
x1 = np.min(obbs[:, 0::2], axis=1)
y1 = np.min(obbs[:, 1::2], axis=1)
x2 = np.max(obbs[:, 0::2], axis=1)
y2 = np.max(obbs[:, 1::2], axis=1)
scores = dets[:, 8]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
polys = []
for i in range(len(dets)):
tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1],
dets[i][2], dets[i][3],
dets[i][4], dets[i][5],
dets[i][6], dets[i][7]])
polys.append(tm_polygon)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
ovr = []
i = order[0]
keep.append(i)
# if order.size == 0:
# break
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
# w = np.maximum(0.0, xx2 - xx1 + 1)
# h = np.maximum(0.0, yy2 - yy1 + 1)
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
hbb_inter = w * h
hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)
# h_keep_inds = np.where(hbb_ovr == 0)[0]
h_inds = np.where(hbb_ovr > 0)[0]
tmp_order = order[h_inds + 1]
for j in range(tmp_order.size):
iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]])
hbb_ovr[h_inds[j]] = iou
# ovr.append(iou)
# ovr_index.append(tmp_order[j])
# ovr = np.array(ovr)
# ovr_index = np.array(ovr_index)
# print('ovr: ', ovr)
# print('thresh: ', thresh)
try:
if math.isnan(ovr[0]):
pdb.set_trace()
except:
pass
inds = np.where(hbb_ovr <= thresh)[0]
# order_obb = ovr_index[inds]
# print('inds: ', inds)
# order_hbb = order[h_keep_inds + 1]
order = order[inds + 1]
# pdb.set_trace()
# order = np.concatenate((order_obb, order_hbb), axis=0).astype(np.int)
return keep
def py_cpu_nms(dets, thresh):
"""Pure Python NMS baseline."""
# print('dets:', dets)
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
## index for dets
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
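# Group detections by original image name and run the given NMS function on the
# detections of each image separately.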
def nmsbynamedict(nameboxdict, nms, thresh):
nameboxnmsdict = {x: [] for x in nameboxdict}
for imgname in nameboxdict:
# print('imgname:', imgname)
# keep = py_cpu_nms(np.array(nameboxdict[imgname]), thresh)
# print('type nameboxdict:', type(nameboxnmsdict))
# print('type imgname:', type(imgname))
# print('type nms:', type(nms))
keep = nms(np.array(nameboxdict[imgname]), thresh)
# print('keep:', keep)
outdets = []
# print('nameboxdict[imgname]: ', nameboxnmsdict[imgname])
for index in keep:
# print('index:', index)
outdets.append(nameboxdict[imgname][index])
nameboxnmsdict[imgname] = outdets
return nameboxnmsdict
def poly2origpoly(poly, x, y, rate):
origpoly = []
for i in range(int(len(poly) / 2)):
tmp_x = float(poly[i * 2] + x) / float(rate)
tmp_y = float(poly[i * 2 + 1] + y) / float(rate)
origpoly.append(tmp_x)
origpoly.append(tmp_y)
return origpoly
def mergesingle(dstpath, nms, fullname):
name = util.custombasename(fullname)
# print('name:', name)
dstname = os.path.join(dstpath, name + '.txt')
print(dstname)
with open(fullname, 'r') as f_in:
nameboxdict = {}
lines = f_in.readlines()
splitlines = [x.strip().split(' ') for x in lines]
for splitline in splitlines:
subname = splitline[0]
splitname = subname.split('__')
oriname = splitname[0]
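            # Sub-image names produced by the split tools look like
            # '<origname>__<rate>__<left>___<up>'; recover the crop offset
            # (x, y) and the resize rate from the name so that the detected
            # polygons can be mapped back to the original image.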
pattern1 = re.compile(r'__\d+___\d+')
# print('subname:', subname)
x_y = re.findall(pattern1, subname)
x_y_2 = re.findall(r'\d+', x_y[0])
x, y = int(x_y_2[0]), int(x_y_2[1])
pattern2 = re.compile(r'__([\d+\.]+)__\d+___')
rate = re.findall(pattern2, subname)[0]
confidence = splitline[1]
poly = list(map(float, splitline[2:]))
origpoly = poly2origpoly(poly, x, y, rate)
det = origpoly
det.append(confidence)
det = list(map(float, det))
if (oriname not in nameboxdict):
nameboxdict[oriname] = []
nameboxdict[oriname].append(det)
nameboxnmsdict = nmsbynamedict(nameboxdict, nms, nms_thresh)
with open(dstname, 'w') as f_out:
for imgname in nameboxnmsdict:
for det in nameboxnmsdict[imgname]:
# print('det:', det)
confidence = det[-1]
bbox = det[0:-1]
outline = imgname + ' ' + str(confidence) + ' ' + ' '.join(map(str, bbox))
# print('outline:', outline)
f_out.write(outline + '\n')
def mergebase_parallel(srcpath, dstpath, nms):
pool = Pool(16)
filelist = util.GetFileFromThisRootDir(srcpath)
mergesingle_fn = partial(mergesingle, dstpath, nms)
# pdb.set_trace()
pool.map(mergesingle_fn, filelist)
def mergebase(srcpath, dstpath, nms):
filelist = util.GetFileFromThisRootDir(srcpath)
for filename in filelist:
mergesingle(dstpath, nms, filename)
def mergebyrec(srcpath, dstpath):
"""
srcpath: result files before merge and nms
dstpath: result files after merge and nms
"""
# srcpath = r'E:\bod-dataset\results\bod-v3_rfcn_2000000'
# dstpath = r'E:\bod-dataset\results\bod-v3_rfcn_2000000_nms'
mergebase(srcpath,
dstpath,
py_cpu_nms)
def mergebypoly(srcpath, dstpath):
"""
srcpath: result files before merge and nms
dstpath: result files after merge and nms
"""
# srcpath = r'/home/dingjian/evaluation_task1/result/faster-rcnn-59/comp4_test_results'
# dstpath = r'/home/dingjian/evaluation_task1/result/faster-rcnn-59/testtime'
# mergebase(srcpath,
# dstpath,
# py_cpu_nms_poly)
mergebase_parallel(srcpath,
dstpath,
py_cpu_nms_poly_fast)
if __name__ == '__main__':
mergebyrec(r'work_dirs/temp/result_raw', r'work_dirs/temp/result_task2')
# mergebyrec()
| 8,833 | 30.663082 | 124 |
py
|
s2anet
|
s2anet-master/DOTA_devkit/convert_dota_to_mmdet.py
|
import os
import os.path as osp
import mmcv
import numpy as np
from PIL import Image
from mmdet.core import poly_to_rotated_box_single
wordname_15 = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field',
'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout',
'harbor', 'swimming-pool', 'helicopter']
label_ids = {name: i + 1 for i, name in enumerate(wordname_15)}
def parse_ann_info(label_base_path, img_name):
lab_path = osp.join(label_base_path, img_name + '.txt')
bboxes, labels, bboxes_ignore, labels_ignore = [], [], [], []
with open(lab_path, 'r') as f:
for ann_line in f.readlines():
ann_line = ann_line.strip().split(' ')
bbox = [float(ann_line[i]) for i in range(8)]
# 8 point to 5 point xywha
bbox = tuple(poly_to_rotated_box_single(bbox).tolist())
class_name = ann_line[8]
difficult = int(ann_line[9])
# ignore difficult =2
if difficult == 0:
bboxes.append(bbox)
labels.append(label_ids[class_name])
elif difficult == 1:
bboxes_ignore.append(bbox)
labels_ignore.append(label_ids[class_name])
return bboxes, labels, bboxes_ignore, labels_ignore
def convert_dota_to_mmdet(src_path, out_path, trainval=True, filter_empty_gt=True, ext='.png'):
"""Generate .pkl format annotation that is consistent with mmdet.
Args:
src_path: dataset path containing images and labelTxt folders.
out_path: output pkl file path
trainval: trainval or test
"""
img_path = os.path.join(src_path, 'images')
label_path = os.path.join(src_path, 'labelTxt')
img_lists = os.listdir(img_path)
data_dict = []
for id, img in enumerate(img_lists):
img_info = {}
img_name = osp.splitext(img)[0]
label = os.path.join(label_path, img_name + '.txt')
img = Image.open(osp.join(img_path, img))
img_info['filename'] = img_name + ext
img_info['height'] = img.height
img_info['width'] = img.width
if trainval:
if not os.path.exists(label):
print('Label:' + img_name + '.txt' + ' Not Exist')
continue
# filter images without gt to speed up training
            if filter_empty_gt and (osp.getsize(label) == 0):
continue
bboxes, labels, bboxes_ignore, labels_ignore = parse_ann_info(label_path, img_name)
ann = {}
ann['bboxes'] = np.array(bboxes, dtype=np.float32)
ann['labels'] = np.array(labels, dtype=np.int64)
ann['bboxes_ignore'] = np.array(bboxes_ignore, dtype=np.float32)
ann['labels_ignore'] = np.array(labels_ignore, dtype=np.int64)
img_info['ann'] = ann
data_dict.append(img_info)
mmcv.dump(data_dict, out_path)
if __name__ == '__main__':
convert_dota_to_mmdet('data/dota_1024/trainval_split',
'data/dota_1024/trainval_split/trainval_s2anet.pkl')
convert_dota_to_mmdet('data/dota_1024/test_split',
'data/dota_1024/test_split/test_s2anet.pkl', trainval=False)
print('done!')
| 3,332 | 38.678571 | 95 |
py
|
s2anet
|
s2anet-master/DOTA_devkit/dota_utils.py
|
import codecs
import math
import os
import re
import sys
import numpy as np
import shapely.geometry as shgeo
"""
some basic functions which are useful for processing DOTA data
"""
wordname_15 = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship',
'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool',
'helicopter']
CLASSNAMES = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship',
'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool',
'helicopter', 'container-crane']
def custombasename(fullname):
return os.path.basename(os.path.splitext(fullname)[0])
def GetFileFromThisRootDir(dir, ext=None):
allfiles = []
needExtFilter = (ext != None)
for root, dirs, files in os.walk(dir):
for filespath in files:
filepath = os.path.join(root, filespath)
extension = os.path.splitext(filepath)[1][1:]
if needExtFilter and extension in ext:
allfiles.append(filepath)
elif not needExtFilter:
allfiles.append(filepath)
return allfiles
def TuplePoly2Poly(poly):
outpoly = [poly[0][0], poly[0][1],
poly[1][0], poly[1][1],
poly[2][0], poly[2][1],
poly[3][0], poly[3][1]
]
return outpoly
def parse_dota_poly(filename):
"""
parse the dota ground truth in the format:
[(x1, y1), (x2, y2), (x3, y3), (x4, y4)]
"""
objects = []
# print('filename:', filename)
f = []
if (sys.version_info >= (3, 5)):
fd = open(filename, 'r')
f = fd
    elif (sys.version_info >= (2, 7)):  # compare against a version tuple, not a float
fd = codecs.open(filename, 'r')
f = fd
# count = 0
while True:
line = f.readline()
# count = count + 1
# if count < 2:
# continue
if line:
splitlines = line.strip().split(' ')
object_struct = {}
# clear the wrong name after check all the data
# if (len(splitlines) >= 9) and (splitlines[8] in classname):
if (len(splitlines) < 9):
continue
if (len(splitlines) >= 9):
object_struct['name'] = splitlines[8]
if (len(splitlines) == 9):
object_struct['difficult'] = '0'
elif (len(splitlines) >= 10):
# if splitlines[9] == '1':
# if (splitlines[9] == 'tr'):
# object_struct['difficult'] = '1'
# else:
object_struct['difficult'] = splitlines[9]
# else:
# object_struct['difficult'] = 0
object_struct['poly'] = [(float(splitlines[0]), float(splitlines[1])),
(float(splitlines[2]),
float(splitlines[3])),
(float(splitlines[4]),
float(splitlines[5])),
(float(splitlines[6]),
float(splitlines[7]))
]
gtpoly = shgeo.Polygon(object_struct['poly'])
object_struct['area'] = gtpoly.area
objects.append(object_struct)
else:
break
return objects
def parse_dota_poly2(filename):
"""
parse the dota ground truth in the format:
[x1, y1, x2, y2, x3, y3, x4, y4]
"""
objects = parse_dota_poly(filename)
for obj in objects:
obj['poly'] = TuplePoly2Poly(obj['poly'])
obj['poly'] = list(map(int, obj['poly']))
return objects
def parse_dota_rec(filename):
"""
parse the dota ground truth in the bounding box format:
"xmin, ymin, xmax, ymax"
"""
objects = parse_dota_poly(filename)
for obj in objects:
poly = obj['poly']
bbox = dots4ToRec4(poly)
obj['bndbox'] = bbox
return objects
# bounding box transfer for varies format
def dots4ToRec4(poly):
xmin, xmax, ymin, ymax = min(poly[0][0], min(poly[1][0], min(poly[2][0], poly[3][0]))), \
max(poly[0][0], max(poly[1][0], max(poly[2][0], poly[3][0]))), \
min(poly[0][1], min(poly[1][1], min(poly[2][1], poly[3][1]))), \
max(poly[0][1], max(poly[1][1], max(poly[2][1], poly[3][1])))
return xmin, ymin, xmax, ymax
def dots4ToRec8(poly):
xmin, ymin, xmax, ymax = dots4ToRec4(poly)
return xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax
def dots2ToRec8(rec):
xmin, ymin, xmax, ymax = rec[0], rec[1], rec[2], rec[3]
return xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax
def groundtruth2Task1(srcpath, dstpath):
filelist = GetFileFromThisRootDir(srcpath)
filedict = {}
for cls in wordname_15:
fd = open(os.path.join(dstpath, 'Task1_') + cls + r'.txt', 'w')
filedict[cls] = fd
for filepath in filelist:
objects = parse_dota_poly2(filepath)
subname = custombasename(filepath)
pattern2 = re.compile(r'__([\d+\.]+)__\d+___')
rate = re.findall(pattern2, subname)[0]
for obj in objects:
category = obj['name']
difficult = obj['difficult']
poly = obj['poly']
if difficult == '2':
continue
if rate == '0.5':
outline = custombasename(
filepath) + ' ' + '1' + ' ' + ' '.join(map(str, poly))
elif rate == '1':
outline = custombasename(
filepath) + ' ' + '0.8' + ' ' + ' '.join(map(str, poly))
elif rate == '2':
outline = custombasename(
filepath) + ' ' + '0.6' + ' ' + ' '.join(map(str, poly))
filedict[category].write(outline + '\n')
def Task2groundtruth_poly(srcpath, dstpath):
thresh = 0.1
filedict = {}
Tasklist = GetFileFromThisRootDir(srcpath, '.txt')
for Taskfile in Tasklist:
idname = custombasename(Taskfile).split('_')[-1]
# idname = datamap_inverse[idname]
f = open(Taskfile, 'r')
lines = f.readlines()
for line in lines:
if len(line) == 0:
continue
# print('line:', line)
splitline = line.strip().split(' ')
filename = splitline[0]
confidence = splitline[1]
bbox = splitline[2:]
if float(confidence) > thresh:
if filename not in filedict:
# filedict[filename] = codecs.open(os.path.join(dstpath, filename + '.txt'), 'w', 'utf_16')
filedict[filename] = codecs.open(
os.path.join(dstpath, filename + '.txt'), 'w')
# poly = util.dots2ToRec8(bbox)
poly = bbox
filedict[filename].write(' '.join(poly) + ' ' + idname + '\n')
def polygonToRotRectangle(bbox):
"""
:param bbox: The polygon stored in format [x1, y1, x2, y2, x3, y3, x4, y4]
:return: Rotated Rectangle in format [cx, cy, w, h, theta]
"""
bbox = np.array(bbox, dtype=np.float32)
bbox = np.reshape(bbox, newshape=(2, 4), order='F')
angle = math.atan2(-(bbox[0, 1] - bbox[0, 0]), bbox[1, 1] - bbox[1, 0])
center = [[0], [0]]
for i in range(4):
center[0] += bbox[0, i]
center[1] += bbox[1, i]
center = np.array(center, dtype=np.float32) / 4.0
R = np.array([[math.cos(angle), -math.sin(angle)],
[math.sin(angle), math.cos(angle)]], dtype=np.float32)
normalized = np.matmul(R.transpose(), bbox - center)
xmin = np.min(normalized[0, :])
xmax = np.max(normalized[0, :])
ymin = np.min(normalized[1, :])
ymax = np.max(normalized[1, :])
w = xmax - xmin + 1
h = ymax - ymin + 1
return [float(center[0]), float(center[1]), w, h, angle]
def cal_line_length(point1, point2):
return math.sqrt(math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2))
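# get_best_begin_point picks, among the four cyclic orderings of the polygon
# corners, the one whose corners are closest (by summed distance) to the corners
# of the axis-aligned bounding box starting from its top-left corner, so that
# annotated polygons get a consistent starting vertex.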
def get_best_begin_point(coordinate):
x1 = coordinate[0][0]
y1 = coordinate[0][1]
x2 = coordinate[1][0]
y2 = coordinate[1][1]
x3 = coordinate[2][0]
y3 = coordinate[2][1]
x4 = coordinate[3][0]
y4 = coordinate[3][1]
xmin = min(x1, x2, x3, x4)
ymin = min(y1, y2, y3, y4)
xmax = max(x1, x2, x3, x4)
ymax = max(y1, y2, y3, y4)
combinate = [[[x1, y1], [x2, y2], [x3, y3], [x4, y4]], [[x2, y2], [x3, y3], [x4, y4], [x1, y1]],
[[x3, y3], [x4, y4], [x1, y1], [x2, y2]], [[x4, y4], [x1, y1], [x2, y2], [x3, y3]]]
dst_coordinate = [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]
force = 100000000.0
force_flag = 0
for i in range(4):
temp_force = cal_line_length(combinate[i][0], dst_coordinate[0]) + cal_line_length(combinate[i][1],
dst_coordinate[
1]) + cal_line_length(
combinate[i][2], dst_coordinate[2]) + cal_line_length(combinate[i][3], dst_coordinate[3])
if temp_force < force:
force = temp_force
force_flag = i
if force_flag != 0:
print("choose one direction!")
return combinate[force_flag]
| 9,669 | 33.412811 | 117 |
py
|
s2anet
|
s2anet-master/DOTA_devkit/__init__.py
| 0 | 0 | 0 |
py
|
|
s2anet
|
s2anet-master/DOTA_devkit/prepare_dota1_ms.py
|
import os
import os.path as osp
from DOTA_devkit.ImgSplit_multi_process import splitbase as splitbase_trainval
from DOTA_devkit.SplitOnlyImage_multi_process import splitbase as splitbase_test
from DOTA_devkit.convert_dota_to_mmdet import convert_dota_to_mmdet
def mkdir_if_not_exists(path):
if not osp.exists(path):
os.mkdir(path)
def prepare_multi_scale_data(src_path, dst_path, gap=200, subsize=1024, scales=[0.5, 1.0, 1.5], num_process=32):
dst_trainval_path = osp.join(dst_path, 'trainval_split')
dst_test_base_path = osp.join(dst_path, 'test_split')
dst_test_path = osp.join(dst_path, 'test_split/images')
# make dst path if not exist
mkdir_if_not_exists(dst_path)
mkdir_if_not_exists(dst_trainval_path)
mkdir_if_not_exists(dst_test_base_path)
mkdir_if_not_exists(dst_test_path)
# split train data
print('split train data')
split_train = splitbase_trainval(osp.join(src_path, 'train'), dst_trainval_path,
gap=gap, subsize=subsize, num_process=num_process)
for scale in scales:
split_train.splitdata(scale)
print('split val data')
# split val data
split_val = splitbase_trainval(osp.join(src_path, 'val'), dst_trainval_path,
gap=gap, subsize=subsize, num_process=num_process)
for scale in scales:
split_val.splitdata(scale)
# split test data
print('split test data')
split_test = splitbase_test(osp.join(src_path, 'test/images'), dst_test_path,
gap=gap, subsize=subsize, num_process=num_process)
for scale in scales:
split_test.splitdata(scale)
convert_dota_to_mmdet(dst_trainval_path,
osp.join(dst_trainval_path, 'trainval1024.pkl'))
convert_dota_to_mmdet(dst_test_base_path,
osp.join(dst_test_base_path, 'test1024.pkl'), trainval=False)
print('done!')
if __name__ == '__main__':
prepare_multi_scale_data('/data/hjm/dota', '/data/hjm/dota_1024', gap=200, subsize=1024, scales=[1.0],
num_process=32)
| 2,130 | 39.207547 | 112 |
py
|
s2anet
|
s2anet-master/DOTA_devkit/ImgSplit_multi_process.py
|
"""
-------------
This is the multi-process version
"""
import codecs
import copy
import math
import os
from functools import partial
from multiprocessing import Pool
import cv2
import dota_utils as util
import numpy as np
import shapely.geometry as shgeo
from dota_utils import GetFileFromThisRootDir
def choose_best_pointorder_fit_another(poly1, poly2):
"""
To make the two polygons best fit with each point
"""
x1 = poly1[0]
y1 = poly1[1]
x2 = poly1[2]
y2 = poly1[3]
x3 = poly1[4]
y3 = poly1[5]
x4 = poly1[6]
y4 = poly1[7]
combinate = [np.array([x1, y1, x2, y2, x3, y3, x4, y4]), np.array([x2, y2, x3, y3, x4, y4, x1, y1]),
np.array([x3, y3, x4, y4, x1, y1, x2, y2]), np.array([x4, y4, x1, y1, x2, y2, x3, y3])]
dst_coordinate = np.array(poly2)
distances = np.array([np.sum((coord - dst_coordinate) ** 2)
for coord in combinate])
sorted = distances.argsort()
return combinate[sorted[0]]
def cal_line_length(point1, point2):
return math.sqrt(math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2))
def split_single_warp(name, split_base, rate, extent):
split_base.SplitSingle(name, rate, extent)
class splitbase():
def __init__(self,
basepath,
outpath,
code='utf-8',
gap=512,
subsize=1024,
thresh=0.7,
choosebestpoint=True,
ext='.png',
padding=True,
num_process=8
):
"""
:param basepath: base path for dota data
        :param outpath: output base path for dota data,
        the basepath and outpath have the same subdirectories, 'images' and 'labelTxt'
        :param code: encoding format of txt file
:param gap: overlap between two patches
:param subsize: subsize of patch
        :param thresh: threshold that determines whether to keep an instance when it is cut by the split
        :param choosebestpoint: whether to reorder the first point of a clipped polygon so that it best matches the original point order
        :param ext: extension of the image format
        :param padding: whether to pad the images so that all the patches have the same size
"""
self.basepath = basepath
self.outpath = outpath
self.code = code
self.gap = gap
self.subsize = subsize
self.slide = self.subsize - self.gap
self.thresh = thresh
self.imagepath = os.path.join(self.basepath, 'images')
self.labelpath = os.path.join(self.basepath, 'labelTxt')
self.outimagepath = os.path.join(self.outpath, 'images')
self.outlabelpath = os.path.join(self.outpath, 'labelTxt')
self.choosebestpoint = choosebestpoint
self.ext = ext
self.padding = padding
self.num_process = num_process
self.pool = Pool(num_process)
print('padding:', padding)
# pdb.set_trace()
if not os.path.isdir(self.outpath):
os.mkdir(self.outpath)
if not os.path.isdir(self.outimagepath):
# pdb.set_trace()
os.mkdir(self.outimagepath)
if not os.path.isdir(self.outlabelpath):
os.mkdir(self.outlabelpath)
# pdb.set_trace()
# point: (x, y), rec: (xmin, ymin, xmax, ymax)
# def __del__(self):
# self.f_sub.close()
# grid --> (x, y) position of grids
def polyorig2sub(self, left, up, poly):
polyInsub = np.zeros(len(poly))
for i in range(int(len(poly) / 2)):
polyInsub[i * 2] = int(poly[i * 2] - left)
polyInsub[i * 2 + 1] = int(poly[i * 2 + 1] - up)
return polyInsub
def calchalf_iou(self, poly1, poly2):
"""
It is not the iou on usual, the iou is the value of intersection over poly1
"""
inter_poly = poly1.intersection(poly2)
inter_area = inter_poly.area
poly1_area = poly1.area
half_iou = inter_area / poly1_area
return inter_poly, half_iou
def saveimagepatches(self, img, subimgname, left, up):
subimg = copy.deepcopy(
img[up: (up + self.subsize), left: (left + self.subsize)])
outdir = os.path.join(self.outimagepath, subimgname + self.ext)
h, w, c = np.shape(subimg)
if (self.padding):
outimg = np.zeros((self.subsize, self.subsize, 3))
outimg[0:h, 0:w, :] = subimg
cv2.imwrite(outdir, outimg)
else:
cv2.imwrite(outdir, subimg)
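    # When clipping a ground-truth polygon against a patch produces a 5-point
    # polygon, GetPoly4FromPoly5 reduces it back to 4 points by replacing the
    # two endpoints of its shortest edge with their midpoint.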
def GetPoly4FromPoly5(self, poly):
distances = [cal_line_length((poly[i * 2], poly[i * 2 + 1]), (poly[(
i + 1) * 2], poly[(i + 1) * 2 + 1]))
for i in range(int(len(poly) / 2 - 1))]
distances.append(cal_line_length(
(poly[0], poly[1]), (poly[8], poly[9])))
pos = np.array(distances).argsort()[0]
count = 0
outpoly = []
while count < 5:
# print('count:', count)
if (count == pos):
outpoly.append(
(poly[count * 2] + poly[(count * 2 + 2) % 10]) / 2)
outpoly.append(
(poly[(count * 2 + 1) % 10] + poly[(count * 2 + 3) % 10]) / 2)
count = count + 1
elif (count == (pos + 1) % 5):
count = count + 1
continue
else:
outpoly.append(poly[count * 2])
outpoly.append(poly[count * 2 + 1])
count = count + 1
return outpoly
def savepatches(self, resizeimg, objects, subimgname, left, up, right, down):
outdir = os.path.join(self.outlabelpath, subimgname + '.txt')
mask_poly = []
imgpoly = shgeo.Polygon([(left, up), (right, up), (right, down),
(left, down)])
with codecs.open(outdir, 'w', self.code) as f_out:
for obj in objects:
gtpoly = shgeo.Polygon([(obj['poly'][0], obj['poly'][1]),
(obj['poly'][2], obj['poly'][3]),
(obj['poly'][4], obj['poly'][5]),
(obj['poly'][6], obj['poly'][7])])
if (gtpoly.area <= 0):
continue
inter_poly, half_iou = self.calchalf_iou(gtpoly, imgpoly)
# print('writing...')
if (half_iou == 1):
polyInsub = self.polyorig2sub(left, up, obj['poly'])
outline = ' '.join(list(map(str, polyInsub)))
outline = outline + ' ' + \
obj['name'] + ' ' + str(obj['difficult'])
f_out.write(outline + '\n')
elif (half_iou > 0):
# elif (half_iou > self.thresh):
# print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
inter_poly = shgeo.polygon.orient(inter_poly, sign=1)
out_poly = list(inter_poly.exterior.coords)[0: -1]
if len(out_poly) < 4:
continue
out_poly2 = []
for i in range(len(out_poly)):
out_poly2.append(out_poly[i][0])
out_poly2.append(out_poly[i][1])
if (len(out_poly) == 5):
# print('==========================')
out_poly2 = self.GetPoly4FromPoly5(out_poly2)
elif (len(out_poly) > 5):
"""
if the cut instance is a polygon with points more than 5, we do not handle it currently
"""
continue
if (self.choosebestpoint):
out_poly2 = choose_best_pointorder_fit_another(
out_poly2, obj['poly'])
polyInsub = self.polyorig2sub(left, up, out_poly2)
for index, item in enumerate(polyInsub):
if (item <= 1):
polyInsub[index] = 1
elif (item >= self.subsize):
polyInsub[index] = self.subsize
outline = ' '.join(list(map(str, polyInsub)))
if (half_iou > self.thresh):
outline = outline + ' ' + \
obj['name'] + ' ' + str(obj['difficult'])
else:
# if the left part is too small, label as '2'
outline = outline + ' ' + obj['name'] + ' ' + '2'
f_out.write(outline + '\n')
# else:
# mask_poly.append(inter_poly)
self.saveimagepatches(resizeimg, subimgname, left, up)
def SplitSingle(self, name, rate, extent):
"""
split a single image and ground truth
:param name: image name
:param rate: the resize scale for the image
:param extent: the image format
:return:
"""
img = cv2.imread(os.path.join(self.imagepath, name + extent))
if np.shape(img) == ():
return
fullname = os.path.join(self.labelpath, name + '.txt')
objects = util.parse_dota_poly2(fullname)
for obj in objects:
obj['poly'] = list(map(lambda x: rate * x, obj['poly']))
# obj['poly'] = list(map(lambda x: ([2 * y for y in x]), obj['poly']))
if (rate != 1):
resizeimg = cv2.resize(
img, None, fx=rate, fy=rate, interpolation=cv2.INTER_CUBIC)
else:
resizeimg = img
outbasename = name + '__' + str(rate) + '__'
weight = np.shape(resizeimg)[1]
height = np.shape(resizeimg)[0]
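        # Slide a subsize x subsize window over the (possibly resized) image
        # with a stride of subsize - gap; the last window in each direction is
        # shifted back so that it ends at the image border.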
left, up = 0, 0
while (left < weight):
if (left + self.subsize >= weight):
left = max(weight - self.subsize, 0)
up = 0
while (up < height):
if (up + self.subsize >= height):
up = max(height - self.subsize, 0)
right = min(left + self.subsize, weight - 1)
down = min(up + self.subsize, height - 1)
subimgname = outbasename + str(left) + '___' + str(up)
# self.f_sub.write(name + ' ' + subimgname + ' ' + str(left) + ' ' + str(up) + '\n')
self.savepatches(resizeimg, objects,
subimgname, left, up, right, down)
if (up + self.subsize >= height):
break
else:
up = up + self.slide
if (left + self.subsize >= weight):
break
else:
left = left + self.slide
def splitdata(self, rate):
"""
:param rate: resize rate before cut
"""
imagelist = GetFileFromThisRootDir(self.imagepath)
imagenames = [util.custombasename(x) for x in imagelist if (
util.custombasename(x) != 'Thumbs')]
if self.num_process == 1:
for name in imagenames:
self.SplitSingle(name, rate, self.ext)
else:
# worker = partial(self.SplitSingle, rate=rate, extent=self.ext)
worker = partial(split_single_warp, split_base=self,
rate=rate, extent=self.ext)
self.pool.map(worker, imagenames)
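    # multiprocessing.Pool objects cannot be pickled, so the pool is removed
    # from the pickled state when splitbase instances are passed to the worker
    # processes.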
def __getstate__(self):
self_dict = self.__dict__.copy()
del self_dict['pool']
return self_dict
def __setstate__(self, state):
self.__dict__.update(state)
if __name__ == '__main__':
split = splitbase(r'data/dota/train', r'data/dota_train_1024/train_split',
gap=200, subsize=1024, num_process=32)
split.splitdata(1)
| 12,045 | 38.237785 | 124 |
py
|