id (int64) | prompt (string) | docstring (string, nullable)
---|---|---|
1,112 | import json
import requests
import openai
import tiktoken
import os
import time
from functools import wraps
import threading
def timeout_decorator(timeout):
class TimeoutException(Exception):
pass
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
result = [TimeoutException('Function call timed out')] # Nonlocal mutable variable
def target():
try:
result[0] = func(*args, **kwargs)
except Exception as e:
result[0] = e
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print(f"Function {func.__name__} timed out, retrying...")
return wrapper(*args, **kwargs)
if isinstance(result[0], Exception):
raise result[0]
return result[0]
return wrapper
return decorator | null |
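A minimal usage sketch for `timeout_decorator` above, assuming the decorator is in scope; the function name `slow_call` and the 2-second limit are illustrative. Note that the wrapper retries indefinitely while a call keeps exceeding the timeout.

```python
import time

@timeout_decorator(timeout=2)
def slow_call(x):
    time.sleep(1)          # finishes within the 2-second limit
    return x * 2

print(slow_call(21))       # 42; a call that kept exceeding 2 s would be retried
```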
1,113 | import json
import requests
import openai
import tiktoken
import os
import time
from functools import wraps
import threading
def send_chat_request(request):
endpoint = 'http://10.15.82.10:8006/v1/chat/completions'
model = 'gpt-3.5-turbo'
# model options: gpt4, gpt4-32k, and gpt-3.5-turbo
headers = {
'Content-Type': 'application/json',
}
temperature = 0.7
top_p = 0.95
frequency_penalty = 0
presence_penalty = 0
max_tokens = 8000
stream = False
stop = None
messages = [{"role": "user", "content": request}]
data = {
'model': model,
'messages': messages,
'temperature': temperature,
'top_p': top_p,
'frequency_penalty': frequency_penalty,
'presence_penalty': presence_penalty,
'max_tokens': max_tokens,
'stream': stream,
'stop': stop,
}
response = requests.post(endpoint, headers=headers, data=json.dumps(data))
if response.status_code == 200:
data = json.loads(response.text)
data_res = data['choices'][0]['message']
return data_res
else:
raise Exception(f"Request failed with status code {response.status_code}: {response.text}") | null |
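An illustrative call to `send_chat_request` above; it assumes the private endpoint (`http://10.15.82.10:8006`) is reachable from the caller's network, which will not be the case outside the original environment.

```python
reply = send_chat_request("Summarize the Davies-Bouldin index in one sentence.")
# The function returns the 'message' object of the first choice, a dict
# with 'role' and 'content' keys.
print(reply["content"])
```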
1,114 | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim
import torch.optim.lr_scheduler as lr_scheduler
import time
import os
import glob
import configs
import backbone
from data.datamgr import SimpleDataManager, SetDataManager
from methods.baselinetrain import BaselineTrain
from methods.baselinefinetune import BaselineFinetune
from methods.protonet import ProtoNet
from methods.matchingnet import MatchingNet
from methods.relationnet import RelationNet
from methods.maml import MAML
from io_utils import model_dict, parse_args, get_resume_file
def train(base_loader, val_loader, model, optimization, start_epoch, stop_epoch, params):
if optimization == 'Adam':
optimizer = torch.optim.Adam(model.parameters())
else:
raise ValueError('Unknown optimization, please define by yourself')
max_acc = 0
for epoch in range(start_epoch,stop_epoch):
model.train()
model.train_loop(epoch, base_loader, optimizer ) #model are called by reference, no need to return
model.eval()
if not os.path.isdir(params.checkpoint_dir):
os.makedirs(params.checkpoint_dir)
acc = model.test_loop( val_loader)
if acc > max_acc : #for baseline and baseline++, we don't use validation in default and we let acc = -1, but we allow options to validate with DB index
print("best model! save...")
max_acc = acc
outfile = os.path.join(params.checkpoint_dir, 'best_model.tar')
torch.save({'epoch':epoch, 'state':model.state_dict()}, outfile)
if (epoch % params.save_freq==0) or (epoch==stop_epoch-1):
outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch))
torch.save({'epoch':epoch, 'state':model.state_dict()}, outfile)
return model | null |
1,115 | import torch
import numpy as np
def one_hot(y, num_class):
return torch.zeros((len(y), num_class)).scatter_(1, y.unsqueeze(1), 1) | null |
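A small worked example for `one_hot` above; `y` must be an integer (Long) tensor so that `scatter_` can use it as an index.

```python
import torch

y = torch.tensor([0, 2, 1])
print(one_hot(y, num_class=3))
# tensor([[1., 0., 0.],
#         [0., 0., 1.],
#         [0., 1., 0.]])
```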
1,116 | import torch
import numpy as np
def DBindex(cl_data_file):
class_list = cl_data_file.keys()
cl_num= len(class_list)
cl_means = []
stds = []
DBs = []
for cl in class_list:
cl_means.append( np.mean(cl_data_file[cl], axis = 0) )
stds.append( np.sqrt(np.mean( np.sum(np.square( cl_data_file[cl] - cl_means[-1]), axis = 1))))
mu_i = np.tile( np.expand_dims( np.array(cl_means), axis = 0), (len(class_list),1,1) )
mu_j = np.transpose(mu_i,(1,0,2))
mdists = np.sqrt(np.sum(np.square(mu_i - mu_j), axis = 2))
for i in range(cl_num):
DBs.append( np.max([ (stds[i]+ stds[j])/mdists[i,j] for j in range(cl_num) if j != i ]) )
return np.mean(DBs) | null |
1,117 | import torch
import numpy as np
def sparsity(cl_data_file):
class_list = cl_data_file.keys()
cl_sparsity = []
for cl in class_list:
cl_sparsity.append(np.mean([np.sum(x!=0) for x in cl_data_file[cl] ]) )
return np.mean(cl_sparsity) | null |
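A tiny worked example for `sparsity` above: it averages, over classes, the mean number of non-zero feature dimensions per sample. The data are illustrative.

```python
import numpy as np

cl_data_file = {
    0: [np.array([1.0, 0.0, 2.0]), np.array([0.0, 0.0, 3.0])],  # 2 and 1 non-zeros
    1: [np.array([4.0, 5.0, 6.0])],                              # 3 non-zeros
}
print(sparsity(cl_data_file))  # (mean(2, 1) + 3) / 2 = 2.25
```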
1,118 | import torch
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
def init_layer(L):
# Initialization using fan-in
if isinstance(L, nn.Conv2d):
n = L.kernel_size[0]*L.kernel_size[1]*L.out_channels
L.weight.data.normal_(0,math.sqrt(2.0/float(n)))
elif isinstance(L, nn.BatchNorm2d):
L.weight.data.fill_(1)
L.bias.data.fill_(0) | null |
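A short usage sketch for `init_layer` above, applied to freshly constructed layers.

```python
import torch.nn as nn

conv = nn.Conv2d(3, 64, kernel_size=3, padding=1)
init_layer(conv)   # weight ~ N(0, std=sqrt(2 / (3*3*64))), i.e. fan-in scaling
bn = nn.BatchNorm2d(64)
init_layer(bn)     # weight -> 1, bias -> 0
```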
1,119 | import torch
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
class ConvNet(nn.Module):
def __init__(self, depth, flatten = True):
def forward(self,x):
def Conv4():
return ConvNet(4) | null |
1,120 | import torch
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
class ConvNet(nn.Module):
def __init__(self, depth, flatten = True):
def forward(self,x):
def Conv6():
return ConvNet(6) | null |
1,121 | import torch
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
class ConvNetNopool(nn.Module): #Relation net uses a 4-layer conv with pooling only in the first two layers; no pooling elsewhere
def __init__(self, depth):
super(ConvNetNopool,self).__init__()
trunk = []
for i in range(depth):
indim = 3 if i == 0 else 64
outdim = 64
B = ConvBlock(indim, outdim, pool = ( i in [0,1] ), padding = 0 if i in[0,1] else 1 ) #only first two layer has pooling and no padding
trunk.append(B)
self.trunk = nn.Sequential(*trunk)
self.final_feat_dim = [64,19,19]
def forward(self,x):
out = self.trunk(x)
return out
def Conv4NP():
return ConvNetNopool(4) | null |
1,122 | import torch
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
class ConvNetNopool(nn.Module): #Relation net uses a 4-layer conv with pooling only in the first two layers; no pooling elsewhere
def __init__(self, depth):
super(ConvNetNopool,self).__init__()
trunk = []
for i in range(depth):
indim = 3 if i == 0 else 64
outdim = 64
B = ConvBlock(indim, outdim, pool = ( i in [0,1] ), padding = 0 if i in[0,1] else 1 ) #only first two layer has pooling and no padding
trunk.append(B)
self.trunk = nn.Sequential(*trunk)
self.final_feat_dim = [64,19,19]
def forward(self,x):
out = self.trunk(x)
return out
def Conv6NP():
return ConvNetNopool(6) | null |
1,123 | import torch
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
class ConvNetS(nn.Module):
def __init__(self, depth, flatten = True):
def forward(self,x):
def Conv4S():
return ConvNetS(4) | null |
1,124 | import torch
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
class ConvNetSNopool(nn.Module):
def __init__(self, depth):
def forward(self,x):
def Conv4SNP():
return ConvNetSNopool(4) | null |
1,125 | import torch
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
class SimpleBlock(nn.Module):
maml = False #Default
def __init__(self, indim, outdim, half_res):
super(SimpleBlock, self).__init__()
self.indim = indim
self.outdim = outdim
if self.maml:
self.C1 = Conv2d_fw(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)
self.BN1 = BatchNorm2d_fw(outdim)
self.C2 = Conv2d_fw(outdim, outdim,kernel_size=3, padding=1,bias=False)
self.BN2 = BatchNorm2d_fw(outdim)
else:
self.C1 = nn.Conv2d(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)
self.BN1 = nn.BatchNorm2d(outdim)
self.C2 = nn.Conv2d(outdim, outdim,kernel_size=3, padding=1,bias=False)
self.BN2 = nn.BatchNorm2d(outdim)
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.parametrized_layers = [self.C1, self.C2, self.BN1, self.BN2]
self.half_res = half_res
# if the input number of channels is not equal to the output, then need a 1x1 convolution
if indim!=outdim:
if self.maml:
self.shortcut = Conv2d_fw(indim, outdim, 1, 2 if half_res else 1, bias=False)
self.BNshortcut = BatchNorm2d_fw(outdim)
else:
self.shortcut = nn.Conv2d(indim, outdim, 1, 2 if half_res else 1, bias=False)
self.BNshortcut = nn.BatchNorm2d(outdim)
self.parametrized_layers.append(self.shortcut)
self.parametrized_layers.append(self.BNshortcut)
self.shortcut_type = '1x1'
else:
self.shortcut_type = 'identity'
for layer in self.parametrized_layers:
init_layer(layer)
def forward(self, x):
out = self.C1(x)
out = self.BN1(out)
out = self.relu1(out)
out = self.C2(out)
out = self.BN2(out)
short_out = x if self.shortcut_type == 'identity' else self.BNshortcut(self.shortcut(x))
out = out + short_out
out = self.relu2(out)
return out
class ResNet(nn.Module):
maml = False #Default
def __init__(self,block,list_of_num_layers, list_of_out_dims, flatten = True):
# list_of_num_layers specifies number of layers in each stage
# list_of_out_dims specifies number of output channel for each stage
super(ResNet,self).__init__()
assert len(list_of_num_layers)==4, 'Can have only four stages'
if self.maml:
conv1 = Conv2d_fw(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
bn1 = BatchNorm2d_fw(64)
else:
conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
bn1 = nn.BatchNorm2d(64)
relu = nn.ReLU()
pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
init_layer(conv1)
init_layer(bn1)
trunk = [conv1, bn1, relu, pool1]
indim = 64
for i in range(4):
for j in range(list_of_num_layers[i]):
half_res = (i>=1) and (j==0)
B = block(indim, list_of_out_dims[i], half_res)
trunk.append(B)
indim = list_of_out_dims[i]
if flatten:
avgpool = nn.AvgPool2d(7)
trunk.append(avgpool)
trunk.append(Flatten())
self.final_feat_dim = indim
else:
self.final_feat_dim = [ indim, 7, 7]
self.trunk = nn.Sequential(*trunk)
def forward(self,x):
out = self.trunk(x)
return out
def ResNet10( flatten = True):
return ResNet(SimpleBlock, [1,1,1,1],[64,128,256,512], flatten) | null |
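A construction sketch for `ResNet10` above. It assumes the helper symbols omitted from this row (`Flatten`, `init_layer`, and, when `maml` is enabled, `Conv2d_fw`/`BatchNorm2d_fw`) are available from the original backbone module; the input size is illustrative.

```python
import torch

model = ResNet10(flatten=True)
x = torch.randn(2, 3, 224, 224)   # 224x224 input -> 7x7 feature map before pooling
print(model(x).shape)             # torch.Size([2, 512]); model.final_feat_dim == 512
```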
1,126 | import torch
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
class SimpleBlock(nn.Module):
maml = False #Default
def __init__(self, indim, outdim, half_res):
super(SimpleBlock, self).__init__()
self.indim = indim
self.outdim = outdim
if self.maml:
self.C1 = Conv2d_fw(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)
self.BN1 = BatchNorm2d_fw(outdim)
self.C2 = Conv2d_fw(outdim, outdim,kernel_size=3, padding=1,bias=False)
self.BN2 = BatchNorm2d_fw(outdim)
else:
self.C1 = nn.Conv2d(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)
self.BN1 = nn.BatchNorm2d(outdim)
self.C2 = nn.Conv2d(outdim, outdim,kernel_size=3, padding=1,bias=False)
self.BN2 = nn.BatchNorm2d(outdim)
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.parametrized_layers = [self.C1, self.C2, self.BN1, self.BN2]
self.half_res = half_res
# if the input number of channels is not equal to the output, then need a 1x1 convolution
if indim!=outdim:
if self.maml:
self.shortcut = Conv2d_fw(indim, outdim, 1, 2 if half_res else 1, bias=False)
self.BNshortcut = BatchNorm2d_fw(outdim)
else:
self.shortcut = nn.Conv2d(indim, outdim, 1, 2 if half_res else 1, bias=False)
self.BNshortcut = nn.BatchNorm2d(outdim)
self.parametrized_layers.append(self.shortcut)
self.parametrized_layers.append(self.BNshortcut)
self.shortcut_type = '1x1'
else:
self.shortcut_type = 'identity'
for layer in self.parametrized_layers:
init_layer(layer)
def forward(self, x):
out = self.C1(x)
out = self.BN1(out)
out = self.relu1(out)
out = self.C2(out)
out = self.BN2(out)
short_out = x if self.shortcut_type == 'identity' else self.BNshortcut(self.shortcut(x))
out = out + short_out
out = self.relu2(out)
return out
class ResNet(nn.Module):
maml = False #Default
def __init__(self,block,list_of_num_layers, list_of_out_dims, flatten = True):
# list_of_num_layers specifies number of layers in each stage
# list_of_out_dims specifies number of output channel for each stage
super(ResNet,self).__init__()
assert len(list_of_num_layers)==4, 'Can have only four stages'
if self.maml:
conv1 = Conv2d_fw(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
bn1 = BatchNorm2d_fw(64)
else:
conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
bn1 = nn.BatchNorm2d(64)
relu = nn.ReLU()
pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
init_layer(conv1)
init_layer(bn1)
trunk = [conv1, bn1, relu, pool1]
indim = 64
for i in range(4):
for j in range(list_of_num_layers[i]):
half_res = (i>=1) and (j==0)
B = block(indim, list_of_out_dims[i], half_res)
trunk.append(B)
indim = list_of_out_dims[i]
if flatten:
avgpool = nn.AvgPool2d(7)
trunk.append(avgpool)
trunk.append(Flatten())
self.final_feat_dim = indim
else:
self.final_feat_dim = [ indim, 7, 7]
self.trunk = nn.Sequential(*trunk)
def forward(self,x):
out = self.trunk(x)
return out
def ResNet18( flatten = True):
return ResNet(SimpleBlock, [2,2,2,2],[64,128,256,512], flatten) | null |
1,127 | import torch
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
class SimpleBlock(nn.Module):
maml = False #Default
def __init__(self, indim, outdim, half_res):
super(SimpleBlock, self).__init__()
self.indim = indim
self.outdim = outdim
if self.maml:
self.C1 = Conv2d_fw(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)
self.BN1 = BatchNorm2d_fw(outdim)
self.C2 = Conv2d_fw(outdim, outdim,kernel_size=3, padding=1,bias=False)
self.BN2 = BatchNorm2d_fw(outdim)
else:
self.C1 = nn.Conv2d(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)
self.BN1 = nn.BatchNorm2d(outdim)
self.C2 = nn.Conv2d(outdim, outdim,kernel_size=3, padding=1,bias=False)
self.BN2 = nn.BatchNorm2d(outdim)
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.parametrized_layers = [self.C1, self.C2, self.BN1, self.BN2]
self.half_res = half_res
# if the input number of channels is not equal to the output, then need a 1x1 convolution
if indim!=outdim:
if self.maml:
self.shortcut = Conv2d_fw(indim, outdim, 1, 2 if half_res else 1, bias=False)
self.BNshortcut = BatchNorm2d_fw(outdim)
else:
self.shortcut = nn.Conv2d(indim, outdim, 1, 2 if half_res else 1, bias=False)
self.BNshortcut = nn.BatchNorm2d(outdim)
self.parametrized_layers.append(self.shortcut)
self.parametrized_layers.append(self.BNshortcut)
self.shortcut_type = '1x1'
else:
self.shortcut_type = 'identity'
for layer in self.parametrized_layers:
init_layer(layer)
def forward(self, x):
out = self.C1(x)
out = self.BN1(out)
out = self.relu1(out)
out = self.C2(out)
out = self.BN2(out)
short_out = x if self.shortcut_type == 'identity' else self.BNshortcut(self.shortcut(x))
out = out + short_out
out = self.relu2(out)
return out
class ResNet(nn.Module):
maml = False #Default
def __init__(self,block,list_of_num_layers, list_of_out_dims, flatten = True):
# list_of_num_layers specifies number of layers in each stage
# list_of_out_dims specifies number of output channel for each stage
super(ResNet,self).__init__()
assert len(list_of_num_layers)==4, 'Can have only four stages'
if self.maml:
conv1 = Conv2d_fw(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
bn1 = BatchNorm2d_fw(64)
else:
conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
bn1 = nn.BatchNorm2d(64)
relu = nn.ReLU()
pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
init_layer(conv1)
init_layer(bn1)
trunk = [conv1, bn1, relu, pool1]
indim = 64
for i in range(4):
for j in range(list_of_num_layers[i]):
half_res = (i>=1) and (j==0)
B = block(indim, list_of_out_dims[i], half_res)
trunk.append(B)
indim = list_of_out_dims[i]
if flatten:
avgpool = nn.AvgPool2d(7)
trunk.append(avgpool)
trunk.append(Flatten())
self.final_feat_dim = indim
else:
self.final_feat_dim = [ indim, 7, 7]
self.trunk = nn.Sequential(*trunk)
def forward(self,x):
out = self.trunk(x)
return out
def ResNet34( flatten = True):
return ResNet(SimpleBlock, [3,4,6,3],[64,128,256,512], flatten) | null |
1,128 | import torch
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
class BottleneckBlock(nn.Module):
maml = False #Default
def __init__(self, indim, outdim, half_res):
super(BottleneckBlock, self).__init__()
bottleneckdim = int(outdim/4)
self.indim = indim
self.outdim = outdim
if self.maml:
self.C1 = Conv2d_fw(indim, bottleneckdim, kernel_size=1, bias=False)
self.BN1 = BatchNorm2d_fw(bottleneckdim)
self.C2 = Conv2d_fw(bottleneckdim, bottleneckdim, kernel_size=3, stride=2 if half_res else 1,padding=1)
self.BN2 = BatchNorm2d_fw(bottleneckdim)
self.C3 = Conv2d_fw(bottleneckdim, outdim, kernel_size=1, bias=False)
self.BN3 = BatchNorm2d_fw(outdim)
else:
self.C1 = nn.Conv2d(indim, bottleneckdim, kernel_size=1, bias=False)
self.BN1 = nn.BatchNorm2d(bottleneckdim)
self.C2 = nn.Conv2d(bottleneckdim, bottleneckdim, kernel_size=3, stride=2 if half_res else 1,padding=1)
self.BN2 = nn.BatchNorm2d(bottleneckdim)
self.C3 = nn.Conv2d(bottleneckdim, outdim, kernel_size=1, bias=False)
self.BN3 = nn.BatchNorm2d(outdim)
self.relu = nn.ReLU()
self.parametrized_layers = [self.C1, self.BN1, self.C2, self.BN2, self.C3, self.BN3]
self.half_res = half_res
# if the input number of channels is not equal to the output, then need a 1x1 convolution
if indim!=outdim:
if self.maml:
self.shortcut = Conv2d_fw(indim, outdim, 1, stride=2 if half_res else 1, bias=False)
else:
self.shortcut = nn.Conv2d(indim, outdim, 1, stride=2 if half_res else 1, bias=False)
self.parametrized_layers.append(self.shortcut)
self.shortcut_type = '1x1'
else:
self.shortcut_type = 'identity'
for layer in self.parametrized_layers:
init_layer(layer)
def forward(self, x):
short_out = x if self.shortcut_type == 'identity' else self.shortcut(x)
out = self.C1(x)
out = self.BN1(out)
out = self.relu(out)
out = self.C2(out)
out = self.BN2(out)
out = self.relu(out)
out = self.C3(out)
out = self.BN3(out)
out = out + short_out
out = self.relu(out)
return out
class ResNet(nn.Module):
maml = False #Default
def __init__(self,block,list_of_num_layers, list_of_out_dims, flatten = True):
# list_of_num_layers specifies number of layers in each stage
# list_of_out_dims specifies number of output channel for each stage
super(ResNet,self).__init__()
assert len(list_of_num_layers)==4, 'Can have only four stages'
if self.maml:
conv1 = Conv2d_fw(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
bn1 = BatchNorm2d_fw(64)
else:
conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
bn1 = nn.BatchNorm2d(64)
relu = nn.ReLU()
pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
init_layer(conv1)
init_layer(bn1)
trunk = [conv1, bn1, relu, pool1]
indim = 64
for i in range(4):
for j in range(list_of_num_layers[i]):
half_res = (i>=1) and (j==0)
B = block(indim, list_of_out_dims[i], half_res)
trunk.append(B)
indim = list_of_out_dims[i]
if flatten:
avgpool = nn.AvgPool2d(7)
trunk.append(avgpool)
trunk.append(Flatten())
self.final_feat_dim = indim
else:
self.final_feat_dim = [ indim, 7, 7]
self.trunk = nn.Sequential(*trunk)
def forward(self,x):
out = self.trunk(x)
return out
def ResNet50( flatten = True):
return ResNet(BottleneckBlock, [3,4,6,3], [256,512,1024,2048], flatten) | null |
1,129 | import torch
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
class BottleneckBlock(nn.Module):
maml = False #Default
def __init__(self, indim, outdim, half_res):
super(BottleneckBlock, self).__init__()
bottleneckdim = int(outdim/4)
self.indim = indim
self.outdim = outdim
if self.maml:
self.C1 = Conv2d_fw(indim, bottleneckdim, kernel_size=1, bias=False)
self.BN1 = BatchNorm2d_fw(bottleneckdim)
self.C2 = Conv2d_fw(bottleneckdim, bottleneckdim, kernel_size=3, stride=2 if half_res else 1,padding=1)
self.BN2 = BatchNorm2d_fw(bottleneckdim)
self.C3 = Conv2d_fw(bottleneckdim, outdim, kernel_size=1, bias=False)
self.BN3 = BatchNorm2d_fw(outdim)
else:
self.C1 = nn.Conv2d(indim, bottleneckdim, kernel_size=1, bias=False)
self.BN1 = nn.BatchNorm2d(bottleneckdim)
self.C2 = nn.Conv2d(bottleneckdim, bottleneckdim, kernel_size=3, stride=2 if half_res else 1,padding=1)
self.BN2 = nn.BatchNorm2d(bottleneckdim)
self.C3 = nn.Conv2d(bottleneckdim, outdim, kernel_size=1, bias=False)
self.BN3 = nn.BatchNorm2d(outdim)
self.relu = nn.ReLU()
self.parametrized_layers = [self.C1, self.BN1, self.C2, self.BN2, self.C3, self.BN3]
self.half_res = half_res
# if the input number of channels is not equal to the output, then need a 1x1 convolution
if indim!=outdim:
if self.maml:
self.shortcut = Conv2d_fw(indim, outdim, 1, stride=2 if half_res else 1, bias=False)
else:
self.shortcut = nn.Conv2d(indim, outdim, 1, stride=2 if half_res else 1, bias=False)
self.parametrized_layers.append(self.shortcut)
self.shortcut_type = '1x1'
else:
self.shortcut_type = 'identity'
for layer in self.parametrized_layers:
init_layer(layer)
def forward(self, x):
short_out = x if self.shortcut_type == 'identity' else self.shortcut(x)
out = self.C1(x)
out = self.BN1(out)
out = self.relu(out)
out = self.C2(out)
out = self.BN2(out)
out = self.relu(out)
out = self.C3(out)
out = self.BN3(out)
out = out + short_out
out = self.relu(out)
return out
class ResNet(nn.Module):
maml = False #Default
def __init__(self,block,list_of_num_layers, list_of_out_dims, flatten = True):
# list_of_num_layers specifies number of layers in each stage
# list_of_out_dims specifies number of output channel for each stage
super(ResNet,self).__init__()
assert len(list_of_num_layers)==4, 'Can have only four stages'
if self.maml:
conv1 = Conv2d_fw(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
bn1 = BatchNorm2d_fw(64)
else:
conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
bn1 = nn.BatchNorm2d(64)
relu = nn.ReLU()
pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
init_layer(conv1)
init_layer(bn1)
trunk = [conv1, bn1, relu, pool1]
indim = 64
for i in range(4):
for j in range(list_of_num_layers[i]):
half_res = (i>=1) and (j==0)
B = block(indim, list_of_out_dims[i], half_res)
trunk.append(B)
indim = list_of_out_dims[i]
if flatten:
avgpool = nn.AvgPool2d(7)
trunk.append(avgpool)
trunk.append(Flatten())
self.final_feat_dim = indim
else:
self.final_feat_dim = [ indim, 7, 7]
self.trunk = nn.Sequential(*trunk)
def forward(self,x):
out = self.trunk(x)
return out
def ResNet101( flatten = True):
return ResNet(BottleneckBlock, [3,4,23,3],[256,512,1024,2048], flatten) | null |
1,130 | import numpy as np
import os
import glob
import argparse
import backbone
def parse_args(script):
parser = argparse.ArgumentParser(description= 'few-shot script %s' %(script))
parser.add_argument('--dataset' , default='CUB', help='CUB/miniImagenet/cross/omniglot/cross_char')
parser.add_argument('--model' , default='Conv4', help='model: Conv{4|6} / ResNet{10|18|34|50|101}') # 50 and 101 are not used in the paper
parser.add_argument('--method' , default='baseline', help='baseline/baseline++/protonet/matchingnet/relationnet{_softmax}/maml{_approx}') #relationnet_softmax replace L2 norm with softmax to expedite training, maml_approx use first-order approximation in the gradient for efficiency
parser.add_argument('--train_n_way' , default=5, type=int, help='class num to classify for training') #baseline and baseline++ would ignore this parameter
parser.add_argument('--test_n_way' , default=5, type=int, help='class num to classify for testing (validation) ') #baseline and baseline++ only use this parameter in finetuning
parser.add_argument('--n_shot' , default=5, type=int, help='number of labeled data in each class, same as n_support') #baseline and baseline++ only use this parameter in finetuning
parser.add_argument('--train_aug' , action='store_true', help='perform data augmentation or not during training ') #still required for save_features.py and test.py to find the model path correctly
if script == 'train':
parser.add_argument('--num_classes' , default=200, type=int, help='total number of classes in softmax, only used in baseline') #make it larger than the maximum label value in base class
parser.add_argument('--save_freq' , default=50, type=int, help='Save frequency')
parser.add_argument('--start_epoch' , default=0, type=int,help ='Starting epoch')
parser.add_argument('--stop_epoch' , default=-1, type=int, help ='Stopping epoch') #for meta-learning methods, each epoch contains 100 episodes. The default epoch number is dataset dependent. See train.py
parser.add_argument('--resume' , action='store_true', help='continue from previous trained model with largest epoch')
parser.add_argument('--warmup' , action='store_true', help='continue from baseline, neglected if resume is true') #never used in the paper
elif script == 'save_features':
parser.add_argument('--split' , default='novel', help='base/val/novel') #default novel, but you can also test base/val class accuracy if you want
parser.add_argument('--save_iter', default=-1, type=int,help ='save feature from the model trained in x epoch, use the best model if x is -1')
elif script == 'test':
parser.add_argument('--split' , default='novel', help='base/val/novel') #default novel, but you can also test base/val class accuracy if you want
parser.add_argument('--save_iter', default=-1, type=int,help ='saved feature from the model trained in x epoch, use the best model if x is -1')
parser.add_argument('--adaptation' , action='store_true', help='further adaptation in test time or not')
else:
raise ValueError('Unknown script')
return parser.parse_args() | null |
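A usage sketch for `parse_args` above, simulating a command line for the train script; the flag values are illustrative.

```python
import sys

sys.argv = ['train.py', '--dataset', 'miniImagenet', '--model', 'ResNet10',
            '--method', 'protonet', '--n_shot', '5', '--train_aug']
params = parse_args('train')
print(params.dataset, params.train_n_way, params.n_shot)  # miniImagenet 5 5
```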
1,131 | import numpy as np
import os
import glob
import argparse
import backbone
def get_assigned_file(checkpoint_dir,num):
assign_file = os.path.join(checkpoint_dir, '{:d}.tar'.format(num))
return assign_file | null |
1,132 | import numpy as np
import os
import glob
import argparse
import backbone
def get_resume_file(checkpoint_dir):
filelist = glob.glob(os.path.join(checkpoint_dir, '*.tar'))
if len(filelist) == 0:
return None
filelist = [ x for x in filelist if os.path.basename(x) != 'best_model.tar' ]
epochs = np.array([int(os.path.splitext(os.path.basename(x))[0]) for x in filelist])
max_epoch = np.max(epochs)
resume_file = os.path.join(checkpoint_dir, '{:d}.tar'.format(max_epoch))
return resume_file
def get_best_file(checkpoint_dir):
best_file = os.path.join(checkpoint_dir, 'best_model.tar')
if os.path.isfile(best_file):
return best_file
else:
return get_resume_file(checkpoint_dir) | null |
1,133 | import numpy as np
import torch
from torch.autograd import Variable
import os
import glob
import h5py
import configs
import backbone
from data.datamgr import SimpleDataManager
from methods.baselinetrain import BaselineTrain
from methods.baselinefinetune import BaselineFinetune
from methods.protonet import ProtoNet
from methods.matchingnet import MatchingNet
from methods.relationnet import RelationNet
from methods.maml import MAML
from io_utils import model_dict, parse_args, get_resume_file, get_best_file, get_assigned_file
def save_features(model, data_loader, outfile ):
f = h5py.File(outfile, 'w')
max_count = len(data_loader)*data_loader.batch_size
all_labels = f.create_dataset('all_labels',(max_count,), dtype='i')
all_feats=None
count=0
for i, (x,y) in enumerate(data_loader):
if i%10 == 0:
print('{:d}/{:d}'.format(i, len(data_loader)))
x = x.cuda()
x_var = Variable(x)
feats = model(x_var)
if all_feats is None:
all_feats = f.create_dataset('all_feats', [max_count] + list( feats.size()[1:]) , dtype='f')
all_feats[count:count+feats.size(0)] = feats.data.cpu().numpy()
all_labels[count:count+feats.size(0)] = y.cpu().numpy()
count = count + feats.size(0)
count_var = f.create_dataset('count', (1,), dtype='i')
count_var[0] = count
f.close() | null |
1,134 | import torch
import numpy as np
import h5py
class SimpleHDF5Dataset:
def __init__(self, file_handle = None):
if file_handle == None:
self.f = ''
self.all_feats_dset = []
self.all_labels = []
self.total = 0
else:
self.f = file_handle
self.all_feats_dset = self.f['all_feats'][...]
self.all_labels = self.f['all_labels'][...]
self.total = self.f['count'][0]
# print('here')
def __getitem__(self, i):
return torch.Tensor(self.all_feats_dset[i,:]), int(self.all_labels[i])
def __len__(self):
return self.total
def init_loader(filename):
with h5py.File(filename, 'r') as f:
fileset = SimpleHDF5Dataset(f)
#labels = [ l for l in fileset.all_labels if l != 0]
feats = fileset.all_feats_dset
labels = fileset.all_labels
while np.sum(feats[-1]) == 0:
feats = np.delete(feats,-1,axis = 0)
labels = np.delete(labels,-1,axis = 0)
class_list = np.unique(np.array(labels)).tolist()
inds = range(len(labels))
cl_data_file = {}
for cl in class_list:
cl_data_file[cl] = []
for ind in inds:
cl_data_file[labels[ind]].append( feats[ind])
return cl_data_file | null |
1,135 | import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from methods.meta_template import MetaTemplate
def euclidean_dist( x, y):
# x: N x D
# y: M x D
n = x.size(0)
m = y.size(0)
d = x.size(1)
assert d == y.size(1)
x = x.unsqueeze(1).expand(n, m, d)
y = y.unsqueeze(0).expand(n, m, d)
return torch.pow(x - y, 2).sum(2) | null |
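A worked example for `euclidean_dist` above; it returns squared Euclidean distances between every row of `x` (N x D) and every row of `y` (M x D).

```python
import torch

x = torch.tensor([[0., 0.], [1., 1.]])   # N = 2, D = 2
y = torch.tensor([[0., 0.], [3., 4.]])   # M = 2, D = 2
print(euclidean_dist(x, y))
# tensor([[ 0., 25.],
#         [ 2., 13.]])
```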
1,136 | import backbone
import utils
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
def DBindex(cl_data_file):
#For the definition of the Davies-Bouldin index (DBindex), see https://en.wikipedia.org/wiki/Davies%E2%80%93Bouldin_index
#The DB index reflects intra-class variation relative to inter-class separation
#As baseline/baseline++ do not train a few-shot classifier during training, this is an alternative metric for evaluating the validation set
#Empirically, this only works for the CUB dataset but not for the miniImagenet dataset
class_list = cl_data_file.keys()
cl_num= len(class_list)
cl_means = []
stds = []
DBs = []
for cl in class_list:
cl_means.append( np.mean(cl_data_file[cl], axis = 0) )
stds.append( np.sqrt(np.mean( np.sum(np.square( cl_data_file[cl] - cl_means[-1]), axis = 1))))
mu_i = np.tile( np.expand_dims( np.array(cl_means), axis = 0), (len(class_list),1,1) )
mu_j = np.transpose(mu_i,(1,0,2))
mdists = np.sqrt(np.sum(np.square(mu_i - mu_j), axis = 2))
for i in range(cl_num):
DBs.append( np.max([ (stds[i]+ stds[j])/mdists[i,j] for j in range(cl_num) if j != i ]) )
return np.mean(DBs) | null |
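A tiny worked example for `DBindex` above: two tight, well-separated clusters yield a small index, and the value grows as clusters spread or overlap. The data are illustrative.

```python
import numpy as np

cl_data_file = {
    0: np.array([[0.0, 0.0], [0.0, 2.0]]),    # mean (0, 1), within-class std 1
    1: np.array([[10.0, 0.0], [10.0, 2.0]]),  # mean (10, 1), within-class std 1
}
print(DBindex(cl_data_file))  # (1 + 1) / 10 = 0.2
```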
1,137 | import logging
import os
import time
import numpy as np
import numpy.ma as ma
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.nn import functional as F
from utils.utils import AverageMeter
from utils.utils import get_confusion_matrix
from utils.utils import adjust_learning_rate
import utils.distributed as dist
def reduce_tensor(inp):
"""
Reduce the loss from all processes so that
process with rank 0 has the averaged results.
"""
world_size = dist.get_world_size()
if world_size < 2:
return inp
with torch.no_grad():
reduced_inp = inp
torch.distributed.reduce(reduced_inp, dst=0)
return reduced_inp / world_size
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.initialized = False
self.val = None
self.avg = None
self.sum = None
self.count = None
def initialize(self, val, weight):
self.val = val
self.avg = val
self.sum = val * weight
self.count = weight
self.initialized = True
def update(self, val, weight=1):
if not self.initialized:
self.initialize(val, weight)
else:
self.add(val, weight)
def add(self, val, weight):
self.val = val
self.sum += val * weight
self.count += weight
self.avg = self.sum / self.count
def value(self):
return self.val
def average(self):
return self.avg
def adjust_learning_rate(optimizer, base_lr, max_iters,
cur_iters, power=0.9, nbb_mult=10):
lr = base_lr*((1-float(cur_iters)/max_iters)**(power))
optimizer.param_groups[0]['lr'] = lr
if len(optimizer.param_groups) == 2:
optimizer.param_groups[1]['lr'] = lr * nbb_mult
return lr
def train(config, epoch, num_epoch, epoch_iters, base_lr,
num_iters, trainloader, optimizer, model, writer_dict):
# Training
model.train()
batch_time = AverageMeter()
ave_loss = AverageMeter()
tic = time.time()
cur_iters = epoch*epoch_iters
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
for i_iter, batch in enumerate(trainloader, 0):
images, labels, _, _ = batch
images = images.cuda()
labels = labels.long().cuda()
losses, _ = model(images, labels)
loss = losses.mean()
if dist.is_distributed():
reduced_loss = reduce_tensor(loss)
else:
reduced_loss = loss
model.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - tic)
tic = time.time()
# update average loss
ave_loss.update(reduced_loss.item())
lr = adjust_learning_rate(optimizer,
base_lr,
num_iters,
i_iter+cur_iters)
if i_iter % config.PRINT_FREQ == 0 and dist.get_rank() == 0:
msg = 'Epoch: [{}/{}] Iter:[{}/{}], Time: {:.2f}, ' \
'lr: {}, Loss: {:.6f}' .format(
epoch, num_epoch, i_iter, epoch_iters,
batch_time.average(), [x['lr'] for x in optimizer.param_groups], ave_loss.average())
logging.info(msg)
writer.add_scalar('train_loss', ave_loss.average(), global_steps)
writer_dict['train_global_steps'] = global_steps + 1 | null |
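A numeric sketch of the poly schedule implemented by `adjust_learning_rate` above: `lr = base_lr * (1 - cur_iters / max_iters) ** power`. The optimizer and numbers are illustrative.

```python
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
lr = adjust_learning_rate(optimizer, base_lr=0.01, max_iters=1000,
                          cur_iters=500, power=0.9)
print(round(lr, 6))  # 0.01 * 0.5**0.9 ≈ 0.005359
```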
1,138 | import logging
import os
import time
import numpy as np
import numpy.ma as ma
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.nn import functional as F
from utils.utils import AverageMeter
from utils.utils import get_confusion_matrix
from utils.utils import adjust_learning_rate
import utils.distributed as dist
def reduce_tensor(inp):
class AverageMeter(object):
def __init__(self):
def initialize(self, val, weight):
def update(self, val, weight=1):
def add(self, val, weight):
def value(self):
def average(self):
def get_confusion_matrix(label, pred, size, num_class, ignore=-1):
def validate(config, testloader, model, writer_dict):
model.eval()
ave_loss = AverageMeter()
nums = config.MODEL.NUM_OUTPUTS
confusion_matrix = np.zeros(
(config.DATASET.NUM_CLASSES, config.DATASET.NUM_CLASSES, nums))
with torch.no_grad():
for idx, batch in enumerate(testloader):
image, label, _, _ = batch
size = label.size()
image = image.cuda()
label = label.long().cuda()
losses, pred = model(image, label)
if not isinstance(pred, (list, tuple)):
pred = [pred]
for i, x in enumerate(pred):
x = F.interpolate(
input=x, size=size[-2:],
mode='bilinear', align_corners=config.MODEL.ALIGN_CORNERS
)
confusion_matrix[..., i] += get_confusion_matrix(
label,
x,
size,
config.DATASET.NUM_CLASSES,
config.TRAIN.IGNORE_LABEL
)
if idx % 10 == 0:
print(idx)
loss = losses.mean()
if dist.is_distributed():
reduced_loss = reduce_tensor(loss)
else:
reduced_loss = loss
ave_loss.update(reduced_loss.item())
if dist.is_distributed():
confusion_matrix = torch.from_numpy(confusion_matrix).cuda()
reduced_confusion_matrix = reduce_tensor(confusion_matrix)
confusion_matrix = reduced_confusion_matrix.cpu().numpy()
for i in range(nums):
pos = confusion_matrix[..., i].sum(1)
res = confusion_matrix[..., i].sum(0)
tp = np.diag(confusion_matrix[..., i])
IoU_array = (tp / np.maximum(1.0, pos + res - tp))
mean_IoU = IoU_array.mean()
if dist.get_rank() <= 0:
logging.info('{} {} {}'.format(i, IoU_array, mean_IoU))
writer = writer_dict['writer']
global_steps = writer_dict['valid_global_steps']
writer.add_scalar('valid_loss', ave_loss.average(), global_steps)
writer.add_scalar('valid_mIoU', mean_IoU, global_steps)
writer_dict['valid_global_steps'] = global_steps + 1
return ave_loss.average(), mean_IoU, IoU_array | null |
1,139 | import logging
import os
import time
import numpy as np
import numpy.ma as ma
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.nn import functional as F
from utils.utils import AverageMeter
from utils.utils import get_confusion_matrix
from utils.utils import adjust_learning_rate
import utils.distributed as dist
def get_confusion_matrix(label, pred, size, num_class, ignore=-1):
"""
Calculate the confusion matrix from the given label and pred
"""
output = pred.cpu().numpy().transpose(0, 2, 3, 1)
seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)
seg_gt = np.asarray(
label.cpu().numpy()[:, :size[-2], :size[-1]], dtype=int)
ignore_index = seg_gt != ignore
seg_gt = seg_gt[ignore_index]
seg_pred = seg_pred[ignore_index]
index = (seg_gt * num_class + seg_pred).astype('int32')
label_count = np.bincount(index)
confusion_matrix = np.zeros((num_class, num_class))
for i_label in range(num_class):
for i_pred in range(num_class):
cur_index = i_label * num_class + i_pred
if cur_index < len(label_count):
confusion_matrix[i_label,
i_pred] = label_count[cur_index]
return confusion_matrix
def testval(config, test_dataset, testloader, model,
sv_dir='', sv_pred=False):
model.eval()
confusion_matrix = np.zeros(
(config.DATASET.NUM_CLASSES, config.DATASET.NUM_CLASSES))
with torch.no_grad():
for index, batch in enumerate(tqdm(testloader)):
image, label, _, name, *border_padding = batch
size = label.size()
pred = test_dataset.multi_scale_inference(
config,
model,
image,
scales=config.TEST.SCALE_LIST,
flip=config.TEST.FLIP_TEST)
if len(border_padding) > 0:
border_padding = border_padding[0]
pred = pred[:, :, 0:pred.size(2) - border_padding[0], 0:pred.size(3) - border_padding[1]]
if pred.size()[-2] != size[-2] or pred.size()[-1] != size[-1]:
pred = F.interpolate(
pred, size[-2:],
mode='bilinear', align_corners=config.MODEL.ALIGN_CORNERS
)
confusion_matrix += get_confusion_matrix(
label,
pred,
size,
config.DATASET.NUM_CLASSES,
config.TRAIN.IGNORE_LABEL)
if sv_pred:
sv_path = os.path.join(sv_dir, 'test_results')
if not os.path.exists(sv_path):
os.mkdir(sv_path)
test_dataset.save_pred(pred, sv_path, name)
if index % 100 == 0:
logging.info('processing: %d images' % index)
pos = confusion_matrix.sum(1)
res = confusion_matrix.sum(0)
tp = np.diag(confusion_matrix)
IoU_array = (tp / np.maximum(1.0, pos + res - tp))
mean_IoU = IoU_array.mean()
logging.info('mIoU: %.4f' % (mean_IoU))
pos = confusion_matrix.sum(1)
res = confusion_matrix.sum(0)
tp = np.diag(confusion_matrix)
pixel_acc = tp.sum()/pos.sum()
mean_acc = (tp/np.maximum(1.0, pos)).mean()
IoU_array = (tp / np.maximum(1.0, pos + res - tp))
mean_IoU = IoU_array.mean()
return mean_IoU, IoU_array, pixel_acc, mean_acc | null |
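A tiny worked example for `get_confusion_matrix` above, using a single 2x2 image with two classes; rows index ground truth, columns index predictions. The tensors are illustrative.

```python
import numpy as np
import torch

label = torch.tensor([[[0, 1],
                       [1, 0]]])        # ground truth, shape (N, H, W) = (1, 2, 2)
logits = torch.zeros(1, 2, 2, 2)        # predictions, shape (N, C, H, W)
logits[0, 1, 0, 1] = 1.0                # predict class 1 only at pixel (0, 1)
cm = get_confusion_matrix(label, logits, size=label.size(), num_class=2)
print(cm)
# [[2. 0.]
#  [1. 1.]]
```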
1,140 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from yacs.config import CfgNode as CN
def update_config(cfg, args):
cfg.defrost()
cfg.merge_from_file(args.cfg)
cfg.merge_from_list(args.opts)
cfg.freeze() | null |
1,141 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import functools
import numpy as np
import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
from .bn_helper import BatchNorm2d, BatchNorm2d_class, relu_inplace
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False) | 3x3 convolution with padding |
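A quick shape check for `conv3x3` above; with stride 2 and padding 1, the 3x3 convolution halves the spatial resolution. The input size is illustrative.

```python
import torch

conv = conv3x3(in_planes=3, out_planes=16, stride=2)
x = torch.randn(1, 3, 32, 32)
print(conv(x).shape)  # torch.Size([1, 16, 16, 16])
```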
1,142 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import functools
import numpy as np
import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
from .bn_helper import BatchNorm2d, BatchNorm2d_class, relu_inplace
class HighResolutionNet(nn.Module):
def __init__(self, config, **kwargs):
global ALIGN_CORNERS
extra = config.MODEL.EXTRA
super(HighResolutionNet, self).__init__()
ALIGN_CORNERS = config.MODEL.ALIGN_CORNERS
# stem net
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn2 = BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=relu_inplace)
self.stage1_cfg = extra['STAGE1']
num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
block = blocks_dict[self.stage1_cfg['BLOCK']]
num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
stage1_out_channel = block.expansion*num_channels
self.stage2_cfg = extra['STAGE2']
num_channels = self.stage2_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage2_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition1 = self._make_transition_layer(
[stage1_out_channel], num_channels)
self.stage2, pre_stage_channels = self._make_stage(
self.stage2_cfg, num_channels)
self.stage3_cfg = extra['STAGE3']
num_channels = self.stage3_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage3_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition2 = self._make_transition_layer(
pre_stage_channels, num_channels)
self.stage3, pre_stage_channels = self._make_stage(
self.stage3_cfg, num_channels)
self.stage4_cfg = extra['STAGE4']
num_channels = self.stage4_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage4_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition3 = self._make_transition_layer(
pre_stage_channels, num_channels)
self.stage4, pre_stage_channels = self._make_stage(
self.stage4_cfg, num_channels, multi_scale_output=True)
last_inp_channels = int(np.sum(pre_stage_channels))
self.last_layer = nn.Sequential(
nn.Conv2d(
in_channels=last_inp_channels,
out_channels=last_inp_channels,
kernel_size=1,
stride=1,
padding=0),
BatchNorm2d(last_inp_channels, momentum=BN_MOMENTUM),
nn.ReLU(inplace=relu_inplace),
nn.Conv2d(
in_channels=last_inp_channels,
out_channels=config.DATASET.NUM_CLASSES,
kernel_size=extra.FINAL_CONV_KERNEL,
stride=1,
padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0)
)
def _make_transition_layer(
self, num_channels_pre_layer, num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(nn.Sequential(
nn.Conv2d(num_channels_pre_layer[i],
num_channels_cur_layer[i],
3,
1,
1,
bias=False),
BatchNorm2d(
num_channels_cur_layer[i], momentum=BN_MOMENTUM),
nn.ReLU(inplace=relu_inplace)))
else:
transition_layers.append(None)
else:
conv3x3s = []
for j in range(i+1-num_branches_pre):
inchannels = num_channels_pre_layer[-1]
outchannels = num_channels_cur_layer[i] \
if j == i-num_branches_pre else inchannels
conv3x3s.append(nn.Sequential(
nn.Conv2d(
inchannels, outchannels, 3, 2, 1, bias=False),
BatchNorm2d(outchannels, momentum=BN_MOMENTUM),
nn.ReLU(inplace=relu_inplace)))
transition_layers.append(nn.Sequential(*conv3x3s))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, num_inchannels,
multi_scale_output=True):
num_modules = layer_config['NUM_MODULES']
num_branches = layer_config['NUM_BRANCHES']
num_blocks = layer_config['NUM_BLOCKS']
num_channels = layer_config['NUM_CHANNELS']
block = blocks_dict[layer_config['BLOCK']]
fuse_method = layer_config['FUSE_METHOD']
modules = []
for i in range(num_modules):
# multi_scale_output is only used last module
if not multi_scale_output and i == num_modules - 1:
reset_multi_scale_output = False
else:
reset_multi_scale_output = True
modules.append(
HighResolutionModule(num_branches,
block,
num_blocks,
num_inchannels,
num_channels,
fuse_method,
reset_multi_scale_output)
)
num_inchannels = modules[-1].get_num_inchannels()
return nn.Sequential(*modules), num_inchannels
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg['NUM_BRANCHES']):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg['NUM_BRANCHES']):
if self.transition2[i] is not None:
if i < self.stage2_cfg['NUM_BRANCHES']:
x_list.append(self.transition2[i](y_list[i]))
else:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg['NUM_BRANCHES']):
if self.transition3[i] is not None:
if i < self.stage3_cfg['NUM_BRANCHES']:
x_list.append(self.transition3[i](y_list[i]))
else:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
x = self.stage4(x_list)
# Upsampling
x0_h, x0_w = x[0].size(2), x[0].size(3)
x1 = F.interpolate(x[1], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
x2 = F.interpolate(x[2], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
x3 = F.interpolate(x[3], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
x = torch.cat([x[0], x1, x2, x3], 1)
x = self.last_layer(x)
return x
def init_weights(self, pretrained='',):
logger.info('=> init weights from normal distribution')
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
elif isinstance(m, BatchNorm2d_class):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if os.path.isfile(pretrained):
pretrained_dict = torch.load(pretrained)
logger.info('=> loading pretrained model {}'.format(pretrained))
model_dict = self.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items()
if k in model_dict.keys()}
for k, _ in pretrained_dict.items():
logger.info(
'=> loading {} pretrained model {}'.format(k, pretrained))
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
def get_seg_model(cfg, **kwargs):
model = HighResolutionNet(cfg, **kwargs)
model.init_weights(cfg.MODEL.PRETRAINED)
return model | null |
1,144 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import functools
import numpy as np
import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
from .bn_helper import BatchNorm2d, BatchNorm2d_class, relu_inplace
class HighResolutionNet(nn.Module):
def __init__(self, config, **kwargs):
global ALIGN_CORNERS
extra = config.MODEL.EXTRA
super(HighResolutionNet, self).__init__()
ALIGN_CORNERS = config.MODEL.ALIGN_CORNERS
# stem net
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn2 = BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=relu_inplace)
self.stage1_cfg = extra['STAGE1']
num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
block = blocks_dict[self.stage1_cfg['BLOCK']]
num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
stage1_out_channel = block.expansion*num_channels
self.stage2_cfg = extra['STAGE2']
num_channels = self.stage2_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage2_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition1 = self._make_transition_layer(
[stage1_out_channel], num_channels)
self.stage2, pre_stage_channels = self._make_stage(
self.stage2_cfg, num_channels)
self.stage3_cfg = extra['STAGE3']
num_channels = self.stage3_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage3_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition2 = self._make_transition_layer(
pre_stage_channels, num_channels)
self.stage3, pre_stage_channels = self._make_stage(
self.stage3_cfg, num_channels)
self.stage4_cfg = extra['STAGE4']
num_channels = self.stage4_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage4_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition3 = self._make_transition_layer(
pre_stage_channels, num_channels)
self.stage4, pre_stage_channels = self._make_stage(
self.stage4_cfg, num_channels, multi_scale_output=True)
last_inp_channels = int(np.sum(pre_stage_channels))
ocr_mid_channels = config.MODEL.OCR.MID_CHANNELS
ocr_key_channels = config.MODEL.OCR.KEY_CHANNELS
self.conv3x3_ocr = nn.Sequential(
nn.Conv2d(last_inp_channels, ocr_mid_channels,
kernel_size=3, stride=1, padding=1),
BatchNorm2d(ocr_mid_channels),
nn.ReLU(inplace=relu_inplace),
)
self.ocr_gather_head = SpatialGather_Module(config.DATASET.NUM_CLASSES)
self.ocr_distri_head = SpatialOCR_Module(in_channels=ocr_mid_channels,
key_channels=ocr_key_channels,
out_channels=ocr_mid_channels,
scale=1,
dropout=0.05,
)
self.cls_head = nn.Conv2d(
ocr_mid_channels, config.DATASET.NUM_CLASSES, kernel_size=1, stride=1, padding=0, bias=True)
self.aux_head = nn.Sequential(
nn.Conv2d(last_inp_channels, last_inp_channels,
kernel_size=1, stride=1, padding=0),
BatchNorm2d(last_inp_channels),
nn.ReLU(inplace=relu_inplace),
nn.Conv2d(last_inp_channels, config.DATASET.NUM_CLASSES,
kernel_size=1, stride=1, padding=0, bias=True)
)
def _make_transition_layer(
self, num_channels_pre_layer, num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(nn.Sequential(
nn.Conv2d(num_channels_pre_layer[i],
num_channels_cur_layer[i],
3,
1,
1,
bias=False),
BatchNorm2d(
num_channels_cur_layer[i], momentum=BN_MOMENTUM),
nn.ReLU(inplace=relu_inplace)))
else:
transition_layers.append(None)
else:
conv3x3s = []
for j in range(i+1-num_branches_pre):
inchannels = num_channels_pre_layer[-1]
outchannels = num_channels_cur_layer[i] \
if j == i-num_branches_pre else inchannels
conv3x3s.append(nn.Sequential(
nn.Conv2d(
inchannels, outchannels, 3, 2, 1, bias=False),
BatchNorm2d(outchannels, momentum=BN_MOMENTUM),
nn.ReLU(inplace=relu_inplace)))
transition_layers.append(nn.Sequential(*conv3x3s))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, num_inchannels,
multi_scale_output=True):
num_modules = layer_config['NUM_MODULES']
num_branches = layer_config['NUM_BRANCHES']
num_blocks = layer_config['NUM_BLOCKS']
num_channels = layer_config['NUM_CHANNELS']
block = blocks_dict[layer_config['BLOCK']]
fuse_method = layer_config['FUSE_METHOD']
modules = []
for i in range(num_modules):
# multi_scale_output is only used last module
if not multi_scale_output and i == num_modules - 1:
reset_multi_scale_output = False
else:
reset_multi_scale_output = True
modules.append(
HighResolutionModule(num_branches,
block,
num_blocks,
num_inchannels,
num_channels,
fuse_method,
reset_multi_scale_output)
)
num_inchannels = modules[-1].get_num_inchannels()
return nn.Sequential(*modules), num_inchannels
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg['NUM_BRANCHES']):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg['NUM_BRANCHES']):
if self.transition2[i] is not None:
if i < self.stage2_cfg['NUM_BRANCHES']:
x_list.append(self.transition2[i](y_list[i]))
else:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg['NUM_BRANCHES']):
if self.transition3[i] is not None:
if i < self.stage3_cfg['NUM_BRANCHES']:
x_list.append(self.transition3[i](y_list[i]))
else:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
x = self.stage4(x_list)
# Upsampling
x0_h, x0_w = x[0].size(2), x[0].size(3)
x1 = F.interpolate(x[1], size=(x0_h, x0_w),
mode='bilinear', align_corners=ALIGN_CORNERS)
x2 = F.interpolate(x[2], size=(x0_h, x0_w),
mode='bilinear', align_corners=ALIGN_CORNERS)
x3 = F.interpolate(x[3], size=(x0_h, x0_w),
mode='bilinear', align_corners=ALIGN_CORNERS)
feats = torch.cat([x[0], x1, x2, x3], 1)
out_aux_seg = []
# ocr
out_aux = self.aux_head(feats)
# compute contrast feature
feats = self.conv3x3_ocr(feats)
context = self.ocr_gather_head(feats, out_aux)
feats = self.ocr_distri_head(feats, context)
out = self.cls_head(feats)
out_aux_seg.append(out_aux)
out_aux_seg.append(out)
return out_aux_seg
def init_weights(self, pretrained='',):
logger.info('=> init weights from normal distribution')
for name, m in self.named_modules():
if any(part in name for part in {'cls', 'aux', 'ocr'}):
# print('skipped', name)
continue
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
elif isinstance(m, BatchNorm2d_class):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if os.path.isfile(pretrained):
pretrained_dict = torch.load(pretrained, map_location={'cuda:0': 'cpu'})
logger.info('=> loading pretrained model {}'.format(pretrained))
model_dict = self.state_dict()
pretrained_dict = {k.replace('last_layer', 'aux_head').replace('model.', ''): v for k, v in pretrained_dict.items()}
print(set(model_dict) - set(pretrained_dict))
print(set(pretrained_dict) - set(model_dict))
pretrained_dict = {k: v for k, v in pretrained_dict.items()
if k in model_dict.keys()}
# for k, _ in pretrained_dict.items():
# logger.info(
# '=> loading {} pretrained model {}'.format(k, pretrained))
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
elif pretrained:
raise RuntimeError('No such file {}'.format(pretrained))
def get_seg_model(cfg, **kwargs):
model = HighResolutionNet(cfg, **kwargs)
model.init_weights(cfg.MODEL.PRETRAINED)
return model | null |
1,145 | import os
import logging
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.utils import load_state_dict_from_url
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation) | 3x3 convolution with padding |
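A minimal usage check for conv3x3 (shapes chosen arbitrarily): because padding equals dilation, the spatial size is preserved whenever stride=1.
import torch
layer = conv3x3(in_planes=16, out_planes=32, stride=1, dilation=2)
x = torch.randn(1, 16, 64, 64)
print(layer(x).shape)  # torch.Size([1, 32, 64, 64])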
1,146 | import os
import logging
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.utils import load_state_dict_from_url
The provided code snippet includes necessary dependencies for implementing the `conv1x1` function. Write a Python function `def conv1x1(in_planes, out_planes, stride=1)` to solve the following problem:
1x1 convolution
Here is the function:
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) | 1x1 convolution |
1,147 | import os
import logging
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.utils import load_state_dict_from_url
def _hrnet(arch, pretrained, progress, **kwargs):
try:
from ..config.hrnet_config import MODEL_CONFIGS
except ImportError:
from segmentation.config.hrnet_config import MODEL_CONFIGS
model = HighResolutionNet(MODEL_CONFIGS[arch], **kwargs)
if pretrained:
model_url = model_urls[arch]
state_dict = load_state_dict_from_url(model_url,
progress=progress)
model.load_state_dict(state_dict, strict=False)
return model
The provided code snippet includes necessary dependencies for implementing the `hrnet18` function. Write a Python function `def hrnet18(pretrained=True, progress=True, **kwargs)` to solve the following problem:
r"""HRNet-18 model
Here is the function:
def hrnet18(pretrained=True, progress=True, **kwargs):
r"""HRNet-18 model
"""
return _hrnet('hrnet18', pretrained, progress,
**kwargs) | r"""HRNet-18 model |
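A small usage sketch, assuming the surrounding segmentation package (and its MODEL_CONFIGS entry for 'hrnet18') is importable as in _hrnet above; pretrained=False skips the model_urls download. The exact structure of the returned features depends on HighResolutionNet.forward, which is defined elsewhere.
import torch
backbone = hrnet18(pretrained=False)
features = backbone(torch.randn(1, 3, 224, 224))  # multi-resolution HRNet features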
1,148 | import os
import logging
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.utils import load_state_dict_from_url
def _hrnet(arch, pretrained, progress, **kwargs):
try:
from ..config.hrnet_config import MODEL_CONFIGS
except ImportError:
from segmentation.config.hrnet_config import MODEL_CONFIGS
model = HighResolutionNet(MODEL_CONFIGS[arch], **kwargs)
if pretrained:
model_url = model_urls[arch]
state_dict = load_state_dict_from_url(model_url,
progress=progress)
model.load_state_dict(state_dict, strict=False)
return model
The provided code snippet includes necessary dependencies for implementing the `hrnet32` function. Write a Python function `def hrnet32(pretrained=True, progress=True, **kwargs)` to solve the following problem:
r"""HRNet-32 model
Here is the function:
def hrnet32(pretrained=True, progress=True, **kwargs):
r"""HRNet-32 model
"""
return _hrnet('hrnet32', pretrained, progress,
**kwargs) | r"""HRNet-32 model |
1,149 | import os
import logging
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.utils import load_state_dict_from_url
def _hrnet(arch, pretrained, progress, **kwargs):
try:
from ..config.hrnet_config import MODEL_CONFIGS
except ImportError:
from segmentation.config.hrnet_config import MODEL_CONFIGS
model = HighResolutionNet(MODEL_CONFIGS[arch], **kwargs)
if pretrained:
model_url = model_urls[arch]
state_dict = load_state_dict_from_url(model_url,
progress=progress)
model.load_state_dict(state_dict, strict=False)
return model
The provided code snippet includes necessary dependencies for implementing the `hrnet48` function. Write a Python function `def hrnet48(pretrained=True, progress=True, **kwargs)` to solve the following problem:
r"""HRNet-48 model
Here is the function:
def hrnet48(pretrained=True, progress=True, **kwargs):
r"""HRNet-48 model
"""
return _hrnet('hrnet48', pretrained, progress,
**kwargs) | r"""HRNet-48 model |
1,150 | from os import path
import torch.autograd as autograd
import torch.cuda.comm as comm
from torch.autograd.function import once_differentiable
from torch.utils.cpp_extension import load
def _check(fn, *args, **kwargs):
success = fn(*args, **kwargs)
if not success:
raise RuntimeError("CUDA Error encountered in {}".format(fn)) | null |
1,151 | from os import path
import torch.autograd as autograd
import torch.cuda.comm as comm
from torch.autograd.function import once_differentiable
from torch.utils.cpp_extension import load
def _broadcast_shape(x):
out_size = []
for i, s in enumerate(x.size()):
if i != 1:
out_size.append(1)
else:
out_size.append(s)
return out_size | null |
1,152 | from os import path
import torch.autograd as autograd
import torch.cuda.comm as comm
from torch.autograd.function import once_differentiable
from torch.utils.cpp_extension import load
def _reduce(x):
if len(x.size()) == 2:
return x.sum(dim=0)
else:
n, c = x.size()[0:2]
return x.contiguous().view((n, c, -1)).sum(2).sum(0) | null |
1,153 | from os import path
import torch.autograd as autograd
import torch.cuda.comm as comm
from torch.autograd.function import once_differentiable
from torch.utils.cpp_extension import load
def _count_samples(x):
count = 1
for i, s in enumerate(x.size()):
if i != 1:
count *= s
return count | null |
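A tiny illustration of _count_samples: every dimension except dim 1 (channels) is multiplied together, i.e. the number of values contributing to each per-channel batch-norm statistic.
import torch
x = torch.randn(4, 8, 16, 16)  # N=4, C=8, H=W=16
print(_count_samples(x))       # 4 * 16 * 16 = 1024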
1,154 | from os import path
import torch.autograd as autograd
import torch.cuda.comm as comm
from torch.autograd.function import once_differentiable
from torch.utils.cpp_extension import load
_backend = load(name="inplace_abn",
extra_cflags=["-O3"],
sources=[path.join(_src_path, f) for f in [
"inplace_abn.cpp",
"inplace_abn_cpu.cpp",
"inplace_abn_cuda.cu"
]],
extra_cuda_cflags=["--expt-extended-lambda"])
ACT_LEAKY_RELU = "leaky_relu"
ACT_ELU = "elu"
ACT_NONE = "none"
def _act_forward(ctx, x):
if ctx.activation == ACT_LEAKY_RELU:
_backend.leaky_relu_forward(x, ctx.slope)
elif ctx.activation == ACT_ELU:
_backend.elu_forward(x)
elif ctx.activation == ACT_NONE:
pass | null |
1,155 | from os import path
import torch.autograd as autograd
import torch.cuda.comm as comm
from torch.autograd.function import once_differentiable
from torch.utils.cpp_extension import load
_backend = load(name="inplace_abn",
extra_cflags=["-O3"],
sources=[path.join(_src_path, f) for f in [
"inplace_abn.cpp",
"inplace_abn_cpu.cpp",
"inplace_abn_cuda.cu"
]],
extra_cuda_cflags=["--expt-extended-lambda"])
ACT_LEAKY_RELU = "leaky_relu"
ACT_ELU = "elu"
ACT_NONE = "none"
def _act_backward(ctx, x, dx):
if ctx.activation == ACT_LEAKY_RELU:
_backend.leaky_relu_backward(x, dx, ctx.slope)
elif ctx.activation == ACT_ELU:
_backend.elu_backward(x, dx)
elif ctx.activation == ACT_NONE:
pass | null |
1,156 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import time
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
def create_logger(cfg, cfg_name, phase='train'):
root_output_dir = Path(cfg.OUTPUT_DIR)
# set up logger
if not root_output_dir.exists():
print('=> creating {}'.format(root_output_dir))
root_output_dir.mkdir()
dataset = cfg.DATASET.DATASET
model = cfg.MODEL.NAME
cfg_name = os.path.basename(cfg_name).split('.')[0]
final_output_dir = root_output_dir / dataset / cfg_name
print('=> creating {}'.format(final_output_dir))
final_output_dir.mkdir(parents=True, exist_ok=True)
time_str = time.strftime('%Y-%m-%d-%H-%M')
log_file = '{}_{}_{}.log'.format(cfg_name, time_str, phase)
final_log_file = final_output_dir / log_file
head = '%(asctime)-15s %(message)s'
logging.basicConfig(filename=str(final_log_file),
format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logging.getLogger('').addHandler(console)
tensorboard_log_dir = Path(cfg.LOG_DIR) / dataset / model / \
(cfg_name + '_' + time_str)
print('=> creating {}'.format(tensorboard_log_dir))
tensorboard_log_dir.mkdir(parents=True, exist_ok=True)
return logger, str(final_output_dir), str(tensorboard_log_dir) | null |
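A hedged usage sketch of create_logger. The real project passes a yacs config; here a hypothetical SimpleNamespace stands in, providing only the attributes the function reads (OUTPUT_DIR, LOG_DIR, DATASET.DATASET, MODEL.NAME).
from types import SimpleNamespace
cfg = SimpleNamespace(
    OUTPUT_DIR='output', LOG_DIR='log',
    DATASET=SimpleNamespace(DATASET='cityscapes'),
    MODEL=SimpleNamespace(NAME='seg_hrnet'),
)
logger, out_dir, tb_dir = create_logger(cfg, 'experiments/seg_hrnet_demo.yaml', phase='train')
logger.info('output dir: %s, tensorboard dir: %s', out_dir, tb_dir)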
1,157 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
from collections import namedtuple
import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `get_model_summary` function. Write a Python function `def get_model_summary(model, *input_tensors, item_length=26, verbose=False)` to solve the following problem:
:param model: :param input_tensors: :param item_length: :return:
Here is the function:
def get_model_summary(model, *input_tensors, item_length=26, verbose=False):
"""
:param model:
:param input_tensors:
:param item_length:
:return:
"""
summary = []
ModuleDetails = namedtuple(
"Layer", ["name", "input_size", "output_size", "num_parameters", "multiply_adds"])
hooks = []
layer_instances = {}
def add_hooks(module):
def hook(module, input, output):
class_name = str(module.__class__.__name__)
instance_index = 1
if class_name not in layer_instances:
layer_instances[class_name] = instance_index
else:
instance_index = layer_instances[class_name] + 1
layer_instances[class_name] = instance_index
layer_name = class_name + "_" + str(instance_index)
params = 0
if class_name.find("Conv") != -1 or class_name.find("BatchNorm") != -1 or \
class_name.find("Linear") != -1:
for param_ in module.parameters():
params += param_.view(-1).size(0)
flops = "Not Available"
if class_name.find("Conv") != -1 and hasattr(module, "weight"):
flops = (
torch.prod(
torch.LongTensor(list(module.weight.data.size()))) *
torch.prod(
torch.LongTensor(list(output.size())[2:]))).item()
elif isinstance(module, nn.Linear):
flops = (torch.prod(torch.LongTensor(list(output.size()))) \
* input[0].size(1)).item()
if isinstance(input[0], list):
input = input[0]
if isinstance(output, list):
output = output[0]
summary.append(
ModuleDetails(
name=layer_name,
input_size=list(input[0].size()),
output_size=list(output.size()),
num_parameters=params,
multiply_adds=flops)
)
if not isinstance(module, nn.ModuleList) \
and not isinstance(module, nn.Sequential) \
and module != model:
hooks.append(module.register_forward_hook(hook))
model.eval()
model.apply(add_hooks)
space_len = item_length
model(*input_tensors)
for hook in hooks:
hook.remove()
details = ''
if verbose:
details = "Model Summary" + \
os.linesep + \
"Name{}Input Size{}Output Size{}Parameters{}Multiply Adds (Flops){}".format(
' ' * (space_len - len("Name")),
' ' * (space_len - len("Input Size")),
' ' * (space_len - len("Output Size")),
' ' * (space_len - len("Parameters")),
' ' * (space_len - len("Multiply Adds (Flops)"))) \
+ os.linesep + '-' * space_len * 5 + os.linesep
params_sum = 0
flops_sum = 0
for layer in summary:
params_sum += layer.num_parameters
if layer.multiply_adds != "Not Available":
flops_sum += layer.multiply_adds
if verbose:
details += "{}{}{}{}{}{}{}{}{}{}".format(
layer.name,
' ' * (space_len - len(layer.name)),
layer.input_size,
' ' * (space_len - len(str(layer.input_size))),
layer.output_size,
' ' * (space_len - len(str(layer.output_size))),
layer.num_parameters,
' ' * (space_len - len(str(layer.num_parameters))),
layer.multiply_adds,
' ' * (space_len - len(str(layer.multiply_adds)))) \
+ os.linesep + '-' * space_len * 5 + os.linesep
details += os.linesep \
+ "Total Parameters: {:,}".format(params_sum) \
+ os.linesep + '-' * space_len * 5 + os.linesep
details += "Total Multiply Adds (For Convolution and Linear Layers only): {:,} GFLOPs".format(flops_sum/(1024**3)) \
+ os.linesep + '-' * space_len * 5 + os.linesep
details += "Number of Layers" + os.linesep
for layer in layer_instances:
details += "{} : {} layers ".format(layer, layer_instances[layer])
return details | :param model: :param input_tensors: :param item_length: :return: |
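A minimal usage sketch of get_model_summary: any nn.Module plus a matching dummy input works; verbose=True adds the per-layer table on top of the parameter and FLOP totals. The toy model below is only for illustration.
import torch
import torch.nn as nn
model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(),
                      nn.Flatten(), nn.Linear(8 * 32 * 32, 10))
print(get_model_summary(model, torch.randn(1, 3, 32, 32), verbose=True))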
1,158 | import torch
from lib.models.seg_hrnet import get_seg_model
state_dict_url = 'https://github.com/huawei-noah/ghostnet/raw/master/pytorch/models/state_dict_93.98.pth'
The provided code snippet includes necessary dependencies for implementing the `hrnet_w48_cityscapes` function. Write a Python function `def hrnet_w48_cityscapes(pretrained=False, **kwargs)` to solve the following problem:
# This docstring shows up in hub.help() HRNetW48 model pretrained on Cityscapes pretrained (bool): kwargs, load pretrained weights into the model
Here is the function:
def hrnet_w48_cityscapes(pretrained=False, **kwargs):
""" # This docstring shows up in hub.help()
HRNetW48 model pretrained on Cityscapes
pretrained (bool): kwargs, load pretrained weights into the model
"""
model = ghostnet(num_classes=1000, width=1.0, dropout=0.2)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(state_dict_url, progress=True)
model.load_state_dict(state_dict)
return model | # This docstring shows up in hub.help() HRNetW48 model pretrained on Cityscapes pretrained (bool): kwargs, load pretrained weights into the model |
1,159 | import argparse
import os
import pprint
import shutil
import sys
import logging
import time
import timeit
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
from tensorboardX import SummaryWriter
import _init_paths
import models
import datasets
from config import config
from config import update_config
from core.criterion import CrossEntropy, OhemCrossEntropy
from core.function import train, validate
from utils.modelsummary import get_model_summary
from utils.utils import create_logger, FullModel
def parse_args():
parser = argparse.ArgumentParser(description='Train segmentation network')
parser.add_argument('--cfg',
help='experiment configure file name',
required=True,
type=str)
parser.add_argument('--seed', type=int, default=304)
parser.add_argument("--local_rank", type=int, default=-1)
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
update_config(config, args)
return args | null |
1,160 | import argparse
import os
import pprint
import shutil
import sys
import logging
import time
import timeit
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
from tensorboardX import SummaryWriter
import _init_paths
import models
import datasets
from config import config
from config import update_config
from core.criterion import CrossEntropy, OhemCrossEntropy
from core.function import train, validate
from utils.modelsummary import get_model_summary
from utils.utils import create_logger, FullModel
def is_distributed():
def get_sampler(dataset):
from utils.distributed import is_distributed
if is_distributed():
from torch.utils.data.distributed import DistributedSampler
return DistributedSampler(dataset)
else:
return None | null |
1,161 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path) | null |
1,162 | import os
import platform
import shlex
from tempfile import NamedTemporaryFile
from typing import Any, Callable
import typer
from click import BadParameter, UsageError
from sgpt.__version__ import __version__
from sgpt.integration import bash_integration, zsh_integration
The provided code snippet includes necessary dependencies for implementing the `get_edited_prompt` function. Write a Python function `def get_edited_prompt() -> str` to solve the following problem:
Opens the user's default editor to let them input a prompt, and returns the edited text. :return: String prompt.
Here is the function:
def get_edited_prompt() -> str:
"""
Opens the user's default editor to let them
input a prompt, and returns the edited text.
:return: String prompt.
"""
with NamedTemporaryFile(suffix=".txt", delete=False) as file:
# Create file and store path.
file_path = file.name
editor = os.environ.get("EDITOR", "vim")
# This will write text to file using $EDITOR.
os.system(f"{editor} {file_path}")
# Read file when editor is closed.
with open(file_path, "r", encoding="utf-8") as file:
output = file.read()
os.remove(file_path)
if not output:
raise BadParameter("Couldn't get valid PROMPT from $EDITOR")
return output | Opens the user's default editor to let them input a prompt, and returns the edited text. :return: String prompt. |
1,163 | import os
import platform
import shlex
from tempfile import NamedTemporaryFile
from typing import Any, Callable
import typer
from click import BadParameter, UsageError
from sgpt.__version__ import __version__
from sgpt.integration import bash_integration, zsh_integration
The provided code snippet includes necessary dependencies for implementing the `run_command` function. Write a Python function `def run_command(command: str) -> None` to solve the following problem:
Runs a command in the user's shell. It is aware of the current user's $SHELL. :param command: A shell command to run.
Here is the function:
def run_command(command: str) -> None:
"""
Runs a command in the user's shell.
It is aware of the current user's $SHELL.
:param command: A shell command to run.
"""
if platform.system() == "Windows":
is_powershell = len(os.getenv("PSModulePath", "").split(os.pathsep)) >= 3
full_command = (
f'powershell.exe -Command "{command}"'
if is_powershell
else f'cmd.exe /c "{command}"'
)
else:
shell = os.environ.get("SHELL", "/bin/sh")
full_command = f"{shell} -c {shlex.quote(command)}"
os.system(full_command) | Runs a command in the user's shell. It is aware of the current user's $SHELL. :param command: A shell command to run. |
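A tiny usage sketch of run_command: on POSIX the string is passed through shlex.quote so the whole command reaches $SHELL -c as a single argument; on Windows it is routed to PowerShell or cmd depending on PSModulePath.
run_command("echo 'hello from shell-gpt'")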
1,164 | import os
import platform
import shlex
from tempfile import NamedTemporaryFile
from typing import Any, Callable
import typer
from click import BadParameter, UsageError
from sgpt.__version__ import __version__
from sgpt.integration import bash_integration, zsh_integration
def option_callback(func: Callable) -> Callable: # type: ignore
def wrapper(cls: Any, value: str) -> None:
if not value:
return
func(cls, value)
raise typer.Exit()
return wrapper | null |
1,165 | import os
import platform
import shlex
from tempfile import NamedTemporaryFile
from typing import Any, Callable
import typer
from click import BadParameter, UsageError
from sgpt.__version__ import __version__
from sgpt.integration import bash_integration, zsh_integration
bash_integration = """
# Shell-GPT integration BASH v0.2
_sgpt_bash() {
if [[ -n "$READLINE_LINE" ]]; then
READLINE_LINE=$(sgpt --shell <<< "$READLINE_LINE" --no-interaction)
READLINE_POINT=${#READLINE_LINE}
fi
}
bind -x '"\\C-l": _sgpt_bash'
# Shell-GPT integration BASH v0.2
"""
zsh_integration = """
# Shell-GPT integration ZSH v0.2
_sgpt_zsh() {
if [[ -n "$BUFFER" ]]; then
_sgpt_prev_cmd=$BUFFER
BUFFER+="⌛"
zle -I && zle redisplay
BUFFER=$(sgpt --shell <<< "$_sgpt_prev_cmd" --no-interaction)
zle end-of-line
fi
}
zle -N _sgpt_zsh
bindkey ^l _sgpt_zsh
# Shell-GPT integration ZSH v0.2
"""
The provided code snippet includes necessary dependencies for implementing the `install_shell_integration` function. Write a Python function `def install_shell_integration(*_args: Any) -> None` to solve the following problem:
Installs shell integration. Currently only supports ZSH and Bash. Allows user to get shell completions in terminal by using hotkey. Replaces current "buffer" of the shell with the completion.
Here is the function:
def install_shell_integration(*_args: Any) -> None:
"""
Installs shell integration. Currently only supports ZSH and Bash.
Allows user to get shell completions in terminal by using hotkey.
Replaces current "buffer" of the shell with the completion.
"""
# TODO: Add support for Windows.
# TODO: Implement updates.
shell = os.getenv("SHELL", "")
if shell == "/bin/zsh":
typer.echo("Installing ZSH integration...")
with open(os.path.expanduser("~/.zshrc"), "a", encoding="utf-8") as file:
file.write(zsh_integration)
elif shell == "/bin/bash":
typer.echo("Installing Bash integration...")
with open(os.path.expanduser("~/.bashrc"), "a", encoding="utf-8") as file:
file.write(bash_integration)
else:
raise UsageError("ShellGPT integrations only available for ZSH and Bash.")
typer.echo("Done! Restart your shell to apply changes.") | Installs shell integration. Currently only supports ZSH and Bash. Allows user to get shell completions in terminal by using hotkey. Replaces current "buffer" of the shell with the completion. |
1,166 | import os
import platform
import shlex
from tempfile import NamedTemporaryFile
from typing import Any, Callable
import typer
from click import BadParameter, UsageError
from sgpt.__version__ import __version__
from sgpt.integration import bash_integration, zsh_integration
__version__ = "1.4.0"
The provided code snippet includes necessary dependencies for implementing the `get_sgpt_version` function. Write a Python function `def get_sgpt_version(*_args: Any) -> None` to solve the following problem:
Displays the currently installed version of ShellGPT
Here is the function:
def get_sgpt_version(*_args: Any) -> None:
"""
Displays the currently installed version of ShellGPT
"""
typer.echo(f"ShellGPT {__version__}") | Displays the current installed version of ShellGPT |
1,167 | import os
import platform
import shutil
from pathlib import Path
from typing import Any
from ..config import cfg
from ..utils import option_callback
FUNCTIONS_FOLDER = Path(cfg.get("OPENAI_FUNCTIONS_PATH"))
def install_functions(*_args: Any) -> None:
current_folder = os.path.dirname(os.path.abspath(__file__))
common_folder = Path(current_folder + "/common")
common_files = [Path(path) for path in common_folder.glob("*.py")]
print("Installing default functions...")
for file in common_files:
print(f"Installed {FUNCTIONS_FOLDER}/{file.name}")
shutil.copy(file, FUNCTIONS_FOLDER, follow_symlinks=True)
current_platform = platform.system()
if current_platform == "Linux":
print("Installing Linux functions...")
if current_platform == "Windows":
print("Installing Windows functions...")
if current_platform == "Darwin":
print("Installing Mac functions...")
mac_folder = Path(current_folder + "/mac")
mac_files = [Path(path) for path in mac_folder.glob("*.py")]
for file in mac_files:
print(f"Installed {FUNCTIONS_FOLDER}/{file.name}")
shutil.copy(file, FUNCTIONS_FOLDER, follow_symlinks=True) | null |
1,168 | import os
import readline
import sys
import typer
from click import BadArgumentUsage
from click.types import Choice
from sgpt.config import cfg
from sgpt.function import get_openai_schemas
from sgpt.handlers.chat_handler import ChatHandler
from sgpt.handlers.default_handler import DefaultHandler
from sgpt.handlers.repl_handler import ReplHandler
from sgpt.llm_functions.init_functions import install_functions as inst_funcs
from sgpt.role import DefaultRoles, SystemRole
from sgpt.utils import (
get_edited_prompt,
get_sgpt_version,
install_shell_integration,
run_command,
)
def main(
prompt: str = typer.Argument(
"",
show_default=False,
help="The prompt to generate completions for.",
),
model: str = typer.Option(
cfg.get("DEFAULT_MODEL"),
help="Large language model to use.",
),
temperature: float = typer.Option(
0.0,
min=0.0,
max=2.0,
help="Randomness of generated output.",
),
top_p: float = typer.Option(
1.0,
min=0.0,
max=1.0,
help="Limits highest probable tokens (words).",
),
md: bool = typer.Option(
cfg.get("PRETTIFY_MARKDOWN") == "true",
help="Prettify markdown output.",
),
shell: bool = typer.Option(
False,
"--shell",
"-s",
help="Generate and execute shell commands.",
rich_help_panel="Assistance Options",
),
interaction: bool = typer.Option(
True,
help="Interactive mode for --shell option.",
rich_help_panel="Assistance Options",
),
describe_shell: bool = typer.Option(
False,
"--describe-shell",
"-d",
help="Describe a shell command.",
rich_help_panel="Assistance Options",
),
code: bool = typer.Option(
False,
"--code",
"-c",
help="Generate only code.",
rich_help_panel="Assistance Options",
),
functions: bool = typer.Option(
cfg.get("OPENAI_USE_FUNCTIONS") == "true",
help="Allow function calls.",
rich_help_panel="Assistance Options",
),
editor: bool = typer.Option(
False,
help="Open $EDITOR to provide a prompt.",
),
cache: bool = typer.Option(
True,
help="Cache completion results.",
),
version: bool = typer.Option(
False,
"--version",
help="Show version.",
callback=get_sgpt_version,
),
chat: str = typer.Option(
None,
help="Follow conversation with id, " 'use "temp" for quick session.',
rich_help_panel="Chat Options",
),
repl: str = typer.Option(
None,
help="Start a REPL (Read–eval–print loop) session.",
rich_help_panel="Chat Options",
),
show_chat: str = typer.Option(
None,
help="Show all messages from provided chat id.",
callback=ChatHandler.show_messages_callback,
rich_help_panel="Chat Options",
),
list_chats: bool = typer.Option(
False,
"--list-chats",
"-lc",
help="List all existing chat ids.",
callback=ChatHandler.list_ids,
rich_help_panel="Chat Options",
),
role: str = typer.Option(
None,
help="System role for GPT model.",
rich_help_panel="Role Options",
),
create_role: str = typer.Option(
None,
help="Create role.",
callback=SystemRole.create,
rich_help_panel="Role Options",
),
show_role: str = typer.Option(
None,
help="Show role.",
callback=SystemRole.show,
rich_help_panel="Role Options",
),
list_roles: bool = typer.Option(
False,
"--list-roles",
"-lr",
help="List roles.",
callback=SystemRole.list,
rich_help_panel="Role Options",
),
install_integration: bool = typer.Option(
False,
help="Install shell integration (ZSH and Bash only)",
callback=install_shell_integration,
hidden=True, # Hiding since should be used only once.
),
install_functions: bool = typer.Option(
False,
help="Install default functions.",
callback=inst_funcs,
hidden=True, # Hiding since should be used only once.
),
) -> None:
stdin_passed = not sys.stdin.isatty()
if stdin_passed:
stdin = ""
# TODO: This is very hacky.
# In some cases, we need to pass stdin along with inputs.
# When we want part of stdin to be used as an init prompt,
# but the rest of stdin to be used as inputs. For example:
# echo "hello\n__sgpt__eof__\nThis is input" | sgpt --repl temp
# In this case, "hello" will be used as a init prompt, and
# "This is input" will be used as "interactive" input to the REPL.
# This is useful to test REPL with some initial context.
for line in sys.stdin:
if "__sgpt__eof__" in line:
break
stdin += line
prompt = f"{stdin}\n\n{prompt}" if prompt else stdin
try:
# Switch to stdin for interactive input.
if os.name == "posix":
sys.stdin = open("/dev/tty", "r")
elif os.name == "nt":
sys.stdin = open("CON", "r")
except OSError:
# Non-interactive shell.
pass
if sum((shell, describe_shell, code)) > 1:
raise BadArgumentUsage(
"Only one of --shell, --describe-shell, and --code options can be used at a time."
)
if chat and repl:
raise BadArgumentUsage("--chat and --repl options cannot be used together.")
if editor and stdin_passed:
raise BadArgumentUsage("--editor option cannot be used with stdin input.")
if editor:
prompt = get_edited_prompt()
role_class = (
DefaultRoles.check_get(shell, describe_shell, code)
if not role
else SystemRole.get(role)
)
function_schemas = (get_openai_schemas() or None) if functions else None
if repl:
# Will be in infinite loop here until user exits with Ctrl+C.
ReplHandler(repl, role_class, md).handle(
init_prompt=prompt,
model=model,
temperature=temperature,
top_p=top_p,
caching=cache,
functions=function_schemas,
)
if chat:
full_completion = ChatHandler(chat, role_class, md).handle(
prompt=prompt,
model=model,
temperature=temperature,
top_p=top_p,
caching=cache,
functions=function_schemas,
)
else:
full_completion = DefaultHandler(role_class, md).handle(
prompt=prompt,
model=model,
temperature=temperature,
top_p=top_p,
caching=cache,
functions=function_schemas,
)
while shell and interaction:
option = typer.prompt(
text="[E]xecute, [D]escribe, [A]bort",
type=Choice(("e", "d", "a", "y"), case_sensitive=False),
default="e" if cfg.get("DEFAULT_EXECUTE_SHELL_CMD") == "true" else "a",
show_choices=False,
show_default=False,
)
if option in ("e", "y"):
# "y" option is for keeping compatibility with old version.
run_command(full_completion)
elif option == "d":
DefaultHandler(DefaultRoles.DESCRIBE_SHELL.get_role(), md).handle(
full_completion,
model=model,
temperature=temperature,
top_p=top_p,
caching=cache,
functions=function_schemas,
)
continue
break
def entry_point() -> None:
typer.run(main) | null |
1,169 | import importlib.util
import sys
from abc import ABCMeta
from pathlib import Path
from typing import Any, Callable, Dict, List
from .config import cfg
functions = [Function(str(path)) for path in functions_folder.glob("*.py")]
def get_function(name: str) -> Callable[..., Any]:
for function in functions:
if function.name == name:
return function.execute
raise ValueError(f"Function {name} not found") | null |
1,170 | import importlib.util
import sys
from abc import ABCMeta
from pathlib import Path
from typing import Any, Callable, Dict, List
from .config import cfg
functions = [Function(str(path)) for path in functions_folder.glob("*.py")]
def get_openai_schemas() -> List[Dict[str, Any]]:
return [function.openai_schema for function in functions] | null |
1,171 | from io import open
from setuptools import setup
with open('requirements.txt', encoding="utf-8-sig") as f:
requirements = f.readlines()
def readme():
with open('README.md', encoding="utf-8-sig") as f:
README = f.read()
return README | null |
1,172 | import os
import sys
import time
import random
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
from torch.cuda.amp import autocast, GradScaler
import numpy as np
from utils import CTCLabelConverter, AttnLabelConverter, Averager
from dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from model import Model
from test import validation
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def count_parameters(model):
print("Modules, Parameters")
total_params = 0
for name, parameter in model.named_parameters():
if not parameter.requires_grad: continue
param = parameter.numel()
#table.add_row([name, param])
total_params+=param
print(name, param)
print(f"Total Trainable Params: {total_params}")
return total_params
class CTCLabelConverter(object):
""" Convert between text-label and text-index """
#def __init__(self, character, separator = []):
def __init__(self, character, separator_list = {}, dict_pathlist = {}):
# character (str): set of the possible characters.
dict_character = list(character)
#special_character = ['\xa2', '\xa3', '\xa4','\xa5']
#self.separator_char = special_character[:len(separator)]
self.dict = {}
#for i, char in enumerate(self.separator_char + dict_character):
for i, char in enumerate(dict_character):
# NOTE: 0 is reserved for 'blank' token required by CTCLoss
self.dict[char] = i + 1
self.character = ['[blank]'] + dict_character # dummy '[blank]' token for CTCLoss (index 0)
#self.character = ['[blank]']+ self.separator_char + dict_character # dummy '[blank]' token for CTCLoss (index 0)
self.separator_list = separator_list
separator_char = []
for lang, sep in separator_list.items():
separator_char += sep
self.ignore_idx = [0] + [i+1 for i,item in enumerate(separator_char)]
dict_list = {}
for lang, dict_path in dict_pathlist.items():
with open(dict_path, "rb") as input_file:
word_count = pickle.load(input_file)
dict_list[lang] = word_count
self.dict_list = dict_list
def encode(self, text, batch_max_length=25):
"""convert text-label into text-index.
input:
text: text labels of each image. [batch_size]
output:
text: concatenated text index for CTCLoss.
[sum(text_lengths)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)]
length: length of each text. [batch_size]
"""
length = [len(s) for s in text]
text = ''.join(text)
text = [self.dict[char] for char in text]
return (torch.IntTensor(text), torch.IntTensor(length))
def decode_greedy(self, text_index, length):
""" convert text-index into text-label. """
texts = []
index = 0
for l in length:
t = text_index[index:index + l]
char_list = []
for i in range(l):
if t[i] not in self.ignore_idx and (not (i > 0 and t[i - 1] == t[i])): # removing repeated characters and blank (and separator).
#if (t[i] != 0) and (not (i > 0 and t[i - 1] == t[i])): # removing repeated characters and blank (and separator).
char_list.append(self.character[t[i]])
text = ''.join(char_list)
texts.append(text)
index += l
return texts
def decode_beamsearch(self, mat, beamWidth=5):
texts = []
for i in range(mat.shape[0]):
t = ctcBeamSearch(mat[i], self.character, self.ignore_idx, None, beamWidth=beamWidth)
texts.append(t)
return texts
def decode_wordbeamsearch(self, mat, beamWidth=5):
texts = []
argmax = np.argmax(mat, axis = 2)
for i in range(mat.shape[0]):
words = word_segmentation(argmax[i])
string = ''
for word in words:
matrix = mat[i, word[1][0]:word[1][1]+1,:]
if word[0] == '': dict_list = []
else: dict_list = self.dict_list[word[0]]
t = ctcBeamSearch(matrix, self.character, self.ignore_idx, None, beamWidth=beamWidth, dict_list=dict_list)
string += t
texts.append(string)
return texts
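A worked example of the CTC round trip above, using a tiny character set with no separators or dictionaries; index 0 stays reserved for the CTC blank.
import torch
converter = CTCLabelConverter('abc')           # dict: a->1, b->2, c->3
text, length = converter.encode(['ab', 'c'])
print(text.tolist(), length.tolist())          # [1, 2, 3] [2, 1]
print(converter.decode_greedy(text, length))   # ['ab', 'c']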
class AttnLabelConverter(object):
""" Convert between text-label and text-index """
def __init__(self, character):
# character (str): set of the possible characters.
# [GO] for the start token of the attention decoder. [s] for end-of-sentence token.
list_token = ['[GO]', '[s]'] # ['[s]','[UNK]','[PAD]','[GO]']
list_character = list(character)
self.character = list_token + list_character
self.dict = {}
for i, char in enumerate(self.character):
# print(i, char)
self.dict[char] = i
def encode(self, text, batch_max_length=25):
""" convert text-label into text-index.
input:
text: text labels of each image. [batch_size]
batch_max_length: max length of text label in the batch. 25 by default
output:
text : the input of attention decoder. [batch_size x (max_length+2)] +1 for [GO] token and +1 for [s] token.
text[:, 0] is [GO] token and text is padded with [GO] token after [s] token.
length : the length of output of attention decoder, which count [s] token also. [3, 7, ....] [batch_size]
"""
length = [len(s) + 1 for s in text] # +1 for [s] at end of sentence.
# batch_max_length = max(length) # this is not allowed for multi-gpu setting
batch_max_length += 1
# additional +1 for [GO] at first step. batch_text is padded with [GO] token after [s] token.
batch_text = torch.LongTensor(len(text), batch_max_length + 1).fill_(0)
for i, t in enumerate(text):
text = list(t)
text.append('[s]')
text = [self.dict[char] for char in text]
batch_text[i][1:1 + len(text)] = torch.LongTensor(text) # batch_text[:, 0] = [GO] token
return (batch_text.to(device), torch.IntTensor(length).to(device))
def decode(self, text_index, length):
""" convert text-index into text-label. """
texts = []
for index, l in enumerate(length):
text = ''.join([self.character[i] for i in text_index[index, :]])
texts.append(text)
return texts
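A worked example of the attention-decoder layout described in encode's docstring: column 0 is the [GO] token (index 0), the target is shifted by one, terminated with [s] (index 1) and padded with [GO]. It relies on the module-level device defined above.
converter = AttnLabelConverter('abc')               # tokens: [GO]=0, [s]=1, a=2, b=3, c=4
batch_text, length = converter.encode(['ab'], batch_max_length=5)
print(batch_text.tolist())                          # [[0, 2, 3, 1, 0, 0, 0]]
print(length.tolist())                              # [3] -> 'a', 'b', '[s]'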
class Averager(object):
"""Compute average for torch.Tensor, used for loss average."""
def __init__(self):
self.reset()
def add(self, v):
count = v.data.numel()
v = v.data.sum()
self.n_count += count
self.sum += v
def reset(self):
self.n_count = 0
self.sum = 0
def val(self):
res = 0
if self.n_count != 0:
res = self.sum / float(self.n_count)
return res
class Batch_Balanced_Dataset(object):
def __init__(self, opt):
"""
Modulate the data ratio in the batch.
For example, when select_data is "MJ-ST" and batch_ratio is "0.5-0.5",
50% of the batch is filled with MJ and the other 50% is filled with ST.
"""
log = open(f'./saved_models/{opt.experiment_name}/log_dataset.txt', 'a')
dashed_line = '-' * 80
print(dashed_line)
log.write(dashed_line + '\n')
print(f'dataset_root: {opt.train_data}\nopt.select_data: {opt.select_data}\nopt.batch_ratio: {opt.batch_ratio}')
log.write(f'dataset_root: {opt.train_data}\nopt.select_data: {opt.select_data}\nopt.batch_ratio: {opt.batch_ratio}\n')
assert len(opt.select_data) == len(opt.batch_ratio)
_AlignCollate = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, contrast_adjust = opt.contrast_adjust)
self.data_loader_list = []
self.dataloader_iter_list = []
batch_size_list = []
Total_batch_size = 0
for selected_d, batch_ratio_d in zip(opt.select_data, opt.batch_ratio):
_batch_size = max(round(opt.batch_size * float(batch_ratio_d)), 1)
print(dashed_line)
log.write(dashed_line + '\n')
_dataset, _dataset_log = hierarchical_dataset(root=opt.train_data, opt=opt, select_data=[selected_d])
total_number_dataset = len(_dataset)
log.write(_dataset_log)
"""
The total number of data can be modified with opt.total_data_usage_ratio.
ex) opt.total_data_usage_ratio = 1 indicates 100% usage, and 0.2 indicates 20% usage.
See 4.2 section in our paper.
"""
number_dataset = int(total_number_dataset * float(opt.total_data_usage_ratio))
dataset_split = [number_dataset, total_number_dataset - number_dataset]
indices = range(total_number_dataset)
_dataset, _ = [Subset(_dataset, indices[offset - length:offset])
for offset, length in zip(_accumulate(dataset_split), dataset_split)]
selected_d_log = f'num total samples of {selected_d}: {total_number_dataset} x {opt.total_data_usage_ratio} (total_data_usage_ratio) = {len(_dataset)}\n'
selected_d_log += f'num samples of {selected_d} per batch: {opt.batch_size} x {float(batch_ratio_d)} (batch_ratio) = {_batch_size}'
print(selected_d_log)
log.write(selected_d_log + '\n')
batch_size_list.append(str(_batch_size))
Total_batch_size += _batch_size
_data_loader = torch.utils.data.DataLoader(
_dataset, batch_size=_batch_size,
shuffle=True,
num_workers=int(opt.workers), #prefetch_factor=2,persistent_workers=True,
collate_fn=_AlignCollate, pin_memory=True)
self.data_loader_list.append(_data_loader)
self.dataloader_iter_list.append(iter(_data_loader))
Total_batch_size_log = f'{dashed_line}\n'
batch_size_sum = '+'.join(batch_size_list)
Total_batch_size_log += f'Total_batch_size: {batch_size_sum} = {Total_batch_size}\n'
Total_batch_size_log += f'{dashed_line}'
opt.batch_size = Total_batch_size
print(Total_batch_size_log)
log.write(Total_batch_size_log + '\n')
log.close()
def get_batch(self):
balanced_batch_images = []
balanced_batch_texts = []
for i, data_loader_iter in enumerate(self.dataloader_iter_list):
try:
image, text = data_loader_iter.next()
balanced_batch_images.append(image)
balanced_batch_texts += text
except StopIteration:
self.dataloader_iter_list[i] = iter(self.data_loader_list[i])
image, text = self.dataloader_iter_list[i].next()
balanced_batch_images.append(image)
balanced_batch_texts += text
except ValueError:
pass
balanced_batch_images = torch.cat(balanced_batch_images, 0)
return balanced_batch_images, balanced_batch_texts
def hierarchical_dataset(root, opt, select_data='/'):
""" select_data='/' contains all sub-directory of root directory """
dataset_list = []
dataset_log = f'dataset_root: {root}\t dataset: {select_data[0]}'
print(dataset_log)
dataset_log += '\n'
for dirpath, dirnames, filenames in os.walk(root+'/'):
if not dirnames:
select_flag = False
for selected_d in select_data:
if selected_d in dirpath:
select_flag = True
break
if select_flag:
dataset = OCRDataset(dirpath, opt)
sub_dataset_log = f'sub-directory:\t/{os.path.relpath(dirpath, root)}\t num samples: {len(dataset)}'
print(sub_dataset_log)
dataset_log += f'{sub_dataset_log}\n'
dataset_list.append(dataset)
concatenated_dataset = ConcatDataset(dataset_list)
return concatenated_dataset, dataset_log
class AlignCollate(object):
def __init__(self, imgH=32, imgW=100, keep_ratio_with_pad=False, contrast_adjust = 0.):
self.imgH = imgH
self.imgW = imgW
self.keep_ratio_with_pad = keep_ratio_with_pad
self.contrast_adjust = contrast_adjust
def __call__(self, batch):
batch = filter(lambda x: x is not None, batch)
images, labels = zip(*batch)
if self.keep_ratio_with_pad: # same concept with 'Rosetta' paper
resized_max_w = self.imgW
input_channel = 3 if images[0].mode == 'RGB' else 1
transform = NormalizePAD((input_channel, self.imgH, resized_max_w))
resized_images = []
for image in images:
w, h = image.size
#### augmentation here - change contrast
if self.contrast_adjust > 0:
image = np.array(image.convert("L"))
image = adjust_contrast_grey(image, target = self.contrast_adjust)
image = Image.fromarray(image, 'L')
ratio = w / float(h)
if math.ceil(self.imgH * ratio) > self.imgW:
resized_w = self.imgW
else:
resized_w = math.ceil(self.imgH * ratio)
resized_image = image.resize((resized_w, self.imgH), Image.BICUBIC)
resized_images.append(transform(resized_image))
# resized_image.save('./image_test/%d_test.jpg' % w)
image_tensors = torch.cat([t.unsqueeze(0) for t in resized_images], 0)
else:
transform = ResizeNormalize((self.imgW, self.imgH))
image_tensors = [transform(image) for image in images]
image_tensors = torch.cat([t.unsqueeze(0) for t in image_tensors], 0)
return image_tensors, labels
class Model(nn.Module):
def __init__(self, opt):
super(Model, self).__init__()
self.opt = opt
self.stages = {'Trans': opt.Transformation, 'Feat': opt.FeatureExtraction,
'Seq': opt.SequenceModeling, 'Pred': opt.Prediction}
""" Transformation """
if opt.Transformation == 'TPS':
self.Transformation = TPS_SpatialTransformerNetwork(
F=opt.num_fiducial, I_size=(opt.imgH, opt.imgW), I_r_size=(opt.imgH, opt.imgW), I_channel_num=opt.input_channel)
else:
print('No Transformation module specified')
""" FeatureExtraction """
if opt.FeatureExtraction == 'VGG':
self.FeatureExtraction = VGG_FeatureExtractor(opt.input_channel, opt.output_channel)
elif opt.FeatureExtraction == 'RCNN':
self.FeatureExtraction = RCNN_FeatureExtractor(opt.input_channel, opt.output_channel)
elif opt.FeatureExtraction == 'ResNet':
self.FeatureExtraction = ResNet_FeatureExtractor(opt.input_channel, opt.output_channel)
else:
raise Exception('No FeatureExtraction module specified')
self.FeatureExtraction_output = opt.output_channel # int(imgH/16-1) * 512
self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1)) # Transform final (imgH/16-1) -> 1
""" Sequence modeling"""
if opt.SequenceModeling == 'BiLSTM':
self.SequenceModeling = nn.Sequential(
BidirectionalLSTM(self.FeatureExtraction_output, opt.hidden_size, opt.hidden_size),
BidirectionalLSTM(opt.hidden_size, opt.hidden_size, opt.hidden_size))
self.SequenceModeling_output = opt.hidden_size
else:
print('No SequenceModeling module specified')
self.SequenceModeling_output = self.FeatureExtraction_output
""" Prediction """
if opt.Prediction == 'CTC':
self.Prediction = nn.Linear(self.SequenceModeling_output, opt.num_class)
elif opt.Prediction == 'Attn':
self.Prediction = Attention(self.SequenceModeling_output, opt.hidden_size, opt.num_class)
else:
raise Exception('Prediction is neither CTC nor Attn')
def forward(self, input, text, is_train=True):
""" Transformation stage """
if not self.stages['Trans'] == "None":
input = self.Transformation(input)
""" Feature extraction stage """
visual_feature = self.FeatureExtraction(input)
visual_feature = self.AdaptiveAvgPool(visual_feature.permute(0, 3, 1, 2)) # [b, c, h, w] -> [b, w, c, h]
visual_feature = visual_feature.squeeze(3)
""" Sequence modeling stage """
if self.stages['Seq'] == 'BiLSTM':
contextual_feature = self.SequenceModeling(visual_feature)
else:
contextual_feature = visual_feature # for convenience. this is NOT contextually modeled by BiLSTM
""" Prediction stage """
if self.stages['Pred'] == 'CTC':
prediction = self.Prediction(contextual_feature.contiguous())
else:
prediction = self.Prediction(contextual_feature.contiguous(), text, is_train, batch_max_length=self.opt.batch_max_length)
return prediction
def validation(model, criterion, evaluation_loader, converter, opt, device):
""" validation or evaluation """
n_correct = 0
norm_ED = 0
length_of_data = 0
infer_time = 0
valid_loss_avg = Averager()
for i, (image_tensors, labels) in enumerate(evaluation_loader):
batch_size = image_tensors.size(0)
length_of_data = length_of_data + batch_size
image = image_tensors.to(device)
# For max length prediction
length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss, length_for_loss = converter.encode(labels, batch_max_length=opt.batch_max_length)
start_time = time.time()
if 'CTC' in opt.Prediction:
preds = model(image, text_for_pred)
forward_time = time.time() - start_time
# Calculate evaluation loss for CTC decoder.
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
# permute 'preds' to use CTCloss format
cost = criterion(preds.log_softmax(2).permute(1, 0, 2), text_for_loss, preds_size, length_for_loss)
if opt.decode == 'greedy':
# Select max probability (greedy decoding) then decode index to character
_, preds_index = preds.max(2)
preds_index = preds_index.view(-1)
preds_str = converter.decode_greedy(preds_index.data, preds_size.data)
elif opt.decode == 'beamsearch':
preds_str = converter.decode_beamsearch(preds, beamWidth=2)
else:
preds = model(image, text_for_pred, is_train=False)
forward_time = time.time() - start_time
preds = preds[:, :text_for_loss.shape[1] - 1, :]
target = text_for_loss[:, 1:] # without [GO] Symbol
cost = criterion(preds.contiguous().view(-1, preds.shape[-1]), target.contiguous().view(-1))
# select max probability (greedy decoding) then decode index to character
_, preds_index = preds.max(2)
preds_str = converter.decode(preds_index, length_for_pred)
labels = converter.decode(text_for_loss[:, 1:], length_for_loss)
infer_time += forward_time
valid_loss_avg.add(cost)
# calculate accuracy & confidence score
preds_prob = F.softmax(preds, dim=2)
preds_max_prob, _ = preds_prob.max(dim=2)
confidence_score_list = []
for gt, pred, pred_max_prob in zip(labels, preds_str, preds_max_prob):
if 'Attn' in opt.Prediction:
gt = gt[:gt.find('[s]')]
pred_EOS = pred.find('[s]')
pred = pred[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob = pred_max_prob[:pred_EOS]
if pred == gt:
n_correct += 1
'''
(old version) ICDAR2017 DOST Normalized Edit Distance https://rrc.cvc.uab.es/?ch=7&com=tasks
"For each word we calculate the normalized edit distance to the length of the ground truth transcription."
if len(gt) == 0:
norm_ED += 1
else:
norm_ED += edit_distance(pred, gt) / len(gt)
'''
# ICDAR2019 Normalized Edit Distance
if len(gt) == 0 or len(pred) ==0:
norm_ED += 0
elif len(gt) > len(pred):
norm_ED += 1 - edit_distance(pred, gt) / len(gt)
else:
norm_ED += 1 - edit_distance(pred, gt) / len(pred)
# calculate confidence score (= product of pred_max_prob)
try:
confidence_score = pred_max_prob.cumprod(dim=0)[-1]
except:
confidence_score = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_list.append(confidence_score)
# print(pred, gt, pred==gt, confidence_score)
accuracy = n_correct / float(length_of_data) * 100
norm_ED = norm_ED / float(length_of_data) # ICDAR2019 Normalized Edit Distance
return valid_loss_avg.val(), accuracy, norm_ED, preds_str, confidence_score_list, labels, infer_time, length_of_data
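A worked example of the ICDAR2019 normalized edit distance branch above. The snippet assumes edit_distance is nltk.metrics.distance.edit_distance, which is the usual import for this code; that import is an assumption here.
from nltk.metrics.distance import edit_distance
gt, pred = 'hello', 'helo'
d = edit_distance(pred, gt)                                        # 1 deletion
score = 1 - d / len(gt) if len(gt) > len(pred) else 1 - d / len(pred)
print(d, score)                                                    # 1 0.8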
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(opt, show_number = 2, amp=False)` to solve the following problem:
dataset preparation
Here is the function:
def train(opt, show_number = 2, amp=False):
""" dataset preparation """
if not opt.data_filtering_off:
print('Filtering the images containing characters which are not in opt.character')
print('Filtering the images whose label is longer than opt.batch_max_length')
opt.select_data = opt.select_data.split('-')
opt.batch_ratio = opt.batch_ratio.split('-')
train_dataset = Batch_Balanced_Dataset(opt)
log = open(f'./saved_models/{opt.experiment_name}/log_dataset.txt', 'a', encoding="utf8")
AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, contrast_adjust=opt.contrast_adjust)
valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=min(32, opt.batch_size),
shuffle=True, # 'True' to check training progress with validation function.
num_workers=int(opt.workers), prefetch_factor=512,
collate_fn=AlignCollate_valid, pin_memory=True)
log.write(valid_dataset_log)
print('-' * 80)
log.write('-' * 80 + '\n')
log.close()
""" model configuration """
if 'CTC' in opt.Prediction:
converter = CTCLabelConverter(opt.character)
else:
converter = AttnLabelConverter(opt.character)
opt.num_class = len(converter.character)
if opt.rgb:
opt.input_channel = 3
model = Model(opt)
print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
opt.SequenceModeling, opt.Prediction)
if opt.saved_model != '':
pretrained_dict = torch.load(opt.saved_model)
if opt.new_prediction:
model.Prediction = nn.Linear(model.SequenceModeling_output, len(pretrained_dict['module.Prediction.weight']))
model = torch.nn.DataParallel(model).to(device)
print(f'loading pretrained model from {opt.saved_model}')
if opt.FT:
model.load_state_dict(pretrained_dict, strict=False)
else:
model.load_state_dict(pretrained_dict)
if opt.new_prediction:
model.module.Prediction = nn.Linear(model.module.SequenceModeling_output, opt.num_class)
for name, param in model.module.Prediction.named_parameters():
if 'bias' in name:
init.constant_(param, 0.0)
elif 'weight' in name:
init.kaiming_normal_(param)
model = model.to(device)
else:
# weight initialization
for name, param in model.named_parameters():
if 'localization_fc2' in name:
print(f'Skip {name} as it is already initialized')
continue
try:
if 'bias' in name:
init.constant_(param, 0.0)
elif 'weight' in name:
init.kaiming_normal_(param)
except Exception as e: # for batchnorm.
if 'weight' in name:
param.data.fill_(1)
continue
model = torch.nn.DataParallel(model).to(device)
model.train()
print("Model:")
print(model)
count_parameters(model)
""" setup loss """
if 'CTC' in opt.Prediction:
criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
else:
criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device) # ignore [GO] token = ignore index 0
# loss averager
loss_avg = Averager()
# freeze some layers
try:
if opt.freeze_FeatureFxtraction:
for param in model.module.FeatureExtraction.parameters():
param.requires_grad = False
if opt.freeze_SequenceModeling:
for param in model.module.SequenceModeling.parameters():
param.requires_grad = False
except:
pass
# keep only the parameters that require gradient descent
filtered_parameters = []
params_num = []
for p in filter(lambda p: p.requires_grad, model.parameters()):
filtered_parameters.append(p)
params_num.append(np.prod(p.size()))
print('Trainable params num : ', sum(params_num))
# [print(name, p.numel()) for name, p in filter(lambda p: p[1].requires_grad, model.named_parameters())]
# setup optimizer
if opt.optim=='adam':
#optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
optimizer = optim.Adam(filtered_parameters)
else:
optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps)
print("Optimizer:")
print(optimizer)
""" final options """
# print(opt)
with open(f'./saved_models/{opt.experiment_name}/opt.txt', 'a', encoding="utf8") as opt_file:
opt_log = '------------ Options -------------\n'
args = vars(opt)
for k, v in args.items():
opt_log += f'{str(k)}: {str(v)}\n'
opt_log += '---------------------------------------\n'
print(opt_log)
opt_file.write(opt_log)
""" start training """
start_iter = 0
if opt.saved_model != '':
try:
start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
print(f'continue to train, start_iter: {start_iter}')
except:
pass
start_time = time.time()
best_accuracy = -1
best_norm_ED = -1
i = start_iter
scaler = GradScaler()
t1= time.time()
while(True):
# train part
optimizer.zero_grad(set_to_none=True)
if amp:
with autocast():
image_tensors, labels = train_dataset.get_batch()
image = image_tensors.to(device)
text, length = converter.encode(labels, batch_max_length=opt.batch_max_length)
batch_size = image.size(0)
if 'CTC' in opt.Prediction:
preds = model(image, text).log_softmax(2)
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
preds = preds.permute(1, 0, 2)
torch.backends.cudnn.enabled = False
cost = criterion(preds, text.to(device), preds_size.to(device), length.to(device))
torch.backends.cudnn.enabled = True
else:
preds = model(image, text[:, :-1]) # align with Attention.forward
target = text[:, 1:] # without [GO] Symbol
cost = criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))
scaler.scale(cost).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip)
scaler.step(optimizer)
scaler.update()
else:
image_tensors, labels = train_dataset.get_batch()
image = image_tensors.to(device)
text, length = converter.encode(labels, batch_max_length=opt.batch_max_length)
batch_size = image.size(0)
if 'CTC' in opt.Prediction:
preds = model(image, text).log_softmax(2)
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
preds = preds.permute(1, 0, 2)
torch.backends.cudnn.enabled = False
cost = criterion(preds, text.to(device), preds_size.to(device), length.to(device))
torch.backends.cudnn.enabled = True
else:
preds = model(image, text[:, :-1]) # align with Attention.forward
target = text[:, 1:] # without [GO] Symbol
cost = criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))
cost.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip)
optimizer.step()
loss_avg.add(cost)
# validation part
if (i % opt.valInterval == 0) and (i!=0):
print('training time: ', time.time()-t1)
t1=time.time()
elapsed_time = time.time() - start_time
# for log
with open(f'./saved_models/{opt.experiment_name}/log_train.txt', 'a', encoding="utf8") as log:
model.eval()
with torch.no_grad():
valid_loss, current_accuracy, current_norm_ED, preds, confidence_score, labels,\
infer_time, length_of_data = validation(model, criterion, valid_loader, converter, opt, device)
model.train()
# training loss and validation loss
loss_log = f'[{i}/{opt.num_iter}] Train loss: {loss_avg.val():0.5f}, Valid loss: {valid_loss:0.5f}, Elapsed_time: {elapsed_time:0.5f}'
loss_avg.reset()
current_model_log = f'{"Current_accuracy":17s}: {current_accuracy:0.3f}, {"Current_norm_ED":17s}: {current_norm_ED:0.4f}'
# keep best accuracy model (on valid dataset)
if current_accuracy > best_accuracy:
best_accuracy = current_accuracy
torch.save(model.state_dict(), f'./saved_models/{opt.experiment_name}/best_accuracy.pth')
if current_norm_ED > best_norm_ED:
best_norm_ED = current_norm_ED
torch.save(model.state_dict(), f'./saved_models/{opt.experiment_name}/best_norm_ED.pth')
best_model_log = f'{"Best_accuracy":17s}: {best_accuracy:0.3f}, {"Best_norm_ED":17s}: {best_norm_ED:0.4f}'
loss_model_log = f'{loss_log}\n{current_model_log}\n{best_model_log}'
print(loss_model_log)
log.write(loss_model_log + '\n')
# show some predicted results
dashed_line = '-' * 80
head = f'{"Ground Truth":25s} | {"Prediction":25s} | Confidence Score & T/F'
predicted_result_log = f'{dashed_line}\n{head}\n{dashed_line}\n'
#show_number = min(show_number, len(labels))
start = random.randint(0,len(labels) - show_number )
for gt, pred, confidence in zip(labels[start:start+show_number], preds[start:start+show_number], confidence_score[start:start+show_number]):
if 'Attn' in opt.Prediction:
gt = gt[:gt.find('[s]')]
pred = pred[:pred.find('[s]')]
predicted_result_log += f'{gt:25s} | {pred:25s} | {confidence:0.4f}\t{str(pred == gt)}\n'
predicted_result_log += f'{dashed_line}'
print(predicted_result_log)
log.write(predicted_result_log + '\n')
print('validation time: ', time.time()-t1)
t1=time.time()
# save model per 1e+4 iter.
if (i + 1) % 1e+4 == 0:
torch.save(
model.state_dict(), f'./saved_models/{opt.experiment_name}/iter_{i+1}.pth')
if i == opt.num_iter:
print('end the training')
sys.exit()
i += 1 | dataset preparation |
1,173 | import torch
import pickle
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `applyLM` function. Write a Python function `def applyLM(parentBeam, childBeam, classes, lm)` to solve the following problem:
calculate LM score of child beam by taking score from parent beam and bigram probability of last two chars
Here is the function:
def applyLM(parentBeam, childBeam, classes, lm):
"calculate LM score of child beam by taking score from parent beam and bigram probability of last two chars"
if lm and not childBeam.lmApplied:
c1 = classes[parentBeam.labeling[-1] if parentBeam.labeling else classes.index(' ')] # first char
c2 = classes[childBeam.labeling[-1]] # second char
lmFactor = 0.01 # influence of language model
bigramProb = lm.getCharBigram(c1, c2) ** lmFactor # probability of seeing first and second char next to each other
childBeam.prText = parentBeam.prText * bigramProb # probability of char sequence
childBeam.lmApplied = True # only apply LM once per beam entry | calculate LM score of child beam by taking score from parent beam and bigram probability of last two chars |
1,174 | import torch
import pickle
import numpy as np
class BeamEntry:
"information about one single beam at specific time-step"
def __init__(self):
self.prTotal = 0 # blank and non-blank
self.prNonBlank = 0 # non-blank
self.prBlank = 0 # blank
self.prText = 1 # LM score
self.lmApplied = False # flag if LM was already applied to this beam
self.labeling = () # beam-labeling
class BeamState:
"information about the beams at specific time-step"
def __init__(self):
self.entries = {}
def norm(self):
"length-normalise LM score"
for (k, _) in self.entries.items():
labelingLen = len(self.entries[k].labeling)
self.entries[k].prText = self.entries[k].prText ** (1.0 / (labelingLen if labelingLen else 1.0))
def sort(self):
"return beam-labelings, sorted by probability"
beams = [v for (_, v) in self.entries.items()]
sortedBeams = sorted(beams, reverse=True, key=lambda x: x.prTotal*x.prText)
return [x.labeling for x in sortedBeams]
def wordsearch(self, classes, ignore_idx, beamWidth, dict_list):
beams = [v for (_, v) in self.entries.items()]
sortedBeams = sorted(beams, reverse=True, key=lambda x: x.prTotal*x.prText)[:beamWidth]
for j, candidate in enumerate(sortedBeams):
idx_list = candidate.labeling
text = ''
for i,l in enumerate(idx_list):
if l not in ignore_idx and (not (i > 0 and idx_list[i - 1] == idx_list[i])): # removing repeated characters and blank.
text += classes[l]
if j == 0: best_text = text
if text in dict_list:
print('found text: ', text)
best_text = text
break
else:
print('not in dict: ', text)
return best_text
def addBeam(beamState, labeling):
"add beam if it does not yet exist"
if labeling not in beamState.entries:
beamState.entries[labeling] = BeamEntry()
The provided code snippet includes necessary dependencies for implementing the `ctcBeamSearch` function. Write a Python function `def ctcBeamSearch(mat, classes, ignore_idx, lm, beamWidth=25, dict_list = [])` to solve the following problem:
beam search as described by the paper of Hwang et al. and the paper of Graves et al.
Here is the function:
def ctcBeamSearch(mat, classes, ignore_idx, lm, beamWidth=25, dict_list = []):
"beam search as described by the paper of Hwang et al. and the paper of Graves et al."
#blankIdx = len(classes)
blankIdx = 0
maxT, maxC = mat.shape
# initialise beam state
last = BeamState()
labeling = ()
last.entries[labeling] = BeamEntry()
last.entries[labeling].prBlank = 1
last.entries[labeling].prTotal = 1
# go over all time-steps
for t in range(maxT):
curr = BeamState()
# get beam-labelings of best beams
bestLabelings = last.sort()[0:beamWidth]
# go over best beams
for labeling in bestLabelings:
# probability of paths ending with a non-blank
prNonBlank = 0
# in case of non-empty beam
if labeling:
# probability of paths with repeated last char at the end
prNonBlank = last.entries[labeling].prNonBlank * mat[t, labeling[-1]]
# probability of paths ending with a blank
prBlank = (last.entries[labeling].prTotal) * mat[t, blankIdx]
# add beam at current time-step if needed
addBeam(curr, labeling)
# fill in data
curr.entries[labeling].labeling = labeling
curr.entries[labeling].prNonBlank += prNonBlank
curr.entries[labeling].prBlank += prBlank
curr.entries[labeling].prTotal += prBlank + prNonBlank
curr.entries[labeling].prText = last.entries[labeling].prText # beam-labeling not changed, therefore LM score also unchanged from previous time-step
curr.entries[labeling].lmApplied = True # LM already applied at previous time-step for this beam-labeling
# extend current beam-labeling
for c in range(maxC - 1):
# add new char to current beam-labeling
newLabeling = labeling + (c,)
# if new labeling contains duplicate char at the end, only consider paths ending with a blank
if labeling and labeling[-1] == c:
prNonBlank = mat[t, c] * last.entries[labeling].prBlank
else:
prNonBlank = mat[t, c] * last.entries[labeling].prTotal
# add beam at current time-step if needed
addBeam(curr, newLabeling)
# fill in data
curr.entries[newLabeling].labeling = newLabeling
curr.entries[newLabeling].prNonBlank += prNonBlank
curr.entries[newLabeling].prTotal += prNonBlank
# apply LM
#applyLM(curr.entries[labeling], curr.entries[newLabeling], classes, lm)
# set new beam state
last = curr
# normalise LM scores according to beam-labeling-length
last.norm()
# sort by probability
#bestLabeling = last.sort()[0] # get most probable labeling
# map labels to chars
#res = ''
#for idx,l in enumerate(bestLabeling):
# if l not in ignore_idx and (not (idx > 0 and bestLabeling[idx - 1] == bestLabeling[idx])): # removing repeated characters and blank.
# res += classes[l]
if dict_list == []:
bestLabeling = last.sort()[0] # get most probable labeling
res = ''
for i,l in enumerate(bestLabeling):
if l not in ignore_idx and (not (i > 0 and bestLabeling[i - 1] == bestLabeling[i])): # removing repeated characters and blank.
res += classes[l]
else:
res = last.wordsearch(classes, ignore_idx, beamWidth, dict_list)
return res | beam search as described by the paper of Hwang et al. and the paper of Graves et al. |
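A small usage sketch, assuming the ctcBeamSearch defined above is in scope; column 0 of the score matrix is treated as the CTC blank, matching blankIdx = 0:
import numpy as np
classes = ['[blank]', 'a', 'b', 'c']               # one entry per matrix column
rng = np.random.default_rng(0)
mat = rng.random((10, len(classes)))               # fake per-timestep class scores
mat = mat / mat.sum(axis=1, keepdims=True)         # row-normalise like a softmax
decoded = ctcBeamSearch(mat, classes, ignore_idx=[0], lm=None, beamWidth=5)
print(decoded)                                     # best labeling mapped to characters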
1,175 | import torch
import pickle
import numpy as np
def consecutive(data, mode ='first', stepsize=1):
group = np.split(data, np.where(np.diff(data) != stepsize)[0]+1)
group = [item for item in group if len(item)>0]
if mode == 'first': result = [l[0] for l in group]
elif mode == 'last': result = [l[-1] for l in group]
return result
def word_segmentation(mat, separator_idx = {'th': [1,2],'en': [3,4]}, separator_idx_list = [1,2,3,4]):
result = []
sep_list = []
start_idx = 0
sep_lang = ''
for sep_idx in separator_idx_list:
if sep_idx % 2 == 0: mode ='first'
else: mode ='last'
a = consecutive( np.argwhere(mat == sep_idx).flatten(), mode)
new_sep = [ [item, sep_idx] for item in a]
sep_list += new_sep
sep_list = sorted(sep_list, key=lambda x: x[0])
for sep in sep_list:
for lang in separator_idx.keys():
if sep[1] == separator_idx[lang][0]: # start lang
sep_lang = lang
sep_start_idx = sep[0]
elif sep[1] == separator_idx[lang][1]: # end lang
if sep_lang == lang: # check if last entry is the same start lang
new_sep_pair = [lang, [sep_start_idx+1, sep[0]-1]]
if sep_start_idx > start_idx:
result.append( ['', [start_idx, sep_start_idx-1] ] )
start_idx = sep[0]+1
result.append(new_sep_pair)
else: # reset
sep_lang = ''
if start_idx <= len(mat)-1:
result.append( ['', [start_idx, len(mat)-1] ] )
return result | null |
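A usage sketch for the two helpers above (assumed in scope); with the default separator_idx, indices 1/2 delimit a 'th' span and 3/4 delimit an 'en' span:
import numpy as np
mat = np.array([1, 7, 8, 9, 2, 3, 5, 6, 4])        # toy sequence of predicted class indices
print(word_segmentation(mat))                      # [['th', [1, 3]], ['en', [6, 7]]]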
1,176 | import os
import sys
import re
import six
import math
import torch
import pandas as pd
from natsort import natsorted
from PIL import Image
import numpy as np
from torch.utils.data import Dataset, ConcatDataset, Subset
from torch._utils import _accumulate
import torchvision.transforms as transforms
def contrast_grey(img):
high = np.percentile(img, 90)
low = np.percentile(img, 10)
return (high-low)/(high+low), high, low
def adjust_contrast_grey(img, target = 0.4):
contrast, high, low = contrast_grey(img)
if contrast < target:
img = img.astype(int)
ratio = 200./(high-low)
img = (img - low + 25)*ratio
img = np.maximum(np.full(img.shape, 0) ,np.minimum(np.full(img.shape, 255), img)).astype(np.uint8)
return img | null |
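A quick sketch of the contrast helpers above (assumed in scope) on a synthetic low-contrast crop:
import numpy as np
crop = np.full((32, 100), 100, dtype=np.uint8)      # flat grey background
crop[8:24, 20:80] = 180                             # faint "text" region
contrast, high, low = contrast_grey(crop)
boosted = adjust_contrast_grey(crop, target=0.4)    # stretched because contrast < 0.4
print(round(float(contrast), 3), boosted.min(), boosted.max())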
1,177 | import os
import sys
import re
import six
import math
import torch
import pandas as pd
from natsort import natsorted
from PIL import Image
import numpy as np
from torch.utils.data import Dataset, ConcatDataset, Subset
from torch._utils import _accumulate
import torchvision.transforms as transforms
def tensor2im(image_tensor, imtype=np.uint8):
image_numpy = image_tensor.cpu().float().numpy()
if image_numpy.shape[0] == 1:
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
return image_numpy.astype(imtype) | null |
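Usage sketch for tensor2im above (assumed in scope): a single-channel CHW tensor in [-1, 1] becomes an HWC uint8 image tiled to three channels:
import torch
fake = torch.rand(1, 32, 100) * 2 - 1   # 1 x H x W, values roughly in [-1, 1]
img = tensor2im(fake)
print(img.shape, img.dtype)             # (32, 100, 3) uint8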
1,178 | import os
import sys
import re
import six
import math
import torch
import pandas as pd
from natsort import natsorted
from PIL import Image
import numpy as np
from torch.utils.data import Dataset, ConcatDataset, Subset
from torch._utils import _accumulate
import torchvision.transforms as transforms
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path) | null |
1,179 | import argparse
import os
import shutil
import time
import yaml
import multiprocessing as mp
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import wandb
from config.load_config import load_yaml, DotDict
from data.dataset import SynthTextDataSet
from loss.mseloss import Maploss_v2, Maploss_v3
from model.craft import CRAFT
from metrics.eval_det_iou import DetectionIoUEvaluator
from eval import main_eval
from utils.util import copyStateDict, save_parser
from functools import reduce
class Trainer(object):
def __init__(self, config, gpu):
self.config = config
self.gpu = gpu
self.mode = None
self.trn_loader, self.trn_sampler = self.get_trn_loader()
self.net_param = self.get_load_param(gpu)
def get_trn_loader(self):
dataset = SynthTextDataSet(
output_size=self.config.train.data.output_size,
data_dir=self.config.data_dir.synthtext,
saved_gt_dir=None,
mean=self.config.train.data.mean,
variance=self.config.train.data.variance,
gauss_init_size=self.config.train.data.gauss_init_size,
gauss_sigma=self.config.train.data.gauss_sigma,
enlarge_region=self.config.train.data.enlarge_region,
enlarge_affinity=self.config.train.data.enlarge_affinity,
aug=self.config.train.data.syn_aug,
vis_test_dir=self.config.vis_test_dir,
vis_opt=self.config.train.data.vis_opt,
sample=self.config.train.data.syn_sample,
)
trn_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
trn_loader = torch.utils.data.DataLoader(
dataset,
batch_size=self.config.train.batch_size,
shuffle=False,
num_workers=self.config.train.num_workers,
sampler=trn_sampler,
drop_last=True,
pin_memory=True,
)
return trn_loader, trn_sampler
def get_load_param(self, gpu):
if self.config.train.ckpt_path is not None:
map_location = {"cuda:%d" % 0: "cuda:%d" % gpu}
param = torch.load(self.config.train.ckpt_path, map_location=map_location)
else:
param = None
return param
def adjust_learning_rate(self, optimizer, gamma, step, lr):
lr = lr * (gamma ** step)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
return param_group["lr"]
def get_loss(self):
if self.config.train.loss == 2:
criterion = Maploss_v2()
elif self.config.train.loss == 3:
criterion = Maploss_v3()
else:
raise Exception("Undefined loss")
return criterion
def iou_eval(self, dataset, train_step, save_param_path, buffer, model):
test_config = DotDict(self.config.test[dataset])
val_result_dir = os.path.join(
self.config.results_dir, "{}/{}".format(dataset + "_iou", str(train_step))
)
evaluator = DetectionIoUEvaluator()
metrics = main_eval(
save_param_path,
self.config.train.backbone,
test_config,
evaluator,
val_result_dir,
buffer,
model,
self.mode,
)
if self.gpu == 0 and self.config.wandb_opt:
wandb.log(
{
"{} IoU Recall".format(dataset): np.round(metrics["recall"], 3),
"{} IoU Precision".format(dataset): np.round(
metrics["precision"], 3
),
"{} IoU F1-score".format(dataset): np.round(metrics["hmean"], 3),
}
)
def train(self, buffer_dict):
torch.cuda.set_device(self.gpu)
# DATASET -----------------------------------------------------------------------------------------------------#
trn_loader = self.trn_loader
# MODEL -------------------------------------------------------------------------------------------------------#
if self.config.train.backbone == "vgg":
craft = CRAFT(pretrained=True, amp=self.config.train.amp)
else:
raise Exception("Undefined architecture")
if self.config.train.ckpt_path is not None:
craft.load_state_dict(copyStateDict(self.net_param["craft"]))
craft = nn.SyncBatchNorm.convert_sync_batchnorm(craft)
craft = craft.cuda()
craft = torch.nn.parallel.DistributedDataParallel(craft, device_ids=[self.gpu])
torch.backends.cudnn.benchmark = True
# OPTIMIZER----------------------------------------------------------------------------------------------------#
optimizer = optim.Adam(
craft.parameters(),
lr=self.config.train.lr,
weight_decay=self.config.train.weight_decay,
)
if self.config.train.ckpt_path is not None and self.config.train.st_iter != 0:
optimizer.load_state_dict(copyStateDict(self.net_param["optimizer"]))
self.config.train.st_iter = self.net_param["optimizer"]["state"][0]["step"]
self.config.train.lr = self.net_param["optimizer"]["param_groups"][0]["lr"]
# LOSS --------------------------------------------------------------------------------------------------------#
# mixed precision
if self.config.train.amp:
scaler = torch.cuda.amp.GradScaler()
# load model
if (
self.config.train.ckpt_path is not None
and self.config.train.st_iter != 0
):
scaler.load_state_dict(copyStateDict(self.net_param["scaler"]))
else:
scaler = None
criterion = self.get_loss()
# TRAIN -------------------------------------------------------------------------------------------------------#
train_step = self.config.train.st_iter
whole_training_step = self.config.train.end_iter
update_lr_rate_step = 0
training_lr = self.config.train.lr
loss_value = 0
batch_time = 0
epoch = 0
start_time = time.time()
while train_step < whole_training_step:
self.trn_sampler.set_epoch(train_step)
for (
index,
(image, region_image, affinity_image, confidence_mask,),
) in enumerate(trn_loader):
craft.train()
if train_step > 0 and train_step % self.config.train.lr_decay == 0:
update_lr_rate_step += 1
training_lr = self.adjust_learning_rate(
optimizer,
self.config.train.gamma,
update_lr_rate_step,
self.config.train.lr,
)
images = image.cuda(non_blocking=True)
region_image_label = region_image.cuda(non_blocking=True)
affinity_image_label = affinity_image.cuda(non_blocking=True)
confidence_mask_label = confidence_mask.cuda(non_blocking=True)
if self.config.train.amp:
with torch.cuda.amp.autocast():
output, _ = craft(images)
out1 = output[:, :, :, 0]
out2 = output[:, :, :, 1]
loss = criterion(
region_image_label,
affinity_image_label,
out1,
out2,
confidence_mask_label,
self.config.train.neg_rto,
self.config.train.n_min_neg,
)
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
else:
output, _ = craft(images)
out1 = output[:, :, :, 0]
out2 = output[:, :, :, 1]
loss = criterion(
region_image_label,
affinity_image_label,
out1,
out2,
confidence_mask_label,
self.config.train.neg_rto,
)
optimizer.zero_grad()
loss.backward()
optimizer.step()
end_time = time.time()
loss_value += loss.item()
batch_time += end_time - start_time
if train_step > 0 and train_step % 5 == 0 and self.gpu == 0:
mean_loss = loss_value / 5
loss_value = 0
avg_batch_time = batch_time / 5
batch_time = 0
print(
"{}, training_step: {}|{}, learning rate: {:.8f}, "
"training_loss: {:.5f}, avg_batch_time: {:.5f}".format(
time.strftime(
"%Y-%m-%d:%H:%M:%S", time.localtime(time.time())
),
train_step,
whole_training_step,
training_lr,
mean_loss,
avg_batch_time,
)
)
if self.gpu == 0 and self.config.wandb_opt:
wandb.log({"train_step": train_step, "mean_loss": mean_loss})
if (
train_step % self.config.train.eval_interval == 0
and train_step != 0
):
# reset all evaluation buffer slots to None
if self.gpu == 0:
for buffer in buffer_dict.values():
for i in range(len(buffer)):
buffer[i] = None
print("Saving state, index:", train_step)
save_param_dic = {
"iter": train_step,
"craft": craft.state_dict(),
"optimizer": optimizer.state_dict(),
}
save_param_path = (
self.config.results_dir
+ "/CRAFT_clr_"
+ repr(train_step)
+ ".pth"
)
if self.config.train.amp:
save_param_dic["scaler"] = scaler.state_dict()
save_param_path = (
self.config.results_dir
+ "/CRAFT_clr_amp_"
+ repr(train_step)
+ ".pth"
)
if self.gpu == 0:
torch.save(save_param_dic, save_param_path)
# validation
self.iou_eval(
"icdar2013",
train_step,
save_param_path,
buffer_dict["icdar2013"],
craft,
)
train_step += 1
if train_step >= whole_training_step:
break
epoch += 1
# save last model
if self.gpu == 0:
save_param_dic = {
"iter": train_step,
"craft": craft.state_dict(),
"optimizer": optimizer.state_dict(),
}
save_param_path = (
self.config.results_dir + "/CRAFT_clr_" + repr(train_step) + ".pth"
)
if self.config.train.amp:
save_param_dic["scaler"] = scaler.state_dict()
save_param_path = (
self.config.results_dir
+ "/CRAFT_clr_amp_"
+ repr(train_step)
+ ".pth"
)
torch.save(save_param_dic, save_param_path)
class DotDict(dict):
def __getattr__(self, k):
try:
v = self[k]
except:
return super().__getattr__(k)
if isinstance(v, dict):
return DotDict(v)
return v
def __getitem__(self, k):
if isinstance(k, str) and '.' in k:
k = k.split('.')
if isinstance(k, (list, tuple)):
return reduce(lambda d, kk: d[kk], k, self)
return super().__getitem__(k)
def get(self, k, default=None):
if isinstance(k, str) and '.' in k:
try:
return self[k]
except KeyError:
return default
return super().get(k, default=default)
def main_worker(gpu, port, ngpus_per_node, config, buffer_dict, exp_name):
torch.distributed.init_process_group(
backend="nccl",
init_method="tcp://127.0.0.1:" + port,
world_size=ngpus_per_node,
rank=gpu,
)
# Apply config to wandb
if gpu == 0 and config["wandb_opt"]:
wandb.init(project="craft-stage1", entity="gmuffiness", name=exp_name)
wandb.config.update(config)
batch_size = int(config["train"]["batch_size"] / ngpus_per_node)
config["train"]["batch_size"] = batch_size
config = DotDict(config)
# Start train
trainer = Trainer(config, gpu)
trainer.train(buffer_dict)
if gpu == 0 and config["wandb_opt"]:
wandb.finish()
torch.distributed.destroy_process_group() | null |
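The Trainer above is meant to be driven with one process per GPU. A minimal launch sketch, assuming it lives in a module together with main_worker; the config file name, buffer size, port and experiment name below are illustrative assumptions, not part of the record:
import multiprocessing as mp
import torch
from config.load_config import load_yaml

if __name__ == "__main__":
    config = load_yaml("syn_train")                      # hypothetical config file name
    ngpus = torch.cuda.device_count()
    # one shared slot per evaluation image (the count here is illustrative)
    buffer_dict = {"icdar2013": mp.Manager().list([None] * 233)}
    torch.multiprocessing.spawn(
        main_worker,
        args=("25001", ngpus, config, buffer_dict, "craft_stage1_exp"),
        nprocs=ngpus,
    )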
1,180 | import numpy as np
import cv2
from skimage import io
def loadImage(img_file):
img = io.imread(img_file) # RGB order
if img.shape[0] == 2:
img = img[0]
if len(img.shape) == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
if img.shape[2] == 4:
img = img[:, :, :3]
img = np.array(img)
return img | null |
1,181 | import numpy as np
import cv2
from skimage import io
def denormalizeMeanVariance(
in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)
):
# should be RGB order
img = in_img.copy()
img *= variance
img += mean
img *= 255.0
img = np.clip(img, 0, 255).astype(np.uint8)
return img | null |
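A quick sketch for denormalizeMeanVariance above (assumed in scope); the input is a float RGB image that has already been mean/variance normalized:
import numpy as np
normalized = np.random.randn(64, 64, 3).astype(np.float32) * 0.5   # stand-in for a normalized image
restored = denormalizeMeanVariance(normalized)
print(restored.dtype, restored.shape)                               # uint8 (64, 64, 3)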
1,182 | import cv2
import numpy as np
from skimage.segmentation import watershed
def segment_region_score(watershed_param, region_score, word_image, pseudo_vis_opt):
region_score = np.float32(region_score) / 255
fore = np.uint8(region_score > 0.75)
back = np.uint8(region_score < 0.05)
unknown = 1 - (fore + back)
ret, markers = cv2.connectedComponents(fore)
markers += 1
markers[unknown == 1] = 0
labels = watershed(-region_score, markers)
boxes = []
for label in range(2, ret + 1):
y, x = np.where(labels == label)
x_max = x.max()
y_max = y.max()
x_min = x.min()
y_min = y.min()
box = [[x_min, y_min], [x_max, y_min], [x_max, y_max], [x_min, y_max]]
box = np.array(box)
box *= 2
boxes.append(box)
return np.array(boxes, dtype=np.float32)
def exec_watershed_by_version(
watershed_param, region_score, word_image, pseudo_vis_opt
):
func_name_map_dict = {
"skimage": segment_region_score,
}
try:
return func_name_map_dict[watershed_param.version](
watershed_param, region_score, word_image, pseudo_vis_opt
)
except:
print(
f"Watershed version {watershed_param.version} does not exist in func_name_map_dict."
) | null |
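A small sketch of the watershed-based splitter above (assumed in scope). In this version watershed_param, word_image and pseudo_vis_opt are not used, so placeholders are enough:
import numpy as np
heat = np.zeros((64, 128), dtype=np.uint8)
heat[20:40, 10:50] = 230      # first high-response "character" blob
heat[20:40, 70:110] = 230     # second blob
boxes = segment_region_score(None, heat, None, False)
print(boxes.shape)            # (2, 4, 2): two boxes, coordinates scaled up by 2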
1,183 | import random
import cv2
import numpy as np
from PIL import Image
from torchvision.transforms.functional import resized_crop, crop
from torchvision.transforms import RandomResizedCrop, RandomCrop
from torchvision.transforms import InterpolationMode
def rescale(img, bboxes, target_size=2240):
h, w = img.shape[0:2]
scale = target_size / max(h, w)
img = cv2.resize(img, dsize=None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
bboxes = bboxes * scale
return img, bboxes | null |
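Usage sketch for rescale above (assumed in scope): the longer image side is resized to target_size and the boxes are scaled by the same factor:
import numpy as np
img = np.zeros((480, 640, 3), dtype=np.uint8)
boxes = np.array([[[10, 10], [100, 10], [100, 50], [10, 50]]], dtype=np.float32)
img2, boxes2 = rescale(img, boxes, target_size=2240)
print(img2.shape[:2], boxes2[0, 1])        # (1680, 2240) and [350. 35.]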
1,184 | import random
import cv2
import numpy as np
from PIL import Image
from torchvision.transforms.functional import resized_crop, crop
from torchvision.transforms import RandomResizedCrop, RandomCrop
from torchvision.transforms import InterpolationMode
def random_resize_crop_synth(augment_targets, size):
image, region_score, affinity_score, confidence_mask = augment_targets
image = Image.fromarray(image)
region_score = Image.fromarray(region_score)
affinity_score = Image.fromarray(affinity_score)
confidence_mask = Image.fromarray(confidence_mask)
short_side = min(image.size)
i, j, h, w = RandomCrop.get_params(image, output_size=(short_side, short_side))
image = resized_crop(
image, i, j, h, w, size=(size, size), interpolation=InterpolationMode.BICUBIC
)
region_score = resized_crop(
region_score, i, j, h, w, (size, size), interpolation=InterpolationMode.BICUBIC
)
affinity_score = resized_crop(
affinity_score,
i,
j,
h,
w,
(size, size),
interpolation=InterpolationMode.BICUBIC,
)
confidence_mask = resized_crop(
confidence_mask,
i,
j,
h,
w,
(size, size),
interpolation=InterpolationMode.NEAREST,
)
image = np.array(image)
region_score = np.array(region_score)
affinity_score = np.array(affinity_score)
confidence_mask = np.array(confidence_mask)
augment_targets = [image, region_score, affinity_score, confidence_mask]
return augment_targets | null |
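A usage sketch for the synthetic-data augmentation above (assumed in scope); the score maps are float32, which PIL handles as mode 'F' images:
import numpy as np
image = np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8)
region = np.random.rand(300, 400).astype(np.float32)
affinity = np.random.rand(300, 400).astype(np.float32)
confidence = np.ones((300, 400), dtype=np.float32)
out = random_resize_crop_synth([image, region, affinity, confidence], size=768)
print([a.shape for a in out])    # [(768, 768, 3), (768, 768), (768, 768), (768, 768)]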
1,185 | import random
import cv2
import numpy as np
from PIL import Image
from torchvision.transforms.functional import resized_crop, crop
from torchvision.transforms import RandomResizedCrop, RandomCrop
from torchvision.transforms import InterpolationMode
def random_resize_crop(
augment_targets, scale, ratio, size, threshold, pre_crop_area=None
):
image, region_score, affinity_score, confidence_mask = augment_targets
image = Image.fromarray(image)
region_score = Image.fromarray(region_score)
affinity_score = Image.fromarray(affinity_score)
confidence_mask = Image.fromarray(confidence_mask)
if pre_crop_area is not None:
i, j, h, w = pre_crop_area
else:
if random.random() < threshold:
i, j, h, w = RandomResizedCrop.get_params(image, scale=scale, ratio=ratio)
else:
i, j, h, w = RandomResizedCrop.get_params(
image, scale=(1.0, 1.0), ratio=(1.0, 1.0)
)
image = resized_crop(
image, i, j, h, w, size=(size, size), interpolation=InterpolationMode.BICUBIC
)
region_score = resized_crop(
region_score, i, j, h, w, (size, size), interpolation=InterpolationMode.BICUBIC
)
affinity_score = resized_crop(
affinity_score,
i,
j,
h,
w,
(size, size),
interpolation=InterpolationMode.BICUBIC,
)
confidence_mask = resized_crop(
confidence_mask,
i,
j,
h,
w,
(size, size),
interpolation=InterpolationMode.NEAREST,
)
image = np.array(image)
region_score = np.array(region_score)
affinity_score = np.array(affinity_score)
confidence_mask = np.array(confidence_mask)
augment_targets = [image, region_score, affinity_score, confidence_mask]
return augment_targets | null |
1,186 | import random
import cv2
import numpy as np
from PIL import Image
from torchvision.transforms.functional import resized_crop, crop
from torchvision.transforms import RandomResizedCrop, RandomCrop
from torchvision.transforms import InterpolationMode
def random_crop(augment_targets, size):
image, region_score, affinity_score, confidence_mask = augment_targets
image = Image.fromarray(image)
region_score = Image.fromarray(region_score)
affinity_score = Image.fromarray(affinity_score)
confidence_mask = Image.fromarray(confidence_mask)
i, j, h, w = RandomCrop.get_params(image, output_size=(size, size))
image = crop(image, i, j, h, w)
region_score = crop(region_score, i, j, h, w)
affinity_score = crop(affinity_score, i, j, h, w)
confidence_mask = crop(confidence_mask, i, j, h, w)
image = np.array(image)
region_score = np.array(region_score)
affinity_score = np.array(affinity_score)
confidence_mask = np.array(confidence_mask)
augment_targets = [image, region_score, affinity_score, confidence_mask]
return augment_targets | null |
1,187 | import random
import cv2
import numpy as np
from PIL import Image
from torchvision.transforms.functional import resized_crop, crop
from torchvision.transforms import RandomResizedCrop, RandomCrop
from torchvision.transforms import InterpolationMode
def random_horizontal_flip(imgs):
if random.random() < 0.5:
for i in range(len(imgs)):
imgs[i] = np.flip(imgs[i], axis=1).copy()
return imgs | null |
1,188 | import random
import cv2
import numpy as np
from PIL import Image
from torchvision.transforms.functional import resized_crop, crop
from torchvision.transforms import RandomResizedCrop, RandomCrop
from torchvision.transforms import InterpolationMode
def random_scale(images, word_level_char_bbox, scale_range):
scale = random.sample(scale_range, 1)[0]
for i in range(len(images)):
images[i] = cv2.resize(images[i], dsize=None, fx=scale, fy=scale)
for i in range(len(word_level_char_bbox)):
word_level_char_bbox[i] *= scale
return images | null |
1,189 | import random
import cv2
import numpy as np
from PIL import Image
from torchvision.transforms.functional import resized_crop, crop
from torchvision.transforms import RandomResizedCrop, RandomCrop
from torchvision.transforms import InterpolationMode
def random_rotate(images, max_angle):
angle = random.random() * 2 * max_angle - max_angle
for i in range(len(images)):
img = images[i]
w, h = img.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((h / 2, w / 2), angle, 1)
if i == len(images) - 1:
img_rotation = cv2.warpAffine(
img, M=rotation_matrix, dsize=(h, w), flags=cv2.INTER_NEAREST
)
else:
img_rotation = cv2.warpAffine(img, rotation_matrix, (h, w))
images[i] = img_rotation
return images | null |
1,190 | import math
import numpy as np
def getX(K, B, Ypoint):
return int((Ypoint-B)/K) | null |
1,191 | import math
import numpy as np
def lineBiasAndK(Apoint, Bpoint):
K = pointAngle(Apoint, Bpoint)
B = Apoint[1] - K*Apoint[0]
return K, B
def sidePoint(Apoint, Bpoint, h, w, placehold, enlarge_size):
K, B = lineBiasAndK(Apoint, Bpoint)
angle = abs(math.atan(pointAngle(Apoint, Bpoint)))
distance = pointDistance(Apoint, Bpoint)
x_enlarge_size, y_enlarge_size = enlarge_size
XaxisIncreaseDistance = abs(math.cos(angle) * x_enlarge_size * distance)
YaxisIncreaseDistance = abs(math.sin(angle) * y_enlarge_size * distance)
if placehold == 'leftTop':
x1 = max(0, Apoint[0] - XaxisIncreaseDistance)
y1 = max(0, Apoint[1] - YaxisIncreaseDistance)
elif placehold == 'rightTop':
x1 = min(w, Bpoint[0] + XaxisIncreaseDistance)
y1 = max(0, Bpoint[1] - YaxisIncreaseDistance)
elif placehold == 'rightBottom':
x1 = min(w, Bpoint[0] + XaxisIncreaseDistance)
y1 = min(h, Bpoint[1] + YaxisIncreaseDistance)
elif placehold == 'leftBottom':
x1 = max(0, Apoint[0] - XaxisIncreaseDistance)
y1 = min(h, Apoint[1] + YaxisIncreaseDistance)
return int(x1), int(y1)
def enlargebox(box, h, w, enlarge_size, horizontal_text_bool):
if not horizontal_text_bool:
enlarge_size = (enlarge_size[1], enlarge_size[0])
box = np.roll(box, -np.argmin(box.sum(axis=1)), axis=0)
Apoint, Bpoint, Cpoint, Dpoint = box
K1, B1 = lineBiasAndK(box[0], box[2])
K2, B2 = lineBiasAndK(box[3], box[1])
X = (B2 - B1)/(K1 - K2)
Y = K1 * X + B1
center = [X, Y]
x1, y1 = sidePoint(Apoint, center, h, w, 'leftTop', enlarge_size)
x2, y2 = sidePoint(center, Bpoint, h, w, 'rightTop', enlarge_size)
x3, y3 = sidePoint(center, Cpoint, h, w, 'rightBottom', enlarge_size)
x4, y4 = sidePoint(Dpoint, center, h, w, 'leftBottom', enlarge_size)
newcharbox = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
return newcharbox | null |
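The record above calls pointAngle and pointDistance, which are not included in the snippet. A plausible pair of definitions, consistent with how they are used (line slope and Euclidean distance) — these are assumptions, not the original helpers:
import math

def pointAngle(Apoint, Bpoint):
    # slope of the line through A and B; the epsilon avoids division by zero
    return (Bpoint[1] - Apoint[1]) / ((Bpoint[0] - Apoint[0]) + 1e-10)

def pointDistance(Apoint, Bpoint):
    # Euclidean distance between A and B
    return math.sqrt((Bpoint[1] - Apoint[1]) ** 2 + (Bpoint[0] - Apoint[0]) ** 2)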
1,192 | import os
import yaml
from functools import reduce
CONFIG_PATH = os.path.dirname(__file__)
import os
os.environ["LRU_CACHE_CAPACITY"] = "1"
def load_yaml(config_name):
with open(os.path.join(CONFIG_PATH, config_name)+ '.yaml') as file:
config = yaml.safe_load(file)
return config | null |
1,193 | import argparse
import os
import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from tqdm import tqdm
import wandb
from config.load_config import load_yaml, DotDict
from model.craft import CRAFT
from metrics.eval_det_iou import DetectionIoUEvaluator
from utils.inference_boxes import (
test_net,
load_icdar2015_gt,
load_icdar2013_gt,
load_synthtext_gt,
)
from utils.util import copyStateDict
from functools import reduce
from collections import namedtuple
from shapely.geometry import Polygon
def main_eval(model_path, backbone, config, evaluator, result_dir, buffer, model, mode):
if not os.path.exists(result_dir):
os.makedirs(result_dir, exist_ok=True)
total_imgs_bboxes_gt, total_imgs_path = load_test_dataset_iou("custom_data", config)
if mode == "weak_supervision" and torch.cuda.device_count() != 1:
gpu_count = torch.cuda.device_count() // 2
else:
gpu_count = torch.cuda.device_count()
gpu_idx = torch.cuda.current_device()
torch.cuda.set_device(gpu_idx)
# Only evaluation time
if model is None:
piece_imgs_path = total_imgs_path
if backbone == "vgg":
model = CRAFT()
else:
raise Exception("Undefined architecture")
print("Loading weights from checkpoint (" + model_path + ")")
net_param = torch.load(model_path, map_location=f"cuda:{gpu_idx}")
model.load_state_dict(copyStateDict(net_param["craft"]))
if config.cuda:
model = model.cuda()
cudnn.benchmark = False
# Distributed evaluation in the middle of training time
else:
if buffer is not None:
# check all buffer value is None for distributed evaluation
assert all(
v is None for v in buffer
), "Buffer already filled with another value."
slice_idx = len(total_imgs_bboxes_gt) // gpu_count
# last gpu
if gpu_idx == gpu_count - 1:
piece_imgs_path = total_imgs_path[gpu_idx * slice_idx :]
# piece_imgs_bboxes_gt = total_imgs_bboxes_gt[gpu_idx * slice_idx:]
else:
piece_imgs_path = total_imgs_path[
gpu_idx * slice_idx : (gpu_idx + 1) * slice_idx
]
# piece_imgs_bboxes_gt = total_imgs_bboxes_gt[gpu_idx * slice_idx: (gpu_idx + 1) * slice_idx]
model.eval()
# -----------------------------------------------------------------------------------------------------------------#
total_imgs_bboxes_pre = []
for k, img_path in enumerate(tqdm(piece_imgs_path)):
image = cv2.imread(img_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
single_img_bbox = []
bboxes, polys, score_text = test_net(
model,
image,
config.text_threshold,
config.link_threshold,
config.low_text,
config.cuda,
config.poly,
config.canvas_size,
config.mag_ratio,
)
for box in bboxes:
box_info = {"points": box, "text": "###", "ignore": False}
single_img_bbox.append(box_info)
total_imgs_bboxes_pre.append(single_img_bbox)
# Distributed evaluation -------------------------------------------------------------------------------------#
if buffer is not None:
buffer[gpu_idx * slice_idx + k] = single_img_bbox
# print(sum([element is not None for element in buffer]))
# -------------------------------------------------------------------------------------------------------------#
if config.vis_opt:
viz_test(
image,
score_text,
pre_box=polys,
gt_box=total_imgs_bboxes_gt[k],
img_name=img_path,
result_dir=result_dir,
test_folder_name="custom_data",
)
# In distributed evaluation mode, wait until the buffer is completely filled
if buffer is not None:
while None in buffer:
continue
assert all(v is not None for v in buffer), "Buffer not filled"
total_imgs_bboxes_pre = buffer
results = []
for i, (gt, pred) in enumerate(zip(total_imgs_bboxes_gt, total_imgs_bboxes_pre)):
perSampleMetrics_dict = evaluator.evaluate_image(gt, pred)
results.append(perSampleMetrics_dict)
metrics = evaluator.combine_results(results)
print(metrics)
return metrics
class DotDict(dict):
def __getattr__(self, k):
try:
v = self[k]
except:
return super().__getattr__(k)
if isinstance(v, dict):
return DotDict(v)
return v
def __getitem__(self, k):
if isinstance(k, str) and '.' in k:
k = k.split('.')
if isinstance(k, (list, tuple)):
return reduce(lambda d, kk: d[kk], k, self)
return super().__getitem__(k)
def get(self, k, default=None):
if isinstance(k, str) and '.' in k:
try:
return self[k]
except KeyError:
return default
return super().get(k, default=default)
class DetectionIoUEvaluator(object):
def __init__(self, iou_constraint=0.5, area_precision_constraint=0.5):
self.iou_constraint = iou_constraint
self.area_precision_constraint = area_precision_constraint
def evaluate_image(self, gt, pred):
def get_union(pD, pG):
return Polygon(pD).union(Polygon(pG)).area
def get_intersection_over_union(pD, pG):
return get_intersection(pD, pG) / get_union(pD, pG)
def get_intersection(pD, pG):
return Polygon(pD).intersection(Polygon(pG)).area
def compute_ap(confList, matchList, numGtCare):
correct = 0
AP = 0
if len(confList) > 0:
confList = np.array(confList)
matchList = np.array(matchList)
sorted_ind = np.argsort(-confList)
confList = confList[sorted_ind]
matchList = matchList[sorted_ind]
for n in range(len(confList)):
match = matchList[n]
if match:
correct += 1
AP += float(correct) / (n + 1)
if numGtCare > 0:
AP /= numGtCare
return AP
perSampleMetrics = {}
matchedSum = 0
Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')
numGlobalCareGt = 0
numGlobalCareDet = 0
arrGlobalConfidences = []
arrGlobalMatches = []
recall = 0
precision = 0
hmean = 0
detMatched = 0
iouMat = np.empty([1, 1])
gtPols = []
detPols = []
gtPolPoints = []
detPolPoints = []
# Array of Ground Truth Polygons' keys marked as don't Care
gtDontCarePolsNum = []
# Array of Detected Polygons' matched with a don't Care GT
detDontCarePolsNum = []
pairs = []
detMatchedNums = []
arrSampleConfidences = []
arrSampleMatch = []
evaluationLog = ""
# print(len(gt))
for n in range(len(gt)):
points = gt[n]['points']
# transcription = gt[n]['text']
dontCare = gt[n]['ignore']
# points = Polygon(points)
# points = points.buffer(0)
try:
if not Polygon(points).is_valid or not Polygon(points).is_simple:
continue
except:
import ipdb;
ipdb.set_trace()
#import ipdb;ipdb.set_trace()
gtPol = points
gtPols.append(gtPol)
gtPolPoints.append(points)
if dontCare:
gtDontCarePolsNum.append(len(gtPols) - 1)
evaluationLog += "GT polygons: " + str(len(gtPols)) + (
" (" + str(len(gtDontCarePolsNum)) + " don't care)\n"
if len(gtDontCarePolsNum) > 0 else "\n")
for n in range(len(pred)):
points = pred[n]['points']
# points = Polygon(points)
# points = points.buffer(0)
if not Polygon(points).is_valid or not Polygon(points).is_simple:
continue
detPol = points
detPols.append(detPol)
detPolPoints.append(points)
if len(gtDontCarePolsNum) > 0:
for dontCarePol in gtDontCarePolsNum:
dontCarePol = gtPols[dontCarePol]
intersected_area = get_intersection(dontCarePol, detPol)
pdDimensions = Polygon(detPol).area
precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions
if (precision > self.area_precision_constraint):
detDontCarePolsNum.append(len(detPols) - 1)
break
evaluationLog += "DET polygons: " + str(len(detPols)) + (
" (" + str(len(detDontCarePolsNum)) + " don't care)\n"
if len(detDontCarePolsNum) > 0 else "\n")
if len(gtPols) > 0 and len(detPols) > 0:
# Calculate IoU and precision matrices
outputShape = [len(gtPols), len(detPols)]
iouMat = np.empty(outputShape)
gtRectMat = np.zeros(len(gtPols), np.int8)
detRectMat = np.zeros(len(detPols), np.int8)
for gtNum in range(len(gtPols)):
for detNum in range(len(detPols)):
pG = gtPols[gtNum]
pD = detPols[detNum]
iouMat[gtNum, detNum] = get_intersection_over_union(pD, pG)
for gtNum in range(len(gtPols)):
for detNum in range(len(detPols)):
if gtRectMat[gtNum] == 0 and detRectMat[
detNum] == 0 and gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum:
if iouMat[gtNum, detNum] > self.iou_constraint:
gtRectMat[gtNum] = 1
detRectMat[detNum] = 1
detMatched += 1
pairs.append({'gt': gtNum, 'det': detNum})
detMatchedNums.append(detNum)
evaluationLog += "Match GT #" + \
str(gtNum) + " with Det #" + str(detNum) + "\n"
numGtCare = (len(gtPols) - len(gtDontCarePolsNum))
numDetCare = (len(detPols) - len(detDontCarePolsNum))
if numGtCare == 0:
recall = float(1)
precision = float(0) if numDetCare > 0 else float(1)
else:
recall = float(detMatched) / numGtCare
precision = 0 if numDetCare == 0 else float(detMatched) / numDetCare
hmean = 0 if (precision + recall) == 0 else 2.0 * \
precision * recall / (precision + recall)
matchedSum += detMatched
numGlobalCareGt += numGtCare
numGlobalCareDet += numDetCare
perSampleMetrics = {
'precision': precision,
'recall': recall,
'hmean': hmean,
'pairs': pairs,
'iouMat': [] if len(detPols) > 100 else iouMat.tolist(),
'gtPolPoints': gtPolPoints,
'detPolPoints': detPolPoints,
'gtCare': numGtCare,
'detCare': numDetCare,
'gtDontCare': gtDontCarePolsNum,
'detDontCare': detDontCarePolsNum,
'detMatched': detMatched,
'evaluationLog': evaluationLog
}
return perSampleMetrics
def combine_results(self, results):
numGlobalCareGt = 0
numGlobalCareDet = 0
matchedSum = 0
for result in results:
numGlobalCareGt += result['gtCare']
numGlobalCareDet += result['detCare']
matchedSum += result['detMatched']
methodRecall = 0 if numGlobalCareGt == 0 else float(
matchedSum) / numGlobalCareGt
methodPrecision = 0 if numGlobalCareDet == 0 else float(
matchedSum) / numGlobalCareDet
methodHmean = 0 if methodRecall + methodPrecision == 0 else 2 * \
methodRecall * methodPrecision / (
methodRecall + methodPrecision)
# print(methodRecall, methodPrecision, methodHmean)
# sys.exit(-1)
methodMetrics = {
'precision': methodPrecision,
'recall': methodRecall,
'hmean': methodHmean
}
return methodMetrics
def cal_eval(config, data, res_dir_name, opt, mode):
evaluator = DetectionIoUEvaluator()
test_config = DotDict(config.test[data])
res_dir = os.path.join(os.path.join("exp", args.yaml), "{}".format(res_dir_name))
if opt == "iou_eval":
main_eval(
config.test.trained_model,
config.train.backbone,
test_config,
evaluator,
res_dir,
buffer=None,
model=None,
mode=mode,
)
else:
print("Undefined evaluation") | null |
1,194 | import argparse
import os
import shutil
import time
import multiprocessing as mp
import yaml
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import wandb
from config.load_config import load_yaml, DotDict
from data.dataset import SynthTextDataSet, CustomDataset
from loss.mseloss import Maploss_v2, Maploss_v3
from model.craft import CRAFT
from eval import main_eval
from metrics.eval_det_iou import DetectionIoUEvaluator
from utils.util import copyStateDict, save_parser
from functools import reduce
class Trainer(object):
def __init__(self, config, gpu, mode):
self.config = config
self.gpu = gpu
self.mode = mode
self.net_param = self.get_load_param(gpu)
def get_synth_loader(self):
dataset = SynthTextDataSet(
output_size=self.config.train.data.output_size,
data_dir=self.config.train.synth_data_dir,
saved_gt_dir=None,
mean=self.config.train.data.mean,
variance=self.config.train.data.variance,
gauss_init_size=self.config.train.data.gauss_init_size,
gauss_sigma=self.config.train.data.gauss_sigma,
enlarge_region=self.config.train.data.enlarge_region,
enlarge_affinity=self.config.train.data.enlarge_affinity,
aug=self.config.train.data.syn_aug,
vis_test_dir=self.config.vis_test_dir,
vis_opt=self.config.train.data.vis_opt,
sample=self.config.train.data.syn_sample,
)
syn_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
syn_loader = torch.utils.data.DataLoader(
dataset,
batch_size=self.config.train.batch_size // self.config.train.synth_ratio,
shuffle=False,
num_workers=self.config.train.num_workers,
sampler=syn_sampler,
drop_last=True,
pin_memory=True,
)
return syn_loader
def get_custom_dataset(self):
custom_dataset = CustomDataset(
output_size=self.config.train.data.output_size,
data_dir=self.config.data_root_dir,
saved_gt_dir=None,
mean=self.config.train.data.mean,
variance=self.config.train.data.variance,
gauss_init_size=self.config.train.data.gauss_init_size,
gauss_sigma=self.config.train.data.gauss_sigma,
enlarge_region=self.config.train.data.enlarge_region,
enlarge_affinity=self.config.train.data.enlarge_affinity,
watershed_param=self.config.train.data.watershed,
aug=self.config.train.data.custom_aug,
vis_test_dir=self.config.vis_test_dir,
sample=self.config.train.data.custom_sample,
vis_opt=self.config.train.data.vis_opt,
pseudo_vis_opt=self.config.train.data.pseudo_vis_opt,
do_not_care_label=self.config.train.data.do_not_care_label,
)
return custom_dataset
def get_load_param(self, gpu):
if self.config.train.ckpt_path is not None:
map_location = "cuda:%d" % gpu
param = torch.load(self.config.train.ckpt_path, map_location=map_location)
else:
param = None
return param
def adjust_learning_rate(self, optimizer, gamma, step, lr):
lr = lr * (gamma ** step)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
return param_group["lr"]
def get_loss(self):
if self.config.train.loss == 2:
criterion = Maploss_v2()
elif self.config.train.loss == 3:
criterion = Maploss_v3()
else:
raise Exception("Undefined loss")
return criterion
def iou_eval(self, dataset, train_step, buffer, model):
test_config = DotDict(self.config.test[dataset])
val_result_dir = os.path.join(
self.config.results_dir, "{}/{}".format(dataset + "_iou", str(train_step))
)
evaluator = DetectionIoUEvaluator()
metrics = main_eval(
None,
self.config.train.backbone,
test_config,
evaluator,
val_result_dir,
buffer,
model,
self.mode,
)
if self.gpu == 0 and self.config.wandb_opt:
wandb.log(
{
"{} iou Recall".format(dataset): np.round(metrics["recall"], 3),
"{} iou Precision".format(dataset): np.round(
metrics["precision"], 3
),
"{} iou F1-score".format(dataset): np.round(metrics["hmean"], 3),
}
)
def train(self, buffer_dict):
torch.cuda.set_device(self.gpu)
total_gpu_num = torch.cuda.device_count()
# MODEL -------------------------------------------------------------------------------------------------------#
# SUPERVISION model
if self.config.mode == "weak_supervision":
if self.config.train.backbone == "vgg":
supervision_model = CRAFT(pretrained=False, amp=self.config.train.amp)
else:
raise Exception("Undefined architecture")
# NOTE: this only works when half of the GPUs are assigned to training and the other half to supervision
supervision_device = total_gpu_num // 2 + self.gpu
if self.config.train.ckpt_path is not None:
supervision_param = self.get_load_param(supervision_device)
supervision_model.load_state_dict(
copyStateDict(supervision_param["craft"])
)
supervision_model = supervision_model.to(f"cuda:{supervision_device}")
print(f"Supervision model loading on : gpu {supervision_device}")
else:
supervision_model, supervision_device = None, None
# TRAIN model
if self.config.train.backbone == "vgg":
craft = CRAFT(pretrained=False, amp=self.config.train.amp)
else:
raise Exception("Undefined architecture")
if self.config.train.ckpt_path is not None:
craft.load_state_dict(copyStateDict(self.net_param["craft"]))
craft = nn.SyncBatchNorm.convert_sync_batchnorm(craft)
craft = craft.cuda()
craft = torch.nn.parallel.DistributedDataParallel(craft, device_ids=[self.gpu])
torch.backends.cudnn.benchmark = True
# DATASET -----------------------------------------------------------------------------------------------------#
if self.config.train.use_synthtext:
trn_syn_loader = self.get_synth_loader()
batch_syn = iter(trn_syn_loader)
if self.config.train.real_dataset == "custom":
trn_real_dataset = self.get_custom_dataset()
else:
raise Exception("Undefined dataset")
if self.config.mode == "weak_supervision":
trn_real_dataset.update_model(supervision_model)
trn_real_dataset.update_device(supervision_device)
trn_real_sampler = torch.utils.data.distributed.DistributedSampler(
trn_real_dataset
)
trn_real_loader = torch.utils.data.DataLoader(
trn_real_dataset,
batch_size=self.config.train.batch_size,
shuffle=False,
num_workers=self.config.train.num_workers,
sampler=trn_real_sampler,
drop_last=False,
pin_memory=True,
)
# OPTIMIZER ---------------------------------------------------------------------------------------------------#
optimizer = optim.Adam(
craft.parameters(),
lr=self.config.train.lr,
weight_decay=self.config.train.weight_decay,
)
if self.config.train.ckpt_path is not None and self.config.train.st_iter != 0:
optimizer.load_state_dict(copyStateDict(self.net_param["optimizer"]))
self.config.train.st_iter = self.net_param["optimizer"]["state"][0]["step"]
self.config.train.lr = self.net_param["optimizer"]["param_groups"][0]["lr"]
# LOSS --------------------------------------------------------------------------------------------------------#
# mixed precision
if self.config.train.amp:
scaler = torch.cuda.amp.GradScaler()
if (
self.config.train.ckpt_path is not None
and self.config.train.st_iter != 0
):
scaler.load_state_dict(copyStateDict(self.net_param["scaler"]))
else:
scaler = None
criterion = self.get_loss()
# TRAIN -------------------------------------------------------------------------------------------------------#
train_step = self.config.train.st_iter
whole_training_step = self.config.train.end_iter
update_lr_rate_step = 0
training_lr = self.config.train.lr
loss_value = 0
batch_time = 0
start_time = time.time()
print(
"================================ Train start ================================"
)
while train_step < whole_training_step:
trn_real_sampler.set_epoch(train_step)
for (
index,
(
images,
region_scores,
affinity_scores,
confidence_masks,
),
) in enumerate(trn_real_loader):
craft.train()
if train_step > 0 and train_step % self.config.train.lr_decay == 0:
update_lr_rate_step += 1
training_lr = self.adjust_learning_rate(
optimizer,
self.config.train.gamma,
update_lr_rate_step,
self.config.train.lr,
)
images = images.cuda(non_blocking=True)
region_scores = region_scores.cuda(non_blocking=True)
affinity_scores = affinity_scores.cuda(non_blocking=True)
confidence_masks = confidence_masks.cuda(non_blocking=True)
if self.config.train.use_synthtext:
# Synth image load
syn_image, syn_region_label, syn_affi_label, syn_confidence_mask = next(
batch_syn
)
syn_image = syn_image.cuda(non_blocking=True)
syn_region_label = syn_region_label.cuda(non_blocking=True)
syn_affi_label = syn_affi_label.cuda(non_blocking=True)
syn_confidence_mask = syn_confidence_mask.cuda(non_blocking=True)
# concat syn & custom image
images = torch.cat((syn_image, images), 0)
region_image_label = torch.cat(
(syn_region_label, region_scores), 0
)
affinity_image_label = torch.cat((syn_affi_label, affinity_scores), 0)
confidence_mask_label = torch.cat(
(syn_confidence_mask, confidence_masks), 0
)
else:
region_image_label = region_scores
affinity_image_label = affinity_scores
confidence_mask_label = confidence_masks
if self.config.train.amp:
with torch.cuda.amp.autocast():
output, _ = craft(images)
out1 = output[:, :, :, 0]
out2 = output[:, :, :, 1]
loss = criterion(
region_image_label,
affinity_image_label,
out1,
out2,
confidence_mask_label,
self.config.train.neg_rto,
self.config.train.n_min_neg,
)
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
else:
output, _ = craft(images)
out1 = output[:, :, :, 0]
out2 = output[:, :, :, 1]
loss = criterion(
region_image_label,
affinity_image_label,
out1,
out2,
confidence_mask_label,
self.config.train.neg_rto,
)
optimizer.zero_grad()
loss.backward()
optimizer.step()
end_time = time.time()
loss_value += loss.item()
batch_time += end_time - start_time
if train_step > 0 and train_step % 5 == 0 and self.gpu == 0:
mean_loss = loss_value / 5
loss_value = 0
avg_batch_time = batch_time / 5
batch_time = 0
print(
"{}, training_step: {}|{}, learning rate: {:.8f}, "
"training_loss: {:.5f}, avg_batch_time: {:.5f}".format(
time.strftime(
"%Y-%m-%d:%H:%M:%S", time.localtime(time.time())
),
train_step,
whole_training_step,
training_lr,
mean_loss,
avg_batch_time,
)
)
if self.gpu == 0 and self.config.wandb_opt:
wandb.log({"train_step": train_step, "mean_loss": mean_loss})
if (
train_step % self.config.train.eval_interval == 0
and train_step != 0
):
craft.eval()
# reset all evaluation buffer slots to None
if self.gpu == 0:
for buffer in buffer_dict.values():
for i in range(len(buffer)):
buffer[i] = None
print("Saving state, index:", train_step)
save_param_dic = {
"iter": train_step,
"craft": craft.state_dict(),
"optimizer": optimizer.state_dict(),
}
save_param_path = (
self.config.results_dir
+ "/CRAFT_clr_"
+ repr(train_step)
+ ".pth"
)
if self.config.train.amp:
save_param_dic["scaler"] = scaler.state_dict()
save_param_path = (
self.config.results_dir
+ "/CRAFT_clr_amp_"
+ repr(train_step)
+ ".pth"
)
torch.save(save_param_dic, save_param_path)
# validation
self.iou_eval(
"custom_data",
train_step,
buffer_dict["custom_data"],
craft,
)
train_step += 1
if train_step >= whole_training_step:
break
if self.config.mode == "weak_supervision":
state_dict = craft.module.state_dict()
supervision_model.load_state_dict(state_dict)
trn_real_dataset.update_model(supervision_model)
# save last model
if self.gpu == 0:
save_param_dic = {
"iter": train_step,
"craft": craft.state_dict(),
"optimizer": optimizer.state_dict(),
}
save_param_path = (
self.config.results_dir + "/CRAFT_clr_" + repr(train_step) + ".pth"
)
if self.config.train.amp:
save_param_dic["scaler"] = scaler.state_dict()
save_param_path = (
self.config.results_dir
+ "/CRAFT_clr_amp_"
+ repr(train_step)
+ ".pth"
)
torch.save(save_param_dic, save_param_path)
class DotDict(dict):
def __getattr__(self, k):
try:
v = self[k]
except:
return super().__getattr__(k)
if isinstance(v, dict):
return DotDict(v)
return v
def __getitem__(self, k):
if isinstance(k, str) and '.' in k:
k = k.split('.')
if isinstance(k, (list, tuple)):
return reduce(lambda d, kk: d[kk], k, self)
return super().__getitem__(k)
def get(self, k, default=None):
if isinstance(k, str) and '.' in k:
try:
return self[k]
except KeyError:
return default
return super().get(k, default=default)
def main_worker(gpu, port, ngpus_per_node, config, buffer_dict, exp_name, mode):
torch.distributed.init_process_group(
backend="nccl",
init_method="tcp://127.0.0.1:" + port,
world_size=ngpus_per_node,
rank=gpu,
)
# Apply config to wandb
if gpu == 0 and config["wandb_opt"]:
wandb.init(project="craft-stage2", entity="user_name", name=exp_name)
wandb.config.update(config)
batch_size = int(config["train"]["batch_size"] / ngpus_per_node)
config["train"]["batch_size"] = batch_size
config = DotDict(config)
# Start train
trainer = Trainer(config, gpu, mode)
trainer.train(buffer_dict)
if gpu == 0:
if config["wandb_opt"]:
wandb.finish()
torch.distributed.barrier()
torch.distributed.destroy_process_group() | null |
1,195 | import torch
import torch.nn as nn
import torch.nn.init as init
import torchvision
from torchvision import models
from packaging import version
def init_weights(modules):
for m in modules:
if isinstance(m, nn.Conv2d):
init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_() | null |
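Usage sketch for init_weights above (assumed in scope), applied to the modules of a small block:
import torch.nn as nn
block = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1),
    nn.BatchNorm2d(16),
    nn.ReLU(inplace=True),
)
init_weights(block.modules())          # Xavier for conv, constants for batch norm
print(block[1].weight.data.unique())   # batch-norm weights are all 1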
1,196 | from collections import OrderedDict
import os
import cv2
import numpy as np
from data import imgproc
from utils import craft_utils
def saveInput(
imagename, vis_dir, image, region_scores, affinity_scores, confidence_mask
):
image = np.uint8(image.copy())
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
boxes, polys = craft_utils.getDetBoxes(
region_scores, affinity_scores, 0.85, 0.2, 0.5, False
)
if image.shape[0] / region_scores.shape[0] >= 2:
boxes = np.array(boxes, np.int32) * 2
else:
boxes = np.array(boxes, np.int32)
if len(boxes) > 0:
np.clip(boxes[:, :, 0], 0, image.shape[1])
np.clip(boxes[:, :, 1], 0, image.shape[0])
for box in boxes:
cv2.polylines(image, [np.reshape(box, (-1, 1, 2))], True, (0, 0, 255))
target_gaussian_heatmap_color = imgproc.cvt2HeatmapImg(region_scores)
target_gaussian_affinity_heatmap_color = imgproc.cvt2HeatmapImg(affinity_scores)
confidence_mask_gray = imgproc.cvt2HeatmapImg(confidence_mask)
# overlay
height, width, channel = image.shape
overlay_region = cv2.resize(target_gaussian_heatmap_color, (width, height))
overlay_aff = cv2.resize(target_gaussian_affinity_heatmap_color, (width, height))
confidence_mask_gray = cv2.resize(
confidence_mask_gray, (width, height), interpolation=cv2.INTER_NEAREST
)
overlay_region = cv2.addWeighted(image, 0.4, overlay_region, 0.6, 5)
overlay_aff = cv2.addWeighted(image, 0.4, overlay_aff, 0.7, 6)
gt_scores = np.concatenate([overlay_region, overlay_aff], axis=1)
output = np.concatenate([gt_scores, confidence_mask_gray], axis=1)
output = np.hstack([image, output])
# synthtext
if type(imagename) is not str:
imagename = imagename[0].split("/")[-1][:-4]
outpath = vis_dir + f"/{imagename}_input.jpg"
if not os.path.exists(os.path.dirname(outpath)):
os.makedirs(os.path.dirname(outpath), exist_ok=True)
cv2.imwrite(outpath, output)
# print(f'Logging train input into {outpath}') | null |
1,197 | from collections import OrderedDict
import os
import cv2
import numpy as np
from data import imgproc
from utils import craft_utils
def saveImage(
imagename,
vis_dir,
image,
bboxes,
affi_bboxes,
region_scores,
affinity_scores,
confidence_mask,
):
output_image = np.uint8(image.copy())
output_image = cv2.cvtColor(output_image, cv2.COLOR_RGB2BGR)
if len(bboxes) > 0:
for i in range(len(bboxes)):
_bboxes = np.int32(bboxes[i])
for j in range(_bboxes.shape[0]):
cv2.polylines(
output_image,
[np.reshape(_bboxes[j], (-1, 1, 2))],
True,
(0, 0, 255),
)
for i in range(len(affi_bboxes)):
cv2.polylines(
output_image,
[np.reshape(affi_bboxes[i].astype(np.int32), (-1, 1, 2))],
True,
(255, 0, 0),
)
target_gaussian_heatmap_color = imgproc.cvt2HeatmapImg(region_scores)
target_gaussian_affinity_heatmap_color = imgproc.cvt2HeatmapImg(affinity_scores)
confidence_mask_gray = imgproc.cvt2HeatmapImg(confidence_mask)
# overlay
height, width, channel = image.shape
overlay_region = cv2.resize(target_gaussian_heatmap_color, (width, height))
overlay_aff = cv2.resize(target_gaussian_affinity_heatmap_color, (width, height))
overlay_region = cv2.addWeighted(image.copy(), 0.4, overlay_region, 0.6, 5)
overlay_aff = cv2.addWeighted(image.copy(), 0.4, overlay_aff, 0.6, 5)
heat_map = np.concatenate([overlay_region, overlay_aff], axis=1)
# synthtext
if type(imagename) is not str:
imagename = imagename[0].split("/")[-1][:-4]
output = np.concatenate([output_image, heat_map, confidence_mask_gray], axis=1)
outpath = vis_dir + f"/{imagename}.jpg"
if not os.path.exists(os.path.dirname(outpath)):
os.makedirs(os.path.dirname(outpath), exist_ok=True)
cv2.imwrite(outpath, output)
# print(f'Logging original image into {outpath}') | null |
1,198 | from collections import OrderedDict
import os
import cv2
import numpy as np
from data import imgproc
from utils import craft_utils
The provided code snippet includes necessary dependencies for implementing the `save_parser` function. Write a Python function `def save_parser(args)` to solve the following problem:
final options
Here is the function:
def save_parser(args):
""" final options """
with open(f"{args.results_dir}/opt.txt", "a", encoding="utf-8") as opt_file:
opt_log = "------------ Options -------------\n"
arg = vars(args)
for k, v in arg.items():
opt_log += f"{str(k)}: {str(v)}\n"
opt_log += "---------------------------------------\n"
print(opt_log)
opt_file.write(opt_log) | final options |
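Usage sketch for save_parser above (assumed in scope); any argparse-style namespace with a results_dir attribute works, and the option names below are illustrative:
import argparse
import os
args = argparse.Namespace(results_dir="exp/demo", lr=1e-4, batch_size=8)
os.makedirs(args.results_dir, exist_ok=True)
save_parser(args)      # appends the option dump to exp/demo/opt.txt and prints it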
1,199 | import os
import torch
import cv2
import math
import numpy as np
from data import imgproc
from utils.craft_utils import getDetBoxes
def save_outputs(image, region_scores, affinity_scores, text_threshold, link_threshold,
low_text, outoput_path, confidence_mask = None):
"""save image, region_scores, and affinity_scores in a single image. region_scores and affinity_scores must be
cpu numpy arrays. You can convert GPU Tensors to CPU numpy arrays like this:
>>> array = tensor.cpu().data.numpy()
When saving outputs of the network during training, make sure you convert ALL tensors (image, region_score,
affinity_score) to numpy array first.
:param image: numpy array
:param region_scores: 2D numpy array with each element between 0~1.
:param affinity_scores: same as region_scores
:param text_threshold: 0 ~ 1. Closer to 0, characters with lower confidence will also be considered a word and be boxed
:param link_threshold: 0 ~ 1. Closer to 0, links with lower confidence will also be considered a word and be boxed
:param low_text: 0 ~ 1. Closer to 0, boxes will be more loosely drawn.
:param outoput_path:
:param confidence_mask:
:return:
"""
assert region_scores.shape == affinity_scores.shape
assert len(image.shape) - 1 == len(region_scores.shape)
boxes, polys = getDetBoxes(region_scores, affinity_scores, text_threshold, link_threshold,
low_text, False)
boxes = np.array(boxes, np.int32) * 2
if len(boxes) > 0:
np.clip(boxes[:, :, 0], 0, image.shape[1])
np.clip(boxes[:, :, 1], 0, image.shape[0])
for box in boxes:
cv2.polylines(image, [np.reshape(box, (-1, 1, 2))], True, (0, 0, 255))
target_gaussian_heatmap_color = imgproc.cvt2HeatmapImg(region_scores)
target_gaussian_affinity_heatmap_color = imgproc.cvt2HeatmapImg(affinity_scores)
if confidence_mask is not None:
confidence_mask_gray = imgproc.cvt2HeatmapImg(confidence_mask)
gt_scores = np.hstack([target_gaussian_heatmap_color, target_gaussian_affinity_heatmap_color])
confidence_mask_gray = np.hstack([np.zeros_like(confidence_mask_gray), confidence_mask_gray])
output = np.concatenate([gt_scores, confidence_mask_gray], axis=0)
output = np.hstack([image, output])
else:
gt_scores = np.concatenate([target_gaussian_heatmap_color, target_gaussian_affinity_heatmap_color], axis=0)
output = np.hstack([image, gt_scores])
cv2.imwrite(outoput_path, output)
return output
The provided code snippet includes necessary dependencies for implementing the `save_outputs_from_tensors` function. Write a Python function `def save_outputs_from_tensors(images, region_scores, affinity_scores, text_threshold, link_threshold, low_text, output_dir, image_names, confidence_mask = None)` to solve the following problem:
takes images, region_scores, and affinity_scores as tensors (can be GPU). :param images: 4D tensor :param region_scores: 3D tensor with values between 0 ~ 1 :param affinity_scores: 3D tensor with values between 0 ~ 1 :param text_threshold: :param link_threshold: :param low_text: :param output_dir: directory to save the output images. Will be joined with base names of image_names :param image_names: names of each image. Doesn't have to be the base name (image file names) :param confidence_mask: :return:
Here is the function:
def save_outputs_from_tensors(images, region_scores, affinity_scores, text_threshold, link_threshold,
low_text, output_dir, image_names, confidence_mask = None):
"""takes images, region_scores, and affinity_scores as tensors (cab be GPU).
:param images: 4D tensor
:param region_scores: 3D tensor with values between 0 ~ 1
:param affinity_scores: 3D tensor with values between 0 ~ 1
:param text_threshold:
:param link_threshold:
:param low_text:
:param output_dir: directory to save the output images. Will be joined with base names of image_names
:param image_names: names of each image. Doesn't have to be the base name (image file names)
:param confidence_mask:
:return:
"""
#import ipdb;ipdb.set_trace()
#images = images.cpu().permute(0, 2, 3, 1).contiguous().data.numpy()
if type(images) == torch.Tensor:
images = np.array(images)
region_scores = region_scores.cpu().data.numpy()
affinity_scores = affinity_scores.cpu().data.numpy()
batch_size = images.shape[0]
assert batch_size == region_scores.shape[0] and batch_size == affinity_scores.shape[0] and batch_size == len(image_names), \
"The first dimension (i.e. batch size) of images, region scores, and affinity scores must be equal"
output_images = []
for i in range(batch_size):
image = images[i]
region_score = region_scores[i]
affinity_score = affinity_scores[i]
image_name = os.path.basename(image_names[i])
outoput_path = os.path.join(output_dir,image_name)
output_image = save_outputs(image, region_score, affinity_score, text_threshold, link_threshold,
low_text, outoput_path, confidence_mask=confidence_mask)
output_images.append(output_image)
return output_images | takes images, region_scores, and affinity_scores as tensors (cab be GPU). :param images: 4D tensor :param region_scores: 3D tensor with values between 0 ~ 1 :param affinity_scores: 3D tensor with values between 0 ~ 1 :param text_threshold: :param link_threshold: :param low_text: :param output_dir: direcotry to save the output images. Will be joined with base names of image_names :param image_names: names of each image. Doesn't have to be the base name (image file names) :param confidence_mask: :return: |
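A toy invocation sketch (all values are placeholders; it assumes the output directory exists, images are NHWC uint8 arrays, and the score maps are half the image resolution, as CRAFT produces):
import os
import numpy as np
import torch
os.makedirs("./train_vis", exist_ok=True)
# Two blank 64x64 RGB images and matching 32x32 score maps in [0, 1].
images = np.zeros((2, 64, 64, 3), dtype=np.uint8)
region = torch.rand(2, 32, 32)
affinity = torch.rand(2, 32, 32)
outputs = save_outputs_from_tensors(
    images, region, affinity,
    text_threshold=0.7, link_threshold=0.4, low_text=0.4,
    output_dir="./train_vis", image_names=["img_0.jpg", "img_1.jpg"])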
1,200 | import os
import re
import itertools
import cv2
import time
import numpy as np
import torch
from torch.autograd import Variable
from utils.craft_utils import getDetBoxes, adjustResultCoordinates
from data import imgproc
from data.dataset import SynthTextDataSet
import math
import xml.etree.ElementTree as elemTree
def xml_parsing(xml):
tree = elemTree.parse(xml)
annotations = [] # Initialize the list to store labels
iter_element = tree.iter(tag="object")
for element in iter_element:
annotation = {} # Initialize the dict to store labels
annotation['name'] = element.find("name").text # Save the name tag value
box_coords = element.iter(tag="robndbox")
for box_coord in box_coords:
cx = float(box_coord.find("cx").text)
cy = float(box_coord.find("cy").text)
w = float(box_coord.find("w").text)
h = float(box_coord.find("h").text)
angle = float(box_coord.find("angle").text)
convertcoodi = addRotatedShape(cx, cy, w, h, angle)
annotation['box_coodi'] = convertcoodi
annotations.append(annotation)
box_coords = element.iter(tag="bndbox")
for box_coord in box_coords:
xmin = int(box_coord.find("xmin").text)
ymin = int(box_coord.find("ymin").text)
xmax = int(box_coord.find("xmax").text)
ymax = int(box_coord.find("ymax").text)
# annotation['bndbox'] = [xmin,ymin,xmax,ymax]
annotation['box_coodi'] = [[xmin, ymin], [xmax, ymin], [xmax, ymax],
[xmin, ymax]]
annotations.append(annotation)
bounds = []
for i in range(len(annotations)):
box_info_dict = {"points": None, "text": None, "ignore": None}
box_info_dict["points"] = np.array(annotations[i]['box_coodi'])
if annotations[i]['name'] == "dnc":
box_info_dict["text"] = "###"
box_info_dict["ignore"] = True
else:
box_info_dict["text"] = annotations[i]['name']
box_info_dict["ignore"] = False
bounds.append(box_info_dict)
return bounds
def load_prescription_gt(dataFolder):
total_img_path = []
total_imgs_bboxes = []
for (root, directories, files) in os.walk(dataFolder):
for file in files:
if '.jpg' in file:
img_path = os.path.join(root, file)
total_img_path.append(img_path)
if '.xml' in file:
gt_path = os.path.join(root, file)
total_imgs_bboxes.append(gt_path)
total_imgs_parsing_bboxes = []
for img_path, bbox in zip(sorted(total_img_path), sorted(total_imgs_bboxes)):
# check file
assert img_path.split(".jpg")[0] == bbox.split(".xml")[0]
result_label = xml_parsing(bbox)
total_imgs_parsing_bboxes.append(result_label)
return total_imgs_parsing_bboxes, sorted(total_img_path) | null |
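Usage sketch (hypothetical folder name; expects paired <name>.jpg / <name>.xml files, and rotated boxes additionally rely on the addRotatedShape helper referenced above):
gt_boxes, image_paths = load_prescription_gt("./prescription_data")
for path, boxes in zip(image_paths, gt_boxes):
    print(path, "->", len(boxes), "annotated boxes")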
1,201 | import os
import re
import itertools
import cv2
import time
import numpy as np
import torch
from torch.autograd import Variable
from utils.craft_utils import getDetBoxes, adjustResultCoordinates
from data import imgproc
from data.dataset import SynthTextDataSet
import math
import xml.etree.ElementTree as elemTree
def load_prescription_cleval_gt(dataFolder):
total_img_path = []
total_gt_path = []
for (root, directories, files) in os.walk(dataFolder):
for file in files:
if '.jpg' in file:
img_path = os.path.join(root, file)
total_img_path.append(img_path)
if '_cl.txt' in file:
gt_path = os.path.join(root, file)
total_gt_path.append(gt_path)
total_imgs_parsing_bboxes = []
for img_path, gt_path in zip(sorted(total_img_path), sorted(total_gt_path)):
# check file
assert img_path.split(".jpg")[0] == gt_path.split('_label_cl.txt')[0]
lines = open(gt_path, encoding="utf-8").readlines()
word_bboxes = []
for line in lines:
box_info_dict = {"points": None, "text": None, "ignore": None}
box_info = line.strip().encode("utf-8").decode("utf-8-sig").split(",")
box_points = [int(box_info[i]) for i in range(8)]
box_info_dict["points"] = np.array(box_points)
word_bboxes.append(box_info_dict)
total_imgs_parsing_bboxes.append(word_bboxes)
return total_imgs_parsing_bboxes, sorted(total_img_path) | null |
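Usage sketch (hypothetical folder; the first eight comma-separated values of each *_label_cl.txt line are read as the corner coordinates x1,y1,...,x4,y4, e.g. "10,10,120,10,120,40,10,40"):
gt_boxes, image_paths = load_prescription_cleval_gt("./prescription_cleval")
print(len(image_paths), "images,", sum(len(b) for b in gt_boxes), "boxes in total")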
1,202 | import argparse
import onnx
import torch
import easyocr
import numpy as np
def export_detector(detector_onnx_save_path,
in_shape=[1, 3, 608, 800],
lang_list=["en"],
model_storage_directory=None,
user_network_directory=None,
download_enabled=True,
dynamic=True,
device="cpu",
quantize=True,
detector=True,
recognizer=True):
if dynamic is False:
print('WARNING: it is recommended to use -d dynamic flag when exporting onnx')
ocr_reader = easyocr.Reader(lang_list,
gpu=False if device == "cpu" else True,
detector=detector,
recognizer=recognizer,
quantize=quantize,
model_storage_directory=model_storage_directory,
user_network_directory=user_network_directory,
download_enabled=download_enabled)
# exporting detector if selected
if detector:
dummy_input = torch.rand(in_shape)
dummy_input = dummy_input.to(device)
# forward pass
with torch.no_grad():
y_torch_out, feature_torch_out = ocr_reader.detector(dummy_input)
torch.onnx.export(ocr_reader.detector,
dummy_input,
detector_onnx_save_path,
export_params=True,
do_constant_folding=True,
opset_version=12,
# model's input names
input_names=['input'],
# model's output names, ignore the 2nd output
output_names=['output'],
# variable length axes
dynamic_axes={'input': {0: 'batch_size', 2: "height", 3: "width"},
'output': {0: 'batch_size', 1: "dim1", 2: "dim2"}
} if dynamic else None,
verbose=False)
# verify exported onnx model
detector_onnx = onnx.load(detector_onnx_save_path)
onnx.checker.check_model(detector_onnx)
print(f"Model Inputs:\n {detector_onnx.graph.input}\n{'*'*80}")
print(f"Model Outputs:\n {detector_onnx.graph.output}\n{'*'*80}")
# onnx inference validation
import onnxruntime
ort_session = onnxruntime.InferenceSession(detector_onnx_save_path)
def to_numpy(tensor):
if tensor.requires_grad:
return tensor.detach().cpu().numpy()
return tensor.cpu().numpy()
# compute ONNX Runtime output prediction
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(dummy_input)}
y_onnx_out, feature_onnx_out = ort_session.run(None, ort_inputs)
print(f"torch outputs: y_torch_out.shape={y_torch_out.shape} feature_torch_out.shape={feature_torch_out.shape}")
print(f"onnx outputs: y_onnx_out.shape={y_onnx_out.shape} feature_onnx_out.shape={feature_onnx_out.shape}")
# compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(
to_numpy(y_torch_out), y_onnx_out, rtol=1e-03, atol=1e-05)
np.testing.assert_allclose(
to_numpy(feature_torch_out), feature_onnx_out, rtol=1e-03, atol=1e-05)
print(f"Model exported to {detector_onnx_save_path} and tested with ONNXRuntime, and the result looks good!") | null |
1,203 | import argparse
import onnx
import torch
import easyocr
import numpy as np
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--lang_list',
nargs='+', type=str,
default=["en"],
help='-l en ch_sim ... (language lists for easyocr)')
parser.add_argument('-s', '--detector_onnx_save_path', type=str,
default="detector_craft.onnx",
help="export detector onnx file path ending in .onnx" +
"Do not pass in this flag to avoid exporting detector")
parser.add_argument('-d', '--dynamic',
action='store_true',
help="Dynamic input output shapes for detector")
parser.add_argument('-is', '--in_shape',
nargs='+', type=int,
default=[1, 3, 608, 800],
help='-is 1 3 608 800 (bsize, channel, height, width)')
parser.add_argument('-m', '--model_storage_directory', type=str,
help="model storage directory for craft model")
parser.add_argument('-u', '--user_network_directory', type=str,
help="user model storage directory")
args = parser.parse_args()
dpath = args.detector_onnx_save_path
args.detector_onnx_save_path = None if dpath == "None" else dpath
if len(args.in_shape) != 4:
raise ValueError(
"Input shape must have four values (bsize, channel, height, width), e.g. 1 3 608 800")
return args | null |
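These two pieces are typically wired together in a small entry point, roughly like this (sketch only):
if __name__ == "__main__":
    args = parse_args()
    if args.detector_onnx_save_path is not None:
        export_detector(args.detector_onnx_save_path,
                        in_shape=args.in_shape,
                        lang_list=args.lang_list,
                        model_storage_directory=args.model_storage_directory,
                        user_network_directory=args.user_network_directory,
                        dynamic=args.dynamic)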
1,204 | import numpy as np
from skimage import io
import cv2
def denormalizeMeanVariance(in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)):
# should be RGB order
img = in_img.copy()
img *= variance
img += mean
img *= 255.0
img = np.clip(img, 0, 255).astype(np.uint8)
return img | null |
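Round-trip sketch showing that the function inverts the usual mean/variance normalization (illustrative values only):
import numpy as np
rgb = np.full((4, 4, 3), 128, dtype=np.uint8)
normalized = (rgb.astype(np.float32) / 255.0 - (0.485, 0.456, 0.406)) / (0.229, 0.224, 0.225)
restored = denormalizeMeanVariance(normalized)
assert np.allclose(restored, rgb, atol=1)  # back to (approximately) the original pixels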
1,205 | import numpy as np
from skimage import io
import cv2
def cvt2HeatmapImg(img):
img = (np.clip(img, 0, 1) * 255).astype(np.uint8)
img = cv2.applyColorMap(img, cv2.COLORMAP_JET)
return img | null |
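Small sketch: a horizontal 0-to-1 gradient becomes a JET-colored BGR heatmap ready for cv2.imwrite (illustrative only):
import numpy as np
score_map = np.tile(np.linspace(0.0, 1.0, 256), (32, 1))  # 32 x 256 scores in [0, 1]
heatmap = cvt2HeatmapImg(score_map)
print(heatmap.shape, heatmap.dtype)  # (32, 256, 3) uint8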
1,206 | import argparse
import easyocr
def parse_args():
parser = argparse.ArgumentParser(description="Process EasyOCR.")
parser.add_argument(
"-l",
"--lang",
nargs='+',
required=True,
type=str,
help="for languages",
)
parser.add_argument(
"--gpu",
type=bool,
choices=[True, False],
default=True,
help="Using GPU (default: True)",
)
parser.add_argument(
"--model_storage_directory",
type=str,
default=None,
help="Directory for model (.pth) file",
)
parser.add_argument(
"--user_network_directory",
type=str,
default=None,
help="Directory for custom network files",
)
parser.add_argument(
"--recog_network",
type=str,
default='standard',
help="Recognition networks",
)
parser.add_argument(
"--download_enabled",
type=bool,
choices=[True, False],
default=True,
help="Enable Download",
)
parser.add_argument(
"--detector",
type=bool,
choices=[True, False],
default=True,
help="Initialize text detector module",
)
parser.add_argument(
"--recognizer",
type=bool,
choices=[True, False],
default=True,
help="Initialize text recognizer module",
)
parser.add_argument(
"--verbose",
type=bool,
choices=[True, False],
default=True,
help="Print detail/warning",
)
parser.add_argument(
"--quantize",
type=bool,
choices=[True, False],
default=True,
help="Use dynamic quantization",
)
parser.add_argument(
"-f",
"--file",
required=True,
type=str,
help="input file",
)
parser.add_argument(
"--decoder",
type=str,
choices=["greedy", 'beamsearch', 'wordbeamsearch'],
default='greedy',
help="decoder algorithm",
)
parser.add_argument(
"--beamWidth",
type=int,
default=5,
help="size of beam search",
)
parser.add_argument(
"--batch_size",
type=int,
default=1,
help="batch_size",
)
parser.add_argument(
"--workers",
type=int,
default=0,
help="number of processing cpu cores",
)
parser.add_argument(
"--allowlist",
type=str,
default=None,
help="Force EasyOCR to recognize only subset of characters",
)
parser.add_argument(
"--blocklist",
type=str,
default=None,
help="Block subset of character. This argument will be ignored if allowlist is given.",
)
parser.add_argument(
"--detail",
type=int,
choices=[0, 1],
default=1,
help="simple output (default: 1)",
)
parser.add_argument(
"--rotation_info",
type=list,
default=None,
help="Allow EasyOCR to rotate each text box and return the one with the best confident score. Eligible values are 90, 180 and 270. For example, try [90, 180 ,270] for all possible text orientations.",
)
parser.add_argument(
"--paragraph",
type=bool,
choices=[True, False],
default=False,
help="Combine result into paragraph",
)
parser.add_argument(
"--min_size",
type=int,
default=20,
help="Filter text box smaller than minimum value in pixel",
)
parser.add_argument(
"--contrast_ths",
type=float,
default=0.1,
help="Text box with contrast lower than this value will be passed into model 2 times. First is with original image and second with contrast adjusted to 'adjust_contrast' value. The one with more confident level will be returned as a result.",
)
parser.add_argument(
"--adjust_contrast",
type=float,
default=0.5,
help="target contrast level for low contrast text box",
)
parser.add_argument(
"--text_threshold",
type=float,
default=0.7,
help="Text confidence threshold",
)
parser.add_argument(
"--low_text",
type=float,
default=0.4,
help="Text low-bound score",
)
parser.add_argument(
"--link_threshold",
type=float,
default=0.4,
help="Link confidence threshold",
)
parser.add_argument(
"--canvas_size",
type=int,
default=2560,
help="Maximum image size. Image bigger than this value will be resized down.",
)
parser.add_argument(
"--mag_ratio",
type=float,
default=1.,
help="Image magnification ratio",
)
parser.add_argument(
"--slope_ths",
type=float,
default=0.1,
help="Maximum slope (delta y/delta x) to considered merging. Low value means tiled boxes will not be merged.",
)
parser.add_argument(
"--ycenter_ths",
type=float,
default=0.5,
help="Maximum shift in y direction. Boxes with different level should not be merged.",
)
parser.add_argument(
"--height_ths",
type=float,
default=0.5,
help="Maximum different in box height. Boxes with very different text size should not be merged. ",
)
parser.add_argument(
"--width_ths",
type=float,
default=0.5,
help="Maximum horizontal distance to merge boxes.",
)
parser.add_argument(
"--y_ths",
type=float,
default=0.5,
help="Maximum vertical distance to merge boxes (when paragraph = True).",
)
parser.add_argument(
"--x_ths",
type=float,
default=1.0,
help="Maximum horizontal distance to merge boxes (when paragraph = True).",
)
parser.add_argument(
"--add_margin",
type=float,
default=0.1,
help="Extend bounding boxes in all direction by certain value. This is important for language with complex script (E.g. Thai).",
)
parser.add_argument(
"--output_format",
type=str,
choices=["standard", 'dict', 'json'],
default='standard',
help="output format.",
)
args = parser.parse_args()
return args | null |
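The parsed options are normally handed straight to a Reader, roughly as below (sketch; keyword names are assumed to match easyocr's Reader/readtext API):
args = parse_args()
reader = easyocr.Reader(args.lang,
                        gpu=args.gpu,
                        model_storage_directory=args.model_storage_directory,
                        user_network_directory=args.user_network_directory,
                        recog_network=args.recog_network,
                        detector=args.detector,
                        recognizer=args.recognizer,
                        quantize=args.quantize)
for line in reader.readtext(args.file,
                            decoder=args.decoder,
                            beamWidth=args.beamWidth,
                            batch_size=args.batch_size,
                            detail=args.detail,
                            paragraph=args.paragraph):
    print(line)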
1,207 | from __future__ import print_function
import torch
import pickle
import numpy as np
import math
import cv2
from PIL import Image, JpegImagePlugin
from scipy import ndimage
import hashlib
import sys, os
from zipfile import ZipFile
from .imgproc import loadImage
def consecutive(data, mode ='first', stepsize=1):
group = np.split(data, np.where(np.diff(data) != stepsize)[0]+1)
group = [item for item in group if len(item)>0]
if mode == 'first': result = [l[0] for l in group]
elif mode == 'last': result = [l[-1] for l in group]
return result
def word_segmentation(mat, separator_idx = {'th': [1,2],'en': [3,4]}, separator_idx_list = [1,2,3,4]):
result = []
sep_list = []
start_idx = 0
sep_lang = ''
for sep_idx in separator_idx_list:
if sep_idx % 2 == 0: mode ='first'
else: mode ='last'
a = consecutive( np.argwhere(mat == sep_idx).flatten(), mode)
new_sep = [ [item, sep_idx] for item in a]
sep_list += new_sep
sep_list = sorted(sep_list, key=lambda x: x[0])
for sep in sep_list:
for lang in separator_idx.keys():
if sep[1] == separator_idx[lang][0]: # start lang
sep_lang = lang
sep_start_idx = sep[0]
elif sep[1] == separator_idx[lang][1]: # end lang
if sep_lang == lang: # check if last entry if the same start lang
new_sep_pair = [lang, [sep_start_idx+1, sep[0]-1]]
if sep_start_idx > start_idx:
result.append( ['', [start_idx, sep_start_idx-1] ] )
start_idx = sep[0]+1
result.append(new_sep_pair)
sep_lang = ''# reset
if start_idx <= len(mat)-1:
result.append( ['', [start_idx, len(mat)-1] ] )
return result | null |
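A tiny worked example of consecutive(); word_segmentation builds on it to find the start/end separator indices of each language block in a decoded label sequence:
import numpy as np
data = np.array([2, 3, 4, 8, 9, 15])
print(consecutive(data, mode='first'))  # first element of each run: 2, 8, 15
print(consecutive(data, mode='last'))   # last element of each run: 4, 9, 15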
1,208 | from __future__ import print_function
import torch
import pickle
import numpy as np
import math
import cv2
from PIL import Image, JpegImagePlugin
from scipy import ndimage
import hashlib
import sys, os
from zipfile import ZipFile
from .imgproc import loadImage
The provided code snippet includes necessary dependencies for implementing the `applyLM` function. Write a Python function `def applyLM(parentBeam, childBeam, classes, lm)` to solve the following problem:
calculate LM score of child beam by taking score from parent beam and bigram probability of last two chars
Here is the function:
def applyLM(parentBeam, childBeam, classes, lm):
"calculate LM score of child beam by taking score from parent beam and bigram probability of last two chars"
if lm and not childBeam.lmApplied:
c1 = classes[parentBeam.labeling[-1] if parentBeam.labeling else classes.index(' ')] # first char
c2 = classes[childBeam.labeling[-1]] # second char
lmFactor = 0.01 # influence of language model
bigramProb = lm.getCharBigram(c1, c2) ** lmFactor # probability of seeing first and second char next to each other
childBeam.prText = parentBeam.prText * bigramProb # probability of char sequence
childBeam.lmApplied = True # only apply LM once per beam entry | calculate LM score of child beam by taking score from parent beam and bigram probability of last two chars |
1,209 | from __future__ import print_function
import torch
import pickle
import numpy as np
import math
import cv2
from PIL import Image, JpegImagePlugin
from scipy import ndimage
import hashlib
import sys, os
from zipfile import ZipFile
from .imgproc import loadImage
class BeamEntry:
"information about one single beam at specific time-step"
def __init__(self):
self.prTotal = 0 # blank and non-blank
self.prNonBlank = 0 # non-blank
self.prBlank = 0 # blank
self.prText = 1 # LM score
self.lmApplied = False # flag if LM was already applied to this beam
self.labeling = () # beam-labeling
self.simplified = True # To run simplify_label
class BeamState:
"information about the beams at specific time-step"
def __init__(self):
self.entries = {}
def norm(self):
"length-normalise LM score"
for (k, _) in self.entries.items():
labelingLen = len(self.entries[k].labeling)
self.entries[k].prText = self.entries[k].prText ** (1.0 / (labelingLen if labelingLen else 1.0))
def sort(self):
"return beam-labelings, sorted by probability"
beams = [v for (_, v) in self.entries.items()]
sortedBeams = sorted(beams, reverse=True, key=lambda x: x.prTotal*x.prText)
return [x.labeling for x in sortedBeams]
def wordsearch(self, classes, ignore_idx, maxCandidate, dict_list):
beams = [v for (_, v) in self.entries.items()]
sortedBeams = sorted(beams, reverse=True, key=lambda x: x.prTotal*x.prText)
if len(sortedBeams) > maxCandidate: sortedBeams = sortedBeams[:maxCandidate]
for j, candidate in enumerate(sortedBeams):
idx_list = candidate.labeling
text = ''
for i,l in enumerate(idx_list):
if l not in ignore_idx and (not (i > 0 and idx_list[i - 1] == idx_list[i])):
text += classes[l]
if j == 0: best_text = text
if text in dict_list:
#print('found text: ', text)
best_text = text
break
else:
pass
#print('not in dict: ', text)
return best_text
def simplify_label(labeling, blankIdx = 0):
labeling = np.array(labeling)
# collapse blank
idx = np.where(~((np.roll(labeling,1) == labeling) & (labeling == blankIdx)))[0]
labeling = labeling[idx]
# get rid of blank between different characters
idx = np.where( ~((np.roll(labeling,1) != np.roll(labeling,-1)) & (labeling == blankIdx)) )[0]
if len(labeling) > 0:
last_idx = len(labeling)-1
if last_idx not in idx: idx = np.append(idx, [last_idx])
labeling = labeling[idx]
return tuple(labeling)
def fast_simplify_label(labeling, c, blankIdx=0):
# Adding BlankIDX after Non-Blank IDX
if labeling and c == blankIdx and labeling[-1] != blankIdx:
newLabeling = labeling + (c,)
# Case when a nonBlankChar is added after BlankChar |len(char) - 1
elif labeling and c != blankIdx and labeling[-1] == blankIdx:
# If Blank between same character do nothing | As done by Simplify label
if labeling[-2] == c:
newLabeling = labeling + (c,)
# if blank between different character, remove it | As done by Simplify Label
else:
newLabeling = labeling[:-1] + (c,)
# if consecutive blanks : Keep the original label
elif labeling and c == blankIdx and labeling[-1] == blankIdx:
newLabeling = labeling
# if empty beam & first index is blank
elif not labeling and c == blankIdx:
newLabeling = labeling
# if empty beam & first index is non-blank
elif not labeling and c != blankIdx:
newLabeling = labeling + (c,)
elif labeling and c != blankIdx:
newLabeling = labeling + (c,)
# Cases that might still require simplifying
else:
newLabeling = labeling + (c,)
newLabeling = simplify_label(newLabeling, blankIdx)
return newLabeling
def addBeam(beamState, labeling):
"add beam if it does not yet exist"
if labeling not in beamState.entries:
beamState.entries[labeling] = BeamEntry()
def ctcBeamSearch(mat, classes, ignore_idx, lm, beamWidth=25, dict_list = []):
blankIdx = 0
maxT, maxC = mat.shape
# initialise beam state
last = BeamState()
labeling = ()
last.entries[labeling] = BeamEntry()
last.entries[labeling].prBlank = 1
last.entries[labeling].prTotal = 1
# go over all time-steps
for t in range(maxT):
curr = BeamState()
# get beam-labelings of best beams
bestLabelings = last.sort()[0:beamWidth]
# go over best beams
for labeling in bestLabelings:
# probability of paths ending with a non-blank
prNonBlank = 0
# in case of non-empty beam
if labeling:
# probability of paths with repeated last char at the end
prNonBlank = last.entries[labeling].prNonBlank * mat[t, labeling[-1]]
# probability of paths ending with a blank
prBlank = (last.entries[labeling].prTotal) * mat[t, blankIdx]
# add beam at current time-step if needed
prev_labeling = labeling
if not last.entries[labeling].simplified:
labeling = simplify_label(labeling, blankIdx)
# labeling = simplify_label(labeling, blankIdx)
addBeam(curr, labeling)
# fill in data
curr.entries[labeling].labeling = labeling
curr.entries[labeling].prNonBlank += prNonBlank
curr.entries[labeling].prBlank += prBlank
curr.entries[labeling].prTotal += prBlank + prNonBlank
curr.entries[labeling].prText = last.entries[prev_labeling].prText
# beam-labeling not changed, therefore also LM score unchanged from
#curr.entries[labeling].lmApplied = True # LM already applied at previous time-step for this beam-labeling
# extend current beam-labeling
# char_highscore = np.argpartition(mat[t, :], -5)[-5:] # run through 5 highest probability
char_highscore = np.where(mat[t, :] >= 0.5/maxC)[0] # run through all probable characters
for c in char_highscore:
#for c in range(maxC - 1):
# add new char to current beam-labeling
# newLabeling = labeling + (c,)
# newLabeling = simplify_label(newLabeling, blankIdx)
newLabeling = fast_simplify_label(labeling, c, blankIdx)
# if new labeling contains duplicate char at the end, only consider paths ending with a blank
if labeling and labeling[-1] == c:
prNonBlank = mat[t, c] * last.entries[prev_labeling].prBlank
else:
prNonBlank = mat[t, c] * last.entries[prev_labeling].prTotal
# add beam at current time-step if needed
addBeam(curr, newLabeling)
# fill in data
curr.entries[newLabeling].labeling = newLabeling
curr.entries[newLabeling].prNonBlank += prNonBlank
curr.entries[newLabeling].prTotal += prNonBlank
# apply LM
#applyLM(curr.entries[labeling], curr.entries[newLabeling], classes, lm)
# set new beam state
last = curr
# normalise LM scores according to beam-labeling-length
last.norm()
if dict_list == []:
bestLabeling = last.sort()[0] # get most probable labeling
res = ''
for i,l in enumerate(bestLabeling):
# removing repeated characters and blank.
if l not in ignore_idx and (not (i > 0 and bestLabeling[i - 1] == bestLabeling[i])):
res += classes[l]
else:
res = last.wordsearch(classes, ignore_idx, 20, dict_list)
return res | null |
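A toy decode sketch (three time-steps, blank index 0, classes '_ab'; the probabilities are made up purely to show the call shape):
import numpy as np
# rows = time-steps, columns = [blank, 'a', 'b'] posteriors
mat = np.array([[0.1, 0.8, 0.1],
                [0.7, 0.2, 0.1],
                [0.1, 0.1, 0.8]])
text = ctcBeamSearch(mat, classes='_ab', ignore_idx=[0], lm=None, beamWidth=5)
print(text)  # expected to decode to something like 'ab'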
1,210 | from __future__ import print_function
import torch
import pickle
import numpy as np
import math
import cv2
from PIL import Image, JpegImagePlugin
from scipy import ndimage
import hashlib
import sys, os
from zipfile import ZipFile
from .imgproc import loadImage
def merge_to_free(merge_result, free_list):
merge_result_buf, mr_buf = [], []
if not free_list:
return merge_result
free_list_buf = merge_result[-len(free_list):]
merge_result = merge_result[:-len(free_list)]
for idx, r in enumerate(merge_result):
if idx == len(merge_result)-1:
mr_buf.append(r)
merge_result_buf.append(mr_buf)
mr_buf=[]
continue
if (mr_buf == []) or (mr_buf[-1][0] < r[0]):
mr_buf.append(r)
else:
merge_result_buf.append(mr_buf)
mr_buf=[]
mr_buf.append(r)
for free_pos in free_list_buf:
y_pos = len(merge_result_buf)
x_pos = len(merge_result_buf[y_pos-1])
for i, result_pos in enumerate(merge_result_buf[1:]):
if free_pos[0][0][1] < result_pos[0][0][0][1]:
y_pos = i
break
for i, result_pos in enumerate(merge_result_buf[y_pos]):
if free_pos[0][0][0] < result_pos[0][0][0]:
x_pos = i
break
merge_result_buf[y_pos].insert(x_pos, free_pos)
merge_result = []
[merge_result.extend(r) for r in merge_result_buf]
return merge_result | null |
1,211 | from __future__ import print_function
import torch
import pickle
import numpy as np
import math
import cv2
from PIL import Image, JpegImagePlugin
from scipy import ndimage
import hashlib
import sys, os
from zipfile import ZipFile
from .imgproc import loadImage
def group_text_box(polys, slope_ths = 0.1, ycenter_ths = 0.5, height_ths = 0.5, width_ths = 1.0, add_margin = 0.05, sort_output = True):
# poly top-left, top-right, low-right, low-left
horizontal_list, free_list,combined_list, merged_list = [],[],[],[]
for poly in polys:
slope_up = (poly[3]-poly[1])/np.maximum(10, (poly[2]-poly[0]))
slope_down = (poly[5]-poly[7])/np.maximum(10, (poly[4]-poly[6]))
if max(abs(slope_up), abs(slope_down)) < slope_ths:
x_max = max([poly[0],poly[2],poly[4],poly[6]])
x_min = min([poly[0],poly[2],poly[4],poly[6]])
y_max = max([poly[1],poly[3],poly[5],poly[7]])
y_min = min([poly[1],poly[3],poly[5],poly[7]])
horizontal_list.append([x_min, x_max, y_min, y_max, 0.5*(y_min+y_max), y_max-y_min])
else:
height = np.linalg.norm([poly[6]-poly[0],poly[7]-poly[1]])
width = np.linalg.norm([poly[2]-poly[0],poly[3]-poly[1]])
margin = int(1.44*add_margin*min(width, height))
theta13 = abs(np.arctan( (poly[1]-poly[5])/np.maximum(10, (poly[0]-poly[4]))))
theta24 = abs(np.arctan( (poly[3]-poly[7])/np.maximum(10, (poly[2]-poly[6]))))
# do I need to clip minimum, maximum value here?
x1 = poly[0] - np.cos(theta13)*margin
y1 = poly[1] - np.sin(theta13)*margin
x2 = poly[2] + np.cos(theta24)*margin
y2 = poly[3] - np.sin(theta24)*margin
x3 = poly[4] + np.cos(theta13)*margin
y3 = poly[5] + np.sin(theta13)*margin
x4 = poly[6] - np.cos(theta24)*margin
y4 = poly[7] + np.sin(theta24)*margin
free_list.append([[x1,y1],[x2,y2],[x3,y3],[x4,y4]])
if sort_output:
horizontal_list = sorted(horizontal_list, key=lambda item: item[4])
# combine box
new_box = []
for poly in horizontal_list:
if len(new_box) == 0:
b_height = [poly[5]]
b_ycenter = [poly[4]]
new_box.append(poly)
else:
# comparable height and comparable y_center level up to ths*height
if abs(np.mean(b_ycenter) - poly[4]) < ycenter_ths*np.mean(b_height):
b_height.append(poly[5])
b_ycenter.append(poly[4])
new_box.append(poly)
else:
b_height = [poly[5]]
b_ycenter = [poly[4]]
combined_list.append(new_box)
new_box = [poly]
combined_list.append(new_box)
# merge list use sort again
for boxes in combined_list:
if len(boxes) == 1: # one box per line
box = boxes[0]
margin = int(add_margin*min(box[1]-box[0],box[5]))
merged_list.append([box[0]-margin,box[1]+margin,box[2]-margin,box[3]+margin])
else: # multiple boxes per line
boxes = sorted(boxes, key=lambda item: item[0])
merged_box, new_box = [],[]
for box in boxes:
if len(new_box) == 0:
b_height = [box[5]]
x_max = box[1]
new_box.append(box)
else:
if (abs(np.mean(b_height) - box[5]) < height_ths*np.mean(b_height)) and ((box[0]-x_max) < width_ths *(box[3]-box[2])): # merge boxes
b_height.append(box[5])
x_max = box[1]
new_box.append(box)
else:
b_height = [box[5]]
x_max = box[1]
merged_box.append(new_box)
new_box = [box]
if len(new_box) >0: merged_box.append(new_box)
for mbox in merged_box:
if len(mbox) != 1: # adjacent box in same line
# do I need to add margin here?
x_min = min(mbox, key=lambda x: x[0])[0]
x_max = max(mbox, key=lambda x: x[1])[1]
y_min = min(mbox, key=lambda x: x[2])[2]
y_max = max(mbox, key=lambda x: x[3])[3]
box_width = x_max - x_min
box_height = y_max - y_min
margin = int(add_margin * (min(box_width, box_height)))
merged_list.append([x_min-margin, x_max+margin, y_min-margin, y_max+margin])
else: # non adjacent box in same line
box = mbox[0]
box_width = box[1] - box[0]
box_height = box[3] - box[2]
margin = int(add_margin * (min(box_width, box_height)))
merged_list.append([box[0]-margin,box[1]+margin,box[2]-margin,box[3]+margin])
# may need to check if box is really in image
return merged_list, free_list | null |
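A small worked call (made-up quad coordinates): two nearly axis-aligned words on one line are merged into a single horizontal box, while a strongly slanted quad goes to free_list:
import numpy as np
polys = [
    np.array([10, 10, 60, 10, 60, 30, 10, 30]),    # word 1, horizontal
    np.array([70, 11, 120, 11, 120, 31, 70, 31]),  # word 2, same line
    np.array([10, 60, 60, 90, 55, 110, 5, 80]),    # slanted text
]
merged, free = group_text_box(polys, width_ths=1.0, add_margin=0.1)
print(merged)              # one merged [x_min, x_max, y_min, y_max] box for the first line
print(len(free), "free-form (rotated) boxes")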
1,212 | from __future__ import print_function
import torch
import pickle
import numpy as np
import math
import cv2
from PIL import Image, JpegImagePlugin
from scipy import ndimage
import hashlib
import sys, os
from zipfile import ZipFile
from .imgproc import loadImage
def four_point_transform(image, rect):
(tl, tr, br, bl) = rect
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
dst = np.array([[0, 0],[maxWidth - 1, 0],[maxWidth - 1, maxHeight - 1],[0, maxHeight - 1]], dtype = "float32")
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
return warped
def calculate_ratio(width,height):
'''
Calculate aspect ratio for normal use case (w>h) and vertical text (h>w)
'''
ratio = width/height
if ratio<1.0:
ratio = 1./ratio
return ratio
def compute_ratio_and_resize(img,width,height,model_height):
'''
Calculate ratio and resize correctly for both horizontal text
and vertical case
'''
ratio = width/height
if ratio<1.0:
ratio = calculate_ratio(width,height)
img = cv2.resize(img,(model_height,int(model_height*ratio)), interpolation=Image.Resampling.LANCZOS)
else:
img = cv2.resize(img,(int(model_height*ratio),model_height),interpolation=Image.Resampling.LANCZOS)
return img,ratio
def get_image_list(horizontal_list, free_list, img, model_height = 64, sort_output = True):
image_list = []
maximum_y,maximum_x = img.shape
max_ratio_hori, max_ratio_free = 1,1
for box in free_list:
rect = np.array(box, dtype = "float32")
transformed_img = four_point_transform(img, rect)
ratio = calculate_ratio(transformed_img.shape[1],transformed_img.shape[0])
new_width = int(model_height*ratio)
if new_width == 0:
pass
else:
crop_img,ratio = compute_ratio_and_resize(transformed_img,transformed_img.shape[1],transformed_img.shape[0],model_height)
image_list.append( (box,crop_img) ) # box = [[x1,y1],[x2,y2],[x3,y3],[x4,y4]]
max_ratio_free = max(ratio, max_ratio_free)
max_ratio_free = math.ceil(max_ratio_free)
for box in horizontal_list:
x_min = max(0,box[0])
x_max = min(box[1],maximum_x)
y_min = max(0,box[2])
y_max = min(box[3],maximum_y)
crop_img = img[y_min : y_max, x_min:x_max]
width = x_max - x_min
height = y_max - y_min
ratio = calculate_ratio(width,height)
new_width = int(model_height*ratio)
if new_width == 0:
pass
else:
crop_img,ratio = compute_ratio_and_resize(crop_img,width,height,model_height)
image_list.append( ( [[x_min,y_min],[x_max,y_min],[x_max,y_max],[x_min,y_max]] ,crop_img) )
max_ratio_hori = max(ratio, max_ratio_hori)
max_ratio_hori = math.ceil(max_ratio_hori)
max_ratio = max(max_ratio_hori, max_ratio_free)
max_width = math.ceil(max_ratio)*model_height
if sort_output:
image_list = sorted(image_list, key=lambda item: item[0][0][1]) # sort by vertical position
return image_list, max_width | null |
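Usage sketch on a synthetic grayscale page: get_image_list crops each detected region, rescales it to the recognizer height, and returns crops sorted top-to-bottom together with the padded maximum width:
import numpy as np
page = np.full((200, 400), 255, dtype=np.uint8)        # blank grayscale page
horizontal = [[10, 150, 20, 60], [10, 300, 80, 120]]   # [x_min, x_max, y_min, y_max]
crops, max_width = get_image_list(horizontal, [], page, model_height=64)
for box, crop in crops:
    print(box[0], crop.shape)  # top-left corner and (64, new_width) crop
print("max recognizer input width:", max_width)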