import csv
from datetime import datetime


def write_to_csv(keyword, tweets):
    '''
    Save tweets in a CSV file to load later.
    '''
    # Transform the tweepy tweets into a 2D array that will populate the CSV.
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in tweets]
    # Write the CSV; csv.writer needs a text-mode file opened with newline=''
    # in Python 3, and the tweet text is kept as str so no b'...' literals
    # end up in the file.
    file_name = '/app/data/{}_{}.csv'.format(
        keyword, datetime.strftime(datetime.now(), '%d_%m_%Y_%H:%M:%S'))
    with open(file_name, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)

import json
from datetime import datetime


def write_to_json(keyword, tweets):
    '''
    Save tweets in a JSON file to load later.
    '''
    outtweets = [{tweet.id_str: {'created_at': str(tweet.created_at), 'place': str(tweet.place),
                                 'geo': str(tweet.geo), 'text': tweet.text}} for tweet in tweets]
    file_name = '/app/data/{}_{}.json'.format(
        keyword, datetime.strftime(datetime.now(), '%d_%m_%Y_%H:%M:%S'))
    # Write the JSON; json.dump needs a text-mode file object in Python 3,
    # and the tweet text is kept as str because bytes are not JSON serializable.
    with open(file_name, 'w', encoding='utf-8') as f:
        json.dump(outtweets, f)
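
# A minimal usage sketch for the two writers above. The tweet objects only
# need id_str / created_at / text (plus place / geo for JSON), so a simple
# stub stands in for real tweepy results here; the /app/data directory is
# assumed to exist.
from types import SimpleNamespace

demo_tweets = [SimpleNamespace(id_str='1', created_at=datetime.now(),
                               place=None, geo=None, text='hello world')]
write_to_csv('demo', demo_tweets)   # -> /app/data/demo_<timestamp>.csv
write_to_json('demo', demo_tweets)  # -> /app/data/demo_<timestamp>.json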

import numpy as np

# find_stats relies on module-level helpers that are not part of this excerpt:
# append_to_dictionary(line) fills the global dictionary_quartets, and
# is_within_range(a, b, threshold) compares two quartet weights.
def find_stats(inputFile, THRESHOLD_TWO_QUARTETS=0.1, THRESHOLD_THREE_QUARTETS=0.15):
    with open(inputFile) as fileobject:
        for line in fileobject:
            append_to_dictionary(line)
    four_tax_seq = list(dictionary_quartets.keys())
    list_four_tax_sequence = []
    features = 7
    '''For each four-taxon sequence, the seven binary features are:
    Does this sequence contain only 1 type of quartet? [Binary]
    Does this sequence contain only 2 types of quartets? [Binary]
    Does this sequence contain all 3 types of quartets? [Binary]
    Are the top 2 quartet weights within 10% of each other? [Binary]
    With all 3 quartets present, are the top 2 weights within 15%? [Binary]
    Is 2*w1 - w2 - w3 > 0 (dominant weight exceeds the mean of the others)? [Binary]
    Is 3*w1 - 2*w2 - 2*w3 > 0? [Binary]
    '''
    results_table = np.zeros((len(four_tax_seq), features))
    idx_key = 0
    for key in four_tax_seq:
        list_four_tax_sequence.append([])
        vals = dictionary_quartets[key]
        for val in vals:
            (_1, _2, _3, _4, w) = val
            list_four_tax_sequence[idx_key].append(float(w))
        # Sort descending so index 0 holds the dominant quartet weight.
        list_four_tax_sequence[idx_key].sort(reverse=True)
        weights = list_four_tax_sequence[idx_key]
        if len(weights) == 1:
            results_table[idx_key][0] = 1
        if len(weights) == 2:
            results_table[idx_key][1] = 1
        if len(weights) == 3:
            results_table[idx_key][2] = 1
        if len(weights) in (2, 3) and is_within_range(weights[0], weights[1], THRESHOLD_TWO_QUARTETS):
            results_table[idx_key][3] = 1  # AND condition, so we get Pr(A and B)
        if len(weights) == 3 and is_within_range(weights[0], weights[1], THRESHOLD_THREE_QUARTETS):
            results_table[idx_key][4] = 1
        if len(weights) == 3 and (2*weights[0] - weights[1] - weights[2]) > 0:
            results_table[idx_key][5] = 1
        if len(weights) == 3 and (3*weights[0] - 2*weights[1] - 2*weights[2]) > 0:
            results_table[idx_key][6] = 1
        idx_key += 1
    sums = np.sum(results_table, axis=0)
    F1 = sums[0] / len(list_four_tax_sequence)
    F2 = sums[1] / len(list_four_tax_sequence)
    F3 = sums[2] / len(list_four_tax_sequence)
    F4 = sums[3] / (len(list_four_tax_sequence) - sums[0])  # Pr(A|B) = Pr(A and B) / Pr(B)
    F5 = sums[4] / sums[2]
    F6 = sums[5] / sums[2]
    F7 = sums[6] / sums[2]
    return F1, F2, F3, F4, F5, F6, F7
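
# is_within_range is not shown in this excerpt. A plausible definition that is
# consistent with how it is called above (a hedged assumption, not the
# original implementation): two weights count as "close" when their difference
# is at most the given threshold.
def is_within_range(a, b, threshold):
    return abs(a - b) <= threshold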

# Method excerpted from a class; requires `import dendropy`.
def discover_taxa(self,
                  treefile,
                  schema,
                  preserve_underscores):
    """
    Reads the first tree in treefile, and assumes that it is sufficient to
    populate a taxon set object fully, which it then returns.
    """
    for tree in dendropy.Tree.yield_from_files([treefile],
                                               schema=schema,
                                               preserve_underscores=preserve_underscores,
                                               ignore_unrecognized_keyword_arguments=True,
                                               ):
        # Returning inside the loop stops after the first tree.
        return tree.taxon_namespace
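
# Usage sketch (hedged): with dendropy installed and a Nexus tree file on
# disk, the method would be called on its owning object (not shown here), e.g.
#   taxa = runner.discover_taxa("trees.nex", schema="nexus",
#                               preserve_underscores=True)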

# A second version of find_stats that additionally returns the mean ratio of
# the dominant quartet weight to the sum of the other two weights.
def find_stats(inputFile, THRESHOLD_TWO_QUARTETS=0.1, THRESHOLD_THREE_QUARTETS=0.15):
    with open(inputFile) as fileobject:
        for line in fileobject:
            append_to_dictionary(line)
    four_tax_seq = list(dictionary_quartets.keys())
    list_four_tax_sequence = []
    features = 8  # the eighth column is reserved but unused in this version
    '''Same seven binary features as in the previous version of find_stats.'''
    results_table = np.zeros((len(four_tax_seq), features))
    idx_key = 0
    ratios = []
    for key in four_tax_seq:
        list_four_tax_sequence.append([])
        vals = dictionary_quartets[key]
        for val in vals:
            (_1, _2, _3, _4, w) = val
            list_four_tax_sequence[idx_key].append(float(w))
        # Sort descending so index 0 holds the dominant quartet weight.
        list_four_tax_sequence[idx_key].sort(reverse=True)
        weights = list_four_tax_sequence[idx_key]
        if len(weights) == 1:
            results_table[idx_key][0] = 1
        if len(weights) == 2:
            results_table[idx_key][1] = 1
        if len(weights) == 3:
            results_table[idx_key][2] = 1
        if len(weights) in (2, 3) and is_within_range(weights[0], weights[1], THRESHOLD_TWO_QUARTETS):
            results_table[idx_key][3] = 1  # AND condition, so we get Pr(A and B)
        if len(weights) == 3 and is_within_range(weights[0], weights[1], THRESHOLD_THREE_QUARTETS):
            results_table[idx_key][4] = 1
        if len(weights) == 3 and (2*weights[0] - weights[1] - weights[2]) > 0:
            results_table[idx_key][5] = 1
        if len(weights) == 3 and (3*weights[0] - 2*weights[1] - 2*weights[2]) > 0:
            results_table[idx_key][6] = 1
        if len(weights) == 3:
            # Ratio of the dominant weight to the sum of the two alternatives.
            ratios.append(weights[0] / (weights[1] + weights[2]))
        idx_key += 1
    mean = np.mean(ratios)
    print(mean)
    sums = np.sum(results_table, axis=0)
    F1 = sums[0] / len(list_four_tax_sequence)
    F2 = sums[1] / len(list_four_tax_sequence)
    F3 = sums[2] / len(list_four_tax_sequence)
    F4 = sums[3] / (len(list_four_tax_sequence) - sums[0])  # Pr(A|B) = Pr(A and B) / Pr(B)
    F5 = sums[4] / sums[2]
    F6 = sums[5] / sums[2]
    F7 = sums[6] / sums[2]
    return F1, F2, F3, F4, F5, F6, F7, mean
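
# The extra statistic measures how dominant the top quartet is: for weights
# w1 >= w2 >= w3, the ratio r = w1 / (w2 + w3) is at least 0.5 (all three
# weights equal) and exceeds 1 exactly when the dominant topology outweighs
# the other two combined.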

import ctypes

# The cuDNN wrappers below assume the shared library has been loaded as
# _libcudnn, with cudnnDataType, cudnnExceptions, and cudnnError defined
# alongside it in the same module.
def cudnnCheckStatus(status):
    """
    Raise cuDNN exception.

    Raise an exception corresponding to the specified cuDNN error code.

    Parameters
    ----------
    status : int
        cuDNN error code
    """
    if status != 0:
        try:
            raise cudnnExceptions[status]
        except KeyError:
            raise cudnnError
"""
Raise cuDNN exception
Raise an exception corresponding to the specified cuDNN error code.
Parameters
----------
status : int
cuDNN error code
"""
if status != 0:
try:
raise cudnnExceptions[status]
except KeyError:
raise cudnnError |
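
# Usage sketch: every raw _libcudnn call below funnels its return code through
# cudnnCheckStatus, e.g. (hedged; cudnnCreate is a real cuDNN entry point but
# its wrapper is not shown in this excerpt):
#   handle = ctypes.c_void_p()
#   status = _libcudnn.cudnnCreate(ctypes.byref(handle))
#   cudnnCheckStatus(status)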

def cudnnTransformTensor(handle, alpha, srcDesc, srcData, beta, destDesc, destData):
    """
    Tensor layout conversion helper (dest = alpha * src + beta * dest).

    This function copies the scaled data from one tensor to another tensor with a different
    layout. Those descriptors need to have the same dimensions but not necessarily the
    same strides. The input and output tensors must not overlap in any way (i.e., tensors
    cannot be transformed in place). This function can be used to convert a tensor with an
    unsupported format to a supported one.

    Parameters
    ----------
    handle : cudnnHandle
        cuDNN context.
    alpha : float
        Scalar factor to be applied to every element of the input tensor before it is added
        to the output tensor.
    srcDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    srcData : void_p
        Pointer to data of the tensor described by the srcDesc descriptor.
    beta : float
        Scaling factor which is applied to every element of the output tensor prior to adding
        the result of the operation. Note that if beta is zero, the output is not read and can
        contain any uninitialized data (including NaN numbers).
    destDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    destData : void_p
        Pointer to data of the tensor described by the destDesc descriptor.
    """
    # Pass alpha/beta with the precision matching the destination tensor's data type.
    dataType, _, _, _, _, _, _, _, _ = cudnnGetTensor4dDescriptor(destDesc)
    if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
        alphaRef = ctypes.byref(ctypes.c_float(alpha))
        betaRef = ctypes.byref(ctypes.c_float(beta))
    else:
        alphaRef = ctypes.byref(ctypes.c_double(alpha))
        betaRef = ctypes.byref(ctypes.c_double(beta))
    status = _libcudnn.cudnnTransformTensor(handle, alphaRef, srcDesc,
                                            srcData, betaRef,
                                            destDesc, destData)
    cudnnCheckStatus(status)
""""
Tensor layout conversion helper (dest = alpha * src + beta * dest).
This function copies the scaled data from one tensor to another tensor with a different
layout. Those descriptors need to have the same dimensions but not necessarily the
same strides. The input and output tensors must not overlap in any way (i.e., tensors
cannot be transformed in place). This function can be used to convert a tensor with an
unsupported format to a supported one.
Parameters
----------
handle : cudnnHandle
cuDNN context.
alpha : float
Scalar factor to be applied to every element of the input tensor before it is added
to the output tensor.
srcDesc : cudnnTensorDescriptor
Handle to a previously initialized tensor descriptor.
srcData : void_p
Pointer to data of the tensor described by srcDesc descriptor.
beta: float
Scaling factor which is applied on every element of the output tensor prior to adding
the result of the operation. Note that if beta is zero, the output is not read and can
contain any uninitialized data (including Nan numbers).
destDesc : cudnnTensorDescriptor
Handle to a previously initialized tensor descriptor.
destData : void_p
Pointer to data of the tensor described by destDesc descriptor.
"""
dataType, _, _, _, _, _, _, _, _ = cudnnGetTensor4dDescriptor(destDesc)
if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
else:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
status = _libcudnn.cudnnTransformTensor(handle, alphaRef, srcDesc,
srcData, betaRef,
destDesc, destData)
cudnnCheckStatus(status) |
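
# The data-type dispatch above recurs in every alpha/beta wrapper below. A
# hedged refactoring sketch (not part of the original module) that would
# factor it out:
def _scaling_refs(desc, alpha, beta):
    # Choose c_float or c_double to match the tensor's data type.
    dataType = cudnnGetTensor4dDescriptor(desc)[0]
    ctype = ctypes.c_float if dataType == cudnnDataType['CUDNN_DATA_FLOAT'] else ctypes.c_double
    return ctypes.byref(ctype(alpha)), ctypes.byref(ctype(beta))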

def cudnnAddTensor(handle, mode, alpha, biasDesc, biasData, beta, srcDestDesc, srcDestData):
    """
    Tensor bias addition: srcDest = alpha * bias + beta * srcDest.

    This function adds the scaled values of one tensor to another tensor. The mode parameter
    can be used to select different ways of performing the scaled addition. The amount
    of data described by the biasDesc descriptor must match exactly the amount of data
    needed to perform the addition.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a cuDNN context.
    mode : cudnnAddMode
        Addition mode that describes how the addition is performed.
    alpha : float
        Scalar factor to be applied to every data element of the bias tensor before it is added
        to the output tensor.
    biasDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    biasData : void_p
        Pointer to data of the tensor described by biasDesc.
    beta : float
        Scaling factor which is applied to every element of the output tensor prior to adding
        the result of the operation. Note that if beta is zero, the output is not read and can
        contain any uninitialized data (including NaN numbers).
    srcDestDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    srcDestData : void_p
        Pointer to data of the tensor described by srcDestDesc.
    """
    dataType, _, _, _, _, _, _, _, _ = cudnnGetTensor4dDescriptor(srcDestDesc)
    if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
        alphaRef = ctypes.byref(ctypes.c_float(alpha))
        betaRef = ctypes.byref(ctypes.c_float(beta))
    else:
        alphaRef = ctypes.byref(ctypes.c_double(alpha))
        betaRef = ctypes.byref(ctypes.c_double(beta))
    status = _libcudnn.cudnnAddTensor(handle, mode, alphaRef, biasDesc,
                                      biasData, betaRef,
                                      srcDestDesc, srcDestData)
    cudnnCheckStatus(status)
""""
Tensor Bias addition : srcDest = alpha * bias + beta * srcDestDesc.
This function adds the scaled values of one tensor to another tensor. The mode parameter
can be used to select different ways of performing the scaled addition. The amount
of data described by the biasDesc descriptor must match exactly the amount of data
needed to perform the addition.
Parameters
----------
handle : cudnnHandle
Handle to a cuDNN context.
mode : cudnnAddMode
Addition mode that describes how the addition is performed
alpha : float
Scalar factor to be applied to every data element of the bias tensor before it is added
to the output tensor.
biasDesc : cudnnTensorDescriptor
Handle to a previoulsy initialized tensor descriptor.
biasData : void_p
Pointer to data of the tensor described by biasDesc.
beta: float
Scaling factor which is applied on every element of the output tensor prior to adding
the result of the operation. Note that if beta is zero, the output is not read and can
contain any uninitialized data (including Nan numbers).
srcDestDesc : cudnnTensorDescriptor
Handle to a previoulsy initialized tensor descriptor.
srcDestData : void_p
Pointer to data of the tensor described by srcDestDesc.
"""
dataType, _, _, _, _, _, _, _, _ = cudnnGetTensor4dDescriptor(srcDestDesc)
if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
else:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
status = _libcudnn.cudnnAddTensor(handle, mode, alphaRef, biasDesc,
biasData, betaRef,
srcDestDesc, srcDestData)
cudnnCheckStatus(status) |

def cudnnSetTensor(handle, srcDesc, srcData, value):
    """
    Set all data points of a tensor to a given value: src = value.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    srcDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    srcData : void_p
        Pointer to data of the tensor described by the srcDesc descriptor.
    value : float
        Value that all elements of the tensor will be set to.
    """
    # Pass the fill value with the precision matching the tensor's data type.
    dataType, _, _, _, _, _, _, _, _ = cudnnGetTensor4dDescriptor(srcDesc)
    if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
        valueRef = ctypes.byref(ctypes.c_float(value))
    else:
        valueRef = ctypes.byref(ctypes.c_double(value))
    status = _libcudnn.cudnnSetTensor(handle, srcDesc, srcData, valueRef)
    cudnnCheckStatus(status)
""""
Set all data points of a tensor to a given value : srcDest = alpha.
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
srcDesc : cudnnTensorDescriptor
Handle to a previously initialized tensor descriptor.
srcData : void_p
Pointer to data of the tensor described by srcDesc descriptor.
value : float
Value that all elements of the tensor will be set to.
"""
dataType, _, _, _, _, _, _, _, _ = cudnnGetTensor4dDescriptor(srcDesc)
if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
else:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
status = _libcudnn.cudnnSetTensor(handle, srcDesc, srcData, alphaRef)
cudnnCheckStatus(status) |

def cudnnScaleTensor(handle, srcDesc, srcData, alpha):
    """
    This function scales all the elements of a tensor by a given factor.

    Set all data points of a tensor to a scaled value: src = alpha * src.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    srcDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    srcData : void_p
        Pointer to data of the tensor described by the srcDesc descriptor.
    alpha : float
        Value that all elements of the tensor will be scaled with.
    """
    dataType, _, _, _, _, _, _, _, _ = cudnnGetTensor4dDescriptor(srcDesc)
    if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
        alphaRef = ctypes.byref(ctypes.c_float(alpha))
    else:
        alphaRef = ctypes.byref(ctypes.c_double(alpha))
    status = _libcudnn.cudnnScaleTensor(handle, srcDesc, srcData, alphaRef)
    cudnnCheckStatus(status)
""""
This function scales all the elements of a tensor by a give factor.
Set all data points of a tensor to scaled value : srcDest = alpha * srcDest.
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
srcDesc : cudnnTensorDescriptor
Handle to a previously initialized tensor descriptor.
srcData : void_p
Pointer to data of the tensor described by srcDesc descriptor.
alpha : float
Value that all elements of the tensor will be scaled with.
"""
dataType, _, _, _, _, _, _, _, _ = cudnnGetTensor4dDescriptor(srcDesc)
if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
else:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
status = _libcudnn.cudnnScaleTensor(handle, srcDesc, srcData, alphaRef)
cudnnCheckStatus(status) |

def cudnnCreateFilterDescriptor():
    """
    Create a filter descriptor.

    This function creates a filter descriptor object by allocating the memory needed
    to hold its opaque structure.

    Returns
    -------
    filterDesc : cudnnFilterDescriptor
        Handle to a newly allocated filter descriptor.
    """
    filterDesc = ctypes.c_void_p()
    status = _libcudnn.cudnnCreateFilterDescriptor(ctypes.byref(filterDesc))
    cudnnCheckStatus(status)
    return filterDesc.value
""""
Create a filter descriptor.
This function creates a filter descriptor object by allocating the memory needed
to hold its opaque structure.
Parameters
----------
Returns
-------
filterDesc : cudnnFilterDescriptor
Handle to a newly allocated filter descriptor.
"""
filterDesc = ctypes.c_void_p()
status = _libcudnn.cudnnCreateFilterDescriptor(ctypes.byref(filterDesc))
cudnnCheckStatus(status)
return filterDesc.value |

def cudnnSetFilter4dDescriptor(filterDesc, dataType, k, c, h, w):
    """
    Initialize a filter descriptor.

    This function initializes a previously created filter descriptor object into a 4D filter.
    The filter layout must be contiguous in memory.

    Parameters
    ----------
    filterDesc : cudnnFilterDescriptor
        Handle to a previously created filter descriptor.
    dataType : cudnnDataType
        Data type.
    k : int
        Number of output feature maps.
    c : int
        Number of input feature maps.
    h : int
        Height of each filter.
    w : int
        Width of each filter.
    """
    status = _libcudnn.cudnnSetFilter4dDescriptor(filterDesc, dataType, k, c, h, w)
    cudnnCheckStatus(status)
""""
Initialize a filter descriptor.
This function initializes a previously created filter descriptor object into a 4D filter.
Filters layout must be contiguous in memory.
Parameters
----------
filterDesc : cudnnFilterDescriptor
Handle to a previously created filter descriptor.
dataType : cudnnDataType
Data type.
k : int
Number of output feature maps.
c : int
Number of input feature maps.
h : int
Height of each filter.
w : int
Width of each filter.
"""
status = _libcudnn.cudnnSetFilter4dDescriptor(filterDesc, dataType, k, c, h, w)
cudnnCheckStatus(status) |

def cudnnGetFilter4dDescriptor(filterDesc):
    """
    Get parameters of a filter descriptor.

    This function queries the parameters of a previously initialized filter descriptor object.

    Parameters
    ----------
    filterDesc : cudnnFilterDescriptor
        Handle to a previously created filter descriptor.

    Returns
    -------
    dataType : cudnnDataType
        Data type.
    k : int
        Number of output feature maps.
    c : int
        Number of input feature maps.
    h : int
        Height of each filter.
    w : int
        Width of each filter.
    """
    dataType = ctypes.c_int()
    k = ctypes.c_int()
    c = ctypes.c_int()
    h = ctypes.c_int()
    w = ctypes.c_int()
    status = _libcudnn.cudnnGetFilter4dDescriptor(filterDesc, ctypes.byref(dataType),
                                                  ctypes.byref(k), ctypes.byref(c),
                                                  ctypes.byref(h), ctypes.byref(w))
    cudnnCheckStatus(status)
    return dataType.value, k.value, c.value, h.value, w.value
""""
Get parameters of filter descriptor.
This function queries the parameters of the previouly initialized filter descriptor object.
Parameters
----------
filterDesc : cudnnFilterDescriptor
Handle to a previously created filter descriptor.
Returns
-------
dataType : cudnnDataType
Data type.
k : int
Number of output feature maps.
c : int
Number of input feature maps.
h : int
Height of each filter.
w : int
Width of each filter.
"""
dataType = ctypes.c_int()
k = ctypes.c_int()
c = ctypes.c_int()
h = ctypes.c_int()
w = ctypes.c_int()
status = _libcudnn.cudnnGetFilter4dDescriptor(filterDesc, ctypes.byref(dataType),
ctypes.byref(k), ctypes.byref(c),
ctypes.byref(h), ctypes.byref(w))
cudnnCheckStatus(status)
return dataType.value, k.value, c.value, h.value, w.value |

def cudnnDestroyFilterDescriptor(filterDesc):
    """
    Destroy a filter descriptor.

    This function destroys a previously created filter descriptor object.

    Parameters
    ----------
    filterDesc : cudnnFilterDescriptor
        Handle to a previously created filter descriptor.
    """
    status = _libcudnn.cudnnDestroyFilterDescriptor(filterDesc)
    cudnnCheckStatus(status)
""""
Destroy filter descriptor.
This function destroys a previously created Tensor4D descriptor object.
Parameters
----------
filterDesc : cudnnFilterDescriptor
"""
status = _libcudnn.cudnnDestroyFilterDescriptor(filterDesc)
cudnnCheckStatus(status) |
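
# Filter-descriptor lifecycle sketch built from the four wrappers above
# (hedged: it runs only once _libcudnn has actually been loaded; the 64/3/3/3
# filter shape is illustrative):
fd = cudnnCreateFilterDescriptor()
cudnnSetFilter4dDescriptor(fd, cudnnDataType['CUDNN_DATA_FLOAT'], 64, 3, 3, 3)
print(cudnnGetFilter4dDescriptor(fd))  # (dataType, k, c, h, w)
cudnnDestroyFilterDescriptor(fd)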

def cudnnSetConvolution2dDescriptor(convDesc, pad_h, pad_w, u, v, upscalex, upscaley, mode):
    """
    Initialize a convolution descriptor.

    This function initializes a previously created convolution descriptor object into a 2D
    correlation. This function assumes that the tensor and filter descriptors correspond
    to the forward convolution path and checks if their settings are valid. That same
    convolution descriptor can be reused in the backward path provided it corresponds to
    the same layer.

    Parameters
    ----------
    convDesc : cudnnConvolutionDescriptor
        Handle to a previously created convolution descriptor.
    pad_h : int
        Zero-padding height: number of rows of zeros implicitly concatenated
        onto the top and onto the bottom of input images.
    pad_w : int
        Zero-padding width: number of columns of zeros implicitly concatenated
        onto the left and onto the right of input images.
    u : int
        Vertical filter stride.
    v : int
        Horizontal filter stride.
    upscalex : int
        Upscale the input in x-direction.
    upscaley : int
        Upscale the input in y-direction.
    mode : cudnnConvolutionMode
        Select between CUDNN_CONVOLUTION and CUDNN_CROSS_CORRELATION.
    """
    status = _libcudnn.cudnnSetConvolution2dDescriptor(convDesc, pad_h, pad_w, u, v,
                                                       upscalex, upscaley, mode)
    cudnnCheckStatus(status)
""""
Initialize a convolution descriptor.
This function initializes a previously created convolution descriptor object into a 2D
correlation. This function assumes that the tensor and filter descriptors corresponds
to the formard convolution path and checks if their settings are valid. That same
convolution descriptor can be reused in the backward path provided it corresponds to
the same layer.
Parameters
----------
convDesc : cudnnConvolutionDescriptor
Handle to a previously created convolution descriptor.
pad_h : int
zero-padding height: number of rows of zeros implicitly concatenated
onto the top and onto the bottom of input images.
pad_w : int
zero-padding width: number of columns of zeros implicitly concatenated
onto the left and onto the right of input images.
u : int
Vertical filter stride.
v : int
Horizontal filter stride.
upscalex : int
Upscale the input in x-direction.
uscaley : int
Upscale the input in y-direction.
mode : cudnnConvolutionMode
Select between CUDNN_CONVOLUTION or CUDNN_CROSS_CORRELATION.
"""
status = _libcudnn.cudnnSetConvolution2dDescriptor(convDesc, pad_h, pad_w, u, v,
upscalex, upscaley, mode)
cudnnCheckStatus(status) |

def cudnnGetConvolution2dDescriptor(convDesc):
    """
    Get a convolution descriptor.

    This function queries a previously initialized 2D convolution descriptor object.

    Parameters
    ----------
    convDesc : cudnnConvolutionDescriptor
        Handle to a previously created convolution descriptor.

    Returns
    -------
    pad_h : int
        Zero-padding height: number of rows of zeros implicitly concatenated onto
        the top and onto the bottom of input images.
    pad_w : int
        Zero-padding width: number of columns of zeros implicitly concatenated
        onto the left and onto the right of input images.
    u : int
        Vertical filter stride.
    v : int
        Horizontal filter stride.
    upscalex : int
        Upscale the input in x-direction.
    upscaley : int
        Upscale the input in y-direction.
    mode : cudnnConvolutionMode
        Either CUDNN_CONVOLUTION or CUDNN_CROSS_CORRELATION.
    """
    pad_h = ctypes.c_int()
    pad_w = ctypes.c_int()
    u = ctypes.c_int()
    v = ctypes.c_int()
    upscalex = ctypes.c_int()
    upscaley = ctypes.c_int()
    mode = ctypes.c_int()
    status = _libcudnn.cudnnGetConvolution2dDescriptor(convDesc, ctypes.byref(pad_h),
                                                       ctypes.byref(pad_w), ctypes.byref(u),
                                                       ctypes.byref(v), ctypes.byref(upscalex),
                                                       ctypes.byref(upscaley),
                                                       ctypes.byref(mode))
    cudnnCheckStatus(status)
    return pad_h.value, pad_w.value, u.value, v.value, upscalex.value, upscaley.value, mode.value
""""
Get a convolution descriptor.
This function queries a previously initialized 2D convolution descriptor object.
Parameters
----------
convDesc : cudnnConvolutionDescriptor
Handle to a previously created convolution descriptor.
Returns
-------
pad_h : int
zero-padding height: number of rows of zeros implicitly concatenated onto
the top and onto the bottom of input images.
pad_w : int
zero-padding width: number of columns of zeros implicitly concatenated
onto the left and onto the right of input images.
u : int
Vertical filter stride.
v : int
Horizontal filter stride.
upscalex : int
Upscale the input in x-direction.
upscaley : int
Upscale the input in y-direction.
mode : cudnnConvolutionMode
Either CUDNN_CONVOLUTION or CUDNN_CROSS_CORRELATION.
"""
pad_h = ctypes.c_int()
pad_w = ctypes.c_int()
u = ctypes.c_int()
v = ctypes.c_int()
upscalex = ctypes.c_int()
upscaley = ctypes.c_int()
mode = ctypes.c_int()
status = _libcudnn.cudnnGetConvolution2dDescriptor(convDesc, ctypes.byref(pad_h),
ctypes.byref(pad_w), ctypes.byref(u),
ctypes.byref(v), ctypes.byref(upscalex),
ctypes.byref(upscaley),
ctypes.byref(mode))
cudnnCheckStatus(status)
return pad_h.value, pad_w.value, u.value, v.value, upscalex.value, upscaley.value, mode.value |

def cudnnGetConvolution2dForwardOutputDim(convDesc, inputTensorDesc, filterDesc):
    """
    Return the dimensions of the output tensor given a convolution descriptor.

    This function returns the dimensions of the resulting 4D tensor of a 2D
    convolution, given the convolution descriptor, the input tensor descriptor and
    the filter descriptor. This function can help to set up the output tensor and allocate
    the proper amount of memory prior to launching the actual convolution.

    Parameters
    ----------
    convDesc : cudnnConvolutionDescriptor
        Handle to a previously created convolution descriptor.
    inputTensorDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    filterDesc : cudnnFilterDescriptor
        Handle to a previously initialized filter descriptor.

    Returns
    -------
    n : int
        Number of output images.
    c : int
        Number of output feature maps per image.
    h : int
        Height of each output feature map.
    w : int
        Width of each output feature map.
    """
    n = ctypes.c_int()
    c = ctypes.c_int()
    h = ctypes.c_int()
    w = ctypes.c_int()
    status = _libcudnn.cudnnGetConvolution2dForwardOutputDim(convDesc, inputTensorDesc,
                                                             filterDesc, ctypes.byref(n),
                                                             ctypes.byref(c), ctypes.byref(h),
                                                             ctypes.byref(w))
    cudnnCheckStatus(status)
    return n.value, c.value, h.value, w.value
""""
Return the dimensions of the output tensor given a convolution descriptor.
This function returns the dimensions of the resulting 4D tensor of a 2D
convolution, given the convolution descriptor, the input tensor descriptor and
the filter descriptor. This function can help to setup the output tensor and allocate
the proper amount of memory prior to launching the actual convolution.
Parameters
----------
convDesc : cudnnConvolutionDescriptor
Handle to a previously created convolution descriptor.
inputTensorDesc: cudnnTensorDescriptor
Handle to a previously initialized tensor descriptor.
filterDesc: cudnnFilterDescriptor
Handle to a previously initialized filter descriptor.
Returns
-------
n : int
Number of output images.
c : int
Number of output feature maps per image.
h : int
Height of each output feature map.
w : int
Width of each output feature map.
"""
n = ctypes.c_int()
c = ctypes.c_int()
h = ctypes.c_int()
w = ctypes.c_int()
status = _libcudnn.cudnnGetConvolution2dForwardOutputDim(convDesc, inputTensorDesc,
filterDesc, ctypes.byref(n),
ctypes.byref(c), ctypes.byref(h),
ctypes.byref(w))
cudnnCheckStatus(status)
return n.value, c.value, h.value, w.value |
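
# For reference, the returned spatial dimensions follow the usual convolution
# arithmetic (per the cuDNN documentation):
#   h_out = 1 + (h_in + 2*pad_h - filter_h) // u
#   w_out = 1 + (w_in + 2*pad_w - filter_w) // v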

def cudnnGetConvolutionForwardAlgorithm(handle, srcDesc, filterDesc,
                                        convDesc, destDesc, preference, memoryLimitInbytes):
    """
    This function returns the best algorithm to choose for the forward convolution
    depending on the criteria expressed in the cudnnConvolutionFwdPreference_t enumerant.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    srcDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    filterDesc : cudnnFilterDescriptor
        Handle to a previously initialized filter descriptor.
    convDesc : cudnnConvolutionDescriptor
        Previously initialized convolution descriptor.
    destDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    preference : cudnnConvolutionFwdPreference
        Enumerant to express the preference criteria in terms of memory
        requirement and speed.
    memoryLimitInbytes : size_t
        The maximum amount of GPU memory the user is willing to use as a workspace
        when preference is CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT.

    Returns
    -------
    algo : cudnnConvolutionFwdAlgo
        Enumerant that specifies which convolution algorithm should be used to
        compute the results according to the specified preference.
    """
    algo = ctypes.c_int()
    status = _libcudnn.cudnnGetConvolutionForwardAlgorithm(handle, srcDesc, filterDesc,
                                                           convDesc, destDesc, preference,
                                                           ctypes.c_size_t(memoryLimitInbytes),
                                                           ctypes.byref(algo))
    cudnnCheckStatus(status)
    return algo

def cudnnGetConvolutionForwardWorkspaceSize(handle, srcDesc, filterDesc,
                                            convDesc, destDesc, algo):
    """
    This function returns the amount of GPU memory workspace the user needs
    to allocate to be able to call cudnnConvolutionForward with the specified algorithm.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    srcDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    filterDesc : cudnnFilterDescriptor
        Handle to a previously initialized filter descriptor.
    convDesc : cudnnConvolutionDescriptor
        Previously initialized convolution descriptor.
    destDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    algo : cudnnConvolutionFwdAlgo
        Enumerant that specifies the chosen convolution algorithm.

    Returns
    -------
    sizeInBytes : c_size_t
        Amount of GPU memory needed as workspace to be able to execute a
        forward convolution with the specified algo.
    """
    sizeInBytes = ctypes.c_size_t()
    status = _libcudnn.cudnnGetConvolutionForwardWorkspaceSize(handle, srcDesc, filterDesc,
                                                               convDesc, destDesc, algo,
                                                               ctypes.byref(sizeInBytes))
    cudnnCheckStatus(status)
    return sizeInBytes

def cudnnConvolutionForward(handle, alpha, srcDesc, srcData, filterDesc, filterData,
                            convDesc, algo, workspace, workSpaceSizeInBytes, beta,
                            destDesc, destData):
    """
    Perform forward convolution: output = alpha * Op(inputs) + beta * output.

    This function executes convolutions or cross-correlations over src using the specified
    filters, returning results in dest. Scaling factors alpha and beta can be used to scale
    the input tensor and the output tensor respectively.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    alpha : float
        Scaling factor with which every element of the input tensor is multiplied.
    srcDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    srcData : void_p
        Data pointer to GPU memory associated with the tensor descriptor srcDesc.
    filterDesc : cudnnFilterDescriptor
        Handle to a previously initialized filter descriptor.
    filterData : void_p
        Data pointer to GPU memory associated with the filter descriptor filterDesc.
    convDesc : cudnnConvolutionDescriptor
        Previously initialized convolution descriptor.
    algo : cudnnConvolutionFwdAlgo
        Enumerant that specifies which convolution algorithm should be used to
        compute the results.
    workspace : void_p
        Data pointer to GPU memory for a workspace needed to be able to execute
        the specified algorithm. If no workspace is needed for a particular
        algorithm, that pointer can be nil.
    workSpaceSizeInBytes : long
        Specifies the size in bytes of the provided workspace.
    beta : float
        Scaling factor which is applied to every element of the output tensor prior
        to adding the result of the convolution.
    destDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    destData : void_p
        Data pointer to GPU memory associated with the tensor descriptor destDesc.
    """
    dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
    if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
        alphaRef = ctypes.byref(ctypes.c_float(alpha))
        betaRef = ctypes.byref(ctypes.c_float(beta))
    else:
        alphaRef = ctypes.byref(ctypes.c_double(alpha))
        betaRef = ctypes.byref(ctypes.c_double(beta))
    status = _libcudnn.cudnnConvolutionForward(handle, alphaRef, srcDesc, srcData,
                                               filterDesc, filterData,
                                               convDesc, algo, workspace,
                                               ctypes.c_size_t(workSpaceSizeInBytes),
                                               betaRef, destDesc, destData)
    cudnnCheckStatus(status)
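
# End-to-end forward sketch built from the wrappers above (hedged: handle
# creation, tensor-descriptor setup, and a cudnnConvolutionFwdPreference
# enumerant dict are assumed to exist elsewhere in the module, following the
# same naming pattern as cudnnDataType; data pointers would come from a GPU
# array library such as pycuda):
#   n, c, h, w = cudnnGetConvolution2dForwardOutputDim(convDesc, srcDesc, filterDesc)
#   algo = cudnnGetConvolutionForwardAlgorithm(
#       handle, srcDesc, filterDesc, convDesc, destDesc,
#       cudnnConvolutionFwdPreference['CUDNN_CONVOLUTION_FWD_PREFER_FASTEST'], 0)
#   ws_size = cudnnGetConvolutionForwardWorkspaceSize(
#       handle, srcDesc, filterDesc, convDesc, destDesc, algo)
#   cudnnConvolutionForward(handle, 1.0, srcDesc, src_ptr, filterDesc, filt_ptr,
#                           convDesc, algo, ws_ptr, ws_size.value, 0.0,
#                           destDesc, dest_ptr)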

def cudnnConvolutionBackwardBias(handle, alpha, srcDesc, srcData, beta, destDesc, destData):
    """
    Compute the gradient with respect to the bias.

    This function computes the convolution gradient with respect to the bias, which is the
    sum of every element belonging to the same feature map across all of the images of the
    input tensor. Therefore, the number of elements produced is equal to the number of
    feature maps of the input tensor.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    alpha : float
        Scaling factor with which every element of the input tensor is multiplied.
    srcDesc : cudnnTensorDescriptor
        Handle to the previously initialized input tensor descriptor.
    srcData : void_p
        Data pointer to GPU memory associated with the tensor descriptor srcDesc.
    beta : float
        Scaling factor which is applied to every element of the output tensor prior
        to adding the result of the convolution gradient. Note that if beta is zero,
        the output is not read and can contain any uninitialized data (including
        NaN numbers).
    destDesc : cudnnTensorDescriptor
        Handle to the previously initialized output tensor descriptor.
    destData : void_p
        Data pointer to GPU memory associated with the output tensor descriptor
        destDesc.
    """
    dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
    if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
        alphaRef = ctypes.byref(ctypes.c_float(alpha))
        betaRef = ctypes.byref(ctypes.c_float(beta))
    else:
        alphaRef = ctypes.byref(ctypes.c_double(alpha))
        betaRef = ctypes.byref(ctypes.c_double(beta))
    status = _libcudnn.cudnnConvolutionBackwardBias(handle, alphaRef, srcDesc, srcData,
                                                    betaRef, destDesc, destData)
    cudnnCheckStatus(status)
""""
Compute the gradient wrt the bias.
This function computes the convolution gradient with respect to the bias, which is the
sum of every element belonging to the same feature map across all of the images of the
input tensor. Therefore, the number of elements produced is equal to the number of
features maps of the input tensor.
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
alpha: float
Scaling factor with which every element of the input tensor is multiplied.
srcDesc : cudnnTensorDescriptor
Handle to the previously initialized input tensor descriptor.
srcData : void_p
Data pointer to GPU memory associated with the tensor descriptor
srcDesc.
beta: float
Scaling factor which is applied on every element of the output tensor prior
to adding the result of the convolution gradient. Note that if beta is zero,
the output is not read and can contain any uninitialized data (including
Nan numbers).
destDesc : cudnnTensorDescriptor
Handle to the previously initialized output tensor descriptor.
destData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDesc.
"""
dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
else:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
status = _libcudnn.cudnnConvolutionBackwardBias(handle, alphaRef, srcDesc, srcData,
betaRef, destDesc, destData)
cudnnCheckStatus(status) |

def cudnnConvolutionBackwardFilter(handle, alpha, srcDesc, srcData, diffDesc, diffData,
                                   convDesc, beta, gradDesc, gradData):
    """
    Compute the gradient with respect to the filter coefficients.

    This function computes the convolution gradient with respect to the filter coefficients.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    alpha : float
        Scaling factor with which every element of the input tensor is multiplied.
    srcDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    srcData : void_p
        Data pointer to GPU memory associated with the tensor descriptor srcDesc.
    diffDesc : cudnnTensorDescriptor
        Handle to the previously initialized input differential tensor descriptor.
    diffData : void_p
        Data pointer to GPU memory associated with the input differential tensor
        descriptor diffDesc.
    convDesc : cudnnConvolutionDescriptor
        Previously initialized convolution descriptor.
    beta : float
        Scaling factor which is applied to every element of the output tensor prior
        to adding the result of the convolution gradient. Note that if beta is zero,
        the output is not read and can contain any uninitialized data (including
        NaN numbers).
    gradDesc : cudnnFilterDescriptor
        Handle to a previously initialized filter descriptor.
    gradData : void_p
        Data pointer to GPU memory associated with the filter descriptor gradDesc
        that carries the result.
    """
    # Query the input tensor's data type; gradDesc is a filter descriptor, so
    # it cannot be passed to cudnnGetTensor4dDescriptor.
    dataType = cudnnGetTensor4dDescriptor(srcDesc)[0]
    if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
        alphaRef = ctypes.byref(ctypes.c_float(alpha))
        betaRef = ctypes.byref(ctypes.c_float(beta))
    else:
        alphaRef = ctypes.byref(ctypes.c_double(alpha))
        betaRef = ctypes.byref(ctypes.c_double(beta))
    status = _libcudnn.cudnnConvolutionBackwardFilter(handle, alphaRef, srcDesc,
                                                      srcData, diffDesc,
                                                      diffData, convDesc, betaRef,
                                                      gradDesc, gradData)
    cudnnCheckStatus(status)

def cudnnConvolutionBackwardData(handle, alpha, filterDesc, filterData, diffDesc, diffData, convDesc,
                                 beta, gradDesc, gradData):
    """
    Compute the gradient with respect to the data.

    This function computes the convolution gradient with respect to the data, i.e.
    the input of the forward convolution.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    alpha : float
        Scaling factor with which every element of the input tensor is multiplied.
    filterDesc : cudnnFilterDescriptor
        Handle to a previously initialized filter descriptor.
    filterData : void_p
        Data pointer to GPU memory associated with the filter descriptor filterDesc.
    diffDesc : cudnnTensorDescriptor
        Handle to the previously initialized input differential tensor descriptor.
    diffData : void_p
        Data pointer to GPU memory associated with the input differential tensor
        descriptor diffDesc.
    convDesc : cudnnConvolutionDescriptor
        Previously initialized convolution descriptor.
    beta : float
        Scaling factor which is applied to every element of the output tensor prior
        to adding the result of the convolution gradient. Note that if beta is zero,
        the output is not read and can contain any uninitialized data (including
        NaN numbers).
    gradDesc : cudnnTensorDescriptor
        Handle to the previously initialized output tensor descriptor.
    gradData : void_p
        Data pointer to GPU memory associated with the tensor descriptor gradDesc
        that carries the result.
    """
    # Query the data-gradient tensor's data type.
    dataType = cudnnGetTensor4dDescriptor(gradDesc)[0]
    if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
        alphaRef = ctypes.byref(ctypes.c_float(alpha))
        betaRef = ctypes.byref(ctypes.c_float(beta))
    else:
        alphaRef = ctypes.byref(ctypes.c_double(alpha))
        betaRef = ctypes.byref(ctypes.c_double(beta))
    status = _libcudnn.cudnnConvolutionBackwardData(handle, alphaRef, filterDesc,
                                                    filterData, diffDesc, diffData, convDesc,
                                                    betaRef, gradDesc, gradData)
    cudnnCheckStatus(status)

def cudnnSoftmaxForward(handle, algorithm, mode, alpha, srcDesc, srcData, beta, destDesc, destData):
    """
    This routine computes the softmax function.
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
algorithm : cudnnSoftmaxAlgorithm
Enumerant to specify the softmax algorithm.
mode : cudnnSoftmaxMode
Enumerant to specify the softmax mode.
alpha: float
Scaling factor with which every element of the input tensors is multiplied.
srcDesc : cudnnTensorDescriptor
Handle to the previously initialized input tensor descriptor.
srcData : void_p
Data pointer to GPU memory associated with the tensor descriptor
srcDesc.
    beta: float
        Scaling factor which is applied on every element of the output tensor
        prior to adding the result of the softmax. Note that if beta is zero,
        the output is not read and may contain uninitialized data (including
        NaN values).
destDesc : cudnnTensorDescriptor
Handle to the previously initialized output tensor descriptor.
destData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDesc.
"""
dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
else:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
status = _libcudnn.cudnnSoftmaxForward(handle, algorithm, mode, alphaRef,
srcDesc, srcData, betaRef,
destDesc, destData)
    cudnnCheckStatus(status)
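
# Illustrative sketch (an assumption about the semantics, not the cuDNN kernel):
# in channel mode the softmax is taken over axis 1 of an NCHW tensor. A NumPy
# reference for what cudnnSoftmaxForward computes:
import numpy as np

def softmax_channel(x):
    """Numerically stable softmax over the channel axis of an NCHW array."""
    shifted = x - x.max(axis=1, keepdims=True)
    e = np.exp(shifted)
    return e / e.sum(axis=1, keepdims=True)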
""""
This routing computes the softmax function
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
algorithm : cudnnSoftmaxAlgorithm
Enumerant to specify the softmax algorithm.
mode : cudnnSoftmaxMode
Enumerant to specify the softmax mode.
alpha: float
Scaling factor with which every element of the input tensors is multiplied.
srcDesc : cudnnTensorDescriptor
Handle to the previously initialized input tensor descriptor.
srcData : void_p
Data pointer to GPU memory associated with the tensor descriptor
srcDesc.
beta: float
Scaling factor which is applied on every element of the output tensor prior
to adding the result of the activation Note that if beta is zero, the output
is not read and can contain any uninitialized data (including Nan numbers).
destDesc : cudnnTensorDescriptor
Handle to the previously initialized output tensor descriptor.
destData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDesc.
"""
dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
else:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
status = _libcudnn.cudnnSoftmaxForward(handle, algorithm, mode, alphaRef,
srcDesc, srcData, betaRef,
destDesc, destData)
cudnnCheckStatus(status) |

def cudnnSoftmaxBackward(handle, algorithm, mode, alpha, srcDesc, srcData, srcDiffDesc,
                         srcDiffData, beta, destDiffDesc, destDiffData):
    """
This routine computes the gradient of the softmax function.
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
algorithm : cudnnSoftmaxAlgorithm
Enumerant to specify the softmax algorithm.
mode : cudnnSoftmaxMode
Enumerant to specify the softmax mode.
alpha: float
Scaling factor with which every element of the input tensors is multiplied.
srcDesc : cudnnTensorDescriptor
Handle to the previously initialized input tensor descriptor.
srcData : void_p
Data pointer to GPU memory associated with the tensor descriptor
srcDesc.
srcDiffDesc : cudnnTensorDescriptor
Handle to the previously initialized input differential tensor descriptor.
    srcDiffData : void_p
        Data pointer to GPU memory associated with the tensor descriptor
        srcDiffDesc.
    beta: float
        Scaling factor which is applied on every element of the output tensor
        prior to adding the result of the softmax gradient. Note that if beta
        is zero, the output is not read and may contain uninitialized data
        (including NaN values).
destDiffDesc : cudnnTensorDescriptor
Handle to the previously initialized output differential tensor descriptor.
destDiffData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDiffDesc.
"""
dataType = cudnnGetTensor4dDescriptor(destDiffDesc)[0]
if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
else:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
status = _libcudnn.cudnnSoftmaxBackward(handle, algorithm, mode, alphaRef,
srcDesc, srcData,
srcDiffDesc, srcDiffData, betaRef,
destDiffDesc, destDiffData)
    cudnnCheckStatus(status)
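
# Illustrative sketch (assumption): the softmax gradient corresponds to
# dx = y * (dy - sum(y * dy)) over the softmax axis, where y is the forward
# output and dy the upstream gradient. NumPy reference:
import numpy as np

def softmax_backward_channel(y, dy):
    """Gradient of the channel-wise softmax given its output y and upstream dy."""
    inner = (y * dy).sum(axis=1, keepdims=True)
    return y * (dy - inner)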

def cudnnPoolingForward(handle, poolingDesc, alpha, srcDesc, srcData, beta, destDesc, destData):
    """
Perform pooling.
This function computes pooling of input values (i.e., the maximum or average of several
adjacent values) to produce an output with smaller height and/or width.
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
poolingDesc : cudnnPoolingDescriptor
Handle to a previously initialized pooling descriptor.
alpha: float
Scaling factor with which every element of the input tensor is multiplied.
srcDesc : cudnnTensorDescriptor
Handle to the previously initialized input tensor descriptor.
srcData : void_p
Data pointer to GPU memory associated with the tensor descriptor
srcDesc.
    beta: float
        Scaling factor which is applied on every element of the output tensor
        prior to adding the result of the pooling. Note that if beta is zero,
        the output is not read and may contain uninitialized data (including
        NaN values).
destDesc : cudnnTensorDescriptor
Handle to the previously initialized output tensor descriptor.
destData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDesc.
"""
dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
else:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
status = _libcudnn.cudnnPoolingForward(handle, poolingDesc, alphaRef,
srcDesc, srcData, betaRef,
destDesc, destData)
    cudnnCheckStatus(status)
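
# Illustrative sketch (assumption): a NumPy reference for 2x2 max pooling with
# stride 2 on an NCHW array with even spatial dimensions, matching what
# CUDNN_POOLING_MAX produces for that window configuration:
import numpy as np

def maxpool_2x2(x):
    """2x2, stride-2 max pooling over the last two axes of an NCHW array."""
    n, c, h, w = x.shape
    return x.reshape(n, c, h // 2, 2, w // 2, 2).max(axis=(3, 5))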
""""
Perform pooling.
This function computes pooling of input values (i.e., the maximum or average of several
adjacent values) to produce an output with smaller height and/or width.
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
poolingDesc : cudnnPoolingDescriptor
Handle to a previously initialized pooling descriptor.
alpha: float
Scaling factor with which every element of the input tensor is multiplied.
srcDesc : cudnnTensorDescriptor
Handle to the previously initialized input tensor descriptor.
srcData : void_p
Data pointer to GPU memory associated with the tensor descriptor
srcDesc.
beta: float
Scaling factor which is applied on every element of the output tensor prior
to adding the result of the activation Note that if beta is zero, the output
is not read and can contain any uninitialized data (including Nan numbers).
destDesc : cudnnTensorDescriptor
Handle to the previously initialized output tensor descriptor.
destData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDesc.
"""
dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
else:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
status = _libcudnn.cudnnPoolingForward(handle, poolingDesc, alphaRef,
srcDesc, srcData, betaRef,
destDesc, destData)
cudnnCheckStatus(status) |

def cudnnPoolingBackward(handle, poolingDesc, alpha, srcDesc, srcData, srcDiffDesc,
                         srcDiffData, destDesc, destData, beta, destDiffDesc, destDiffData):
    """
Gradients wrt the pooling operation.
This function computes the gradient of a pooling operation.
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
poolingDesc : cudnnPoolingDescriptor
Handle to the previously initialized pooling descriptor.
alpha: float
Scaling factor with which every element of the input tensors is multiplied.
srcDesc : cudnnTensorDescriptor
Handle to the previously initialized input tensor descriptor.
srcData : void_p
Data pointer to GPU memory associated with the tensor descriptor
srcDesc.
srcDiffDesc : cudnnTensorDescriptor
Handle to the previously initialized input differential tensor descriptor.
    srcDiffData : void_p
        Data pointer to GPU memory associated with the tensor descriptor
        srcDiffDesc.
destDesc : cudnnTensorDescriptor
Handle to the previously initialized output tensor descriptor.
destData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDesc.
    beta: float
        Scaling factor which is applied on every element of the output tensor
        prior to adding the result of the pooling gradient. Note that if beta
        is zero, the output is not read and may contain uninitialized data
        (including NaN values).
destDiffDesc : cudnnTensorDescriptor
Handle to the previously initialized output differential tensor descriptor.
destDiffData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDiffDesc.
"""
dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
else:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
status = _libcudnn.cudnnPoolingBackward(handle, poolingDesc, alphaRef,
srcDesc, srcData, srcDiffDesc, srcDiffData,
destDesc, destData, betaRef,
destDiffDesc, destDiffData)
    cudnnCheckStatus(status)

def cudnnActivationForward(handle, mode, alpha, srcDesc, srcData, beta, destDesc, destData):
    """
Apply activation function.
This routine applies a specified neuron activation function element-wise over each input
value.
In-place operation is allowed for this routine; i.e., srcData and destData pointers
may be equal. However, this requires srcDesc and destDesc descriptors to be
identical (particularly, the strides of the input and output must match for in-place
operation to be allowed).
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
mode : cudnnActivationMode
Enumerant to specify the activation mode.
alpha: float
Scaling factor with which every element of the input tensor is multiplied.
    srcDesc : cudnnTensorDescriptor
Handle to the previously initialized input tensor descriptor.
srcData : void_p
Data pointer to GPU memory associated with the tensor descriptor
srcDesc.
    beta: float
        Scaling factor which is applied on every element of the output tensor
        prior to adding the result of the activation. Note that if beta is
        zero, the output is not read and may contain uninitialized data
        (including NaN values).
    destDesc : cudnnTensorDescriptor
Handle to the previously initialized output tensor descriptor.
destData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDesc.
"""
dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
else:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
status = _libcudnn.cudnnActivationForward(handle, mode, alphaRef, srcDesc, srcData,
betaRef, destDesc, destData)
    cudnnCheckStatus(status)
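
# Illustrative sketch (assumption): the activation modes are element-wise; NumPy
# references for the common ones:
import numpy as np

def relu(x):
    return np.maximum(x, 0.0)

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))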
""""
Apply activation function.
This routine applies a specified neuron activation function element-wise over each input
value.
In-place operation is allowed for this routine; i.e., srcData and destData pointers
may be equal. However, this requires srcDesc and destDesc descriptors to be
identical (particularly, the strides of the input and output must match for in-place
operation to be allowed).
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
mode : cudnnActivationMode
Enumerant to specify the activation mode.
alpha: float
Scaling factor with which every element of the input tensor is multiplied.
srcDesc : cudnnTensor4dDescription
Handle to the previously initialized input tensor descriptor.
srcData : void_p
Data pointer to GPU memory associated with the tensor descriptor
srcDesc.
beta: float
Scaling factor which is applied on every element of the output tensor prior
to adding the result of the activation Note that if beta is zero, the output
is not read and can contain any uninitialized data (including Nan numbers).
destDesc : cudnnTensor4dDescription
Handle to the previously initialized output tensor descriptor.
destData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDesc.
"""
dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
else:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
status = _libcudnn.cudnnActivationForward(handle, mode, alphaRef, srcDesc, srcData,
betaRef, destDesc, destData)
cudnnCheckStatus(status) |

def cudnnActivationBackward(handle, mode, alpha, srcDesc, srcData, srcDiffDesc, srcDiffData,
                            destDesc, destData, beta, destDiffDesc, destDiffData):
    """
Gradient of activation function.
This routine computes the gradient of a neuron activation function.
In-place operation is allowed for this routine; i.e., srcData and destData
pointers may be equal and srcDiffData and destDiffData pointers may be equal.
However, this requires the corresponding tensor descriptors to be identical
(particularly, the strides of the input and output must match for in-place operation
to be allowed).
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
mode : cudnnActivationMode
Enumerant to specify the activation mode.
alpha: float
Scaling factor with which every element of the input tensor is multiplied.
srcDesc : cudnnTensorDescriptor
Handle to the previously initialized input tensor descriptor.
srcData : void_p
Data pointer to GPU memory associated with the tensor descriptor
srcDesc.
srcDiffDesc : cudnnTensorDescriptor
Handle to the previously initialized input differential tensor descriptor.
    srcDiffData : void_p
        Data pointer to GPU memory associated with the tensor descriptor
        srcDiffDesc.
destDesc : cudnnTensorDescriptor
Handle to the previously initialized output tensor descriptor.
destData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDesc.
    beta: float
        Scaling factor which is applied on every element of the output tensor
        prior to adding the result of the activation gradient. Note that if
        beta is zero, the output is not read and may contain uninitialized
        data (including NaN values).
destDiffDesc : cudnnTensorDescriptor
Handle to the previously initialized output differential tensor descriptor.
destDiffData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDiffDesc.
"""
dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
if dataType == cudnnDataType['CUDNN_DATA_FLOAT']:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
else:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
status = _libcudnn.cudnnActivationBackward(handle, mode, alphaRef, srcDesc, srcData,
srcDiffDesc, srcDiffData,
destDesc, destData, betaRef,
destDiffDesc, destDiffData)
    cudnnCheckStatus(status)

def api_url(api_endpoint):
"""
Get SendGrid API URL for syncing webhooks.
:param api_endpoint: SendGrid API endpoint, will be appended to API url
"""
    return urllib.basejoin('https://api.sendgrid.com/v3/', api_endpoint.lstrip("/"))
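
# Usage sketch:
#   >>> api_url("/user/webhooks/event/settings")
#   'https://api.sendgrid.com/v3/user/webhooks/event/settings'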
"""
Get SendGrid API URL for syncing webhooks.
:param api_endpoint: SendGrid API endpoint, will be appended to API url
"""
return urllib.basejoin('https://api.sendgrid.com/v3/', api_endpoint.lstrip("/")) |

def auth_header(api_key):
"""
Get SendGrid authorization header for API key.
:param api_key: SendGrid API key to use for authorization
"""
return {"Authorization": "Bearer {}".format(api_key)} | def auth_header(api_key):
"""
Get SendGrid authorization header for API key.
:param api_key: SendGrid API key to use for authorization
"""
return {"Authorization": "Bearer {}".format(api_key)} |

def handle_http_error(response):
"""
Check HTTP response for errors and process them accordingly.
:param response: HTTP response to check
"""
if response.status_code != 200:
# check if there is some info about error in json format
if response.headers.get("content-type") == "application/json":
error_json = response.json().get("errors")
# log every error
for error in error_json:
frappe.errprint("SendGrid HTTP error {}: {}".format(
response.status_code, error.get("message")))
else:
# log status code at least
frappe.errprint("SendGrid HTTP error {}: {}".format(
response.status_code, response.text))
return True
    return False
"""
Check HTTP response for errors and process them accordingly.
:param response: HTTP response to check
"""
if response.status_code != 200:
# check if there is some info about error in json format
if response.headers.get("content-type") == "application/json":
error_json = response.json().get("errors")
# log every error
for error in error_json:
frappe.errprint("SendGrid HTTP error {}: {}".format(
response.status_code, error.get("message")))
else:
# log status code at least
frappe.errprint("SendGrid HTTP error {}: {}".format(
response.status_code, response.text))
return True
return False |

def handle_request_errors(request_method):
"""Decorator that handles errors and provides correct log messages."""
@wraps(request_method)
def safe_request_method(*args, **kwargs):
try:
return request_method(*args, **kwargs)
except requests.ConnectionError:
frappe.errprint("Failed to connect to SendGrid API")
except Exception as e:
frappe.errprint("SendGrid API Request Error: {}".format(e.message))
    return safe_request_method
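
# Usage sketch (fetch_settings is a hypothetical example, not part of this
# module): wrapping an API call so connection problems are logged, not raised.
#
#   @handle_request_errors
#   def fetch_settings(api_key):
#       return requests.get(api_url("/user/webhooks/event/settings"),
#                           headers=auth_header(api_key))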
"""Decorator that handles errors and provides correct log messages."""
@wraps(request_method)
def safe_request_method(*args, **kwargs):
try:
return request_method(*args, **kwargs)
except requests.ConnectionError:
frappe.errprint("Failed to connect to SendGrid API")
except Exception as e:
frappe.errprint("SendGrid API Request Error: {}".format(e.message))
return safe_request_method |

def webhook_exists(api_key, webhook_post_url):
"""
Use SendGrid API to find out if webhook exists already.
:param api_key: SendGrid API key, should be generated in SendGrid settings
:param webhook_post_url: full url for SendGrid webhook with credentials
Response JSON data example from
https://sendgrid.com/docs/API_Reference/Web_API_v3/Webhooks/event.html
{
"enabled": true,
"url": "url",
"group_resubscribe": true,
"delivered": true,
"group_unsubscribe": true,
"spam_report": true,
"bounce": true,
"deferred": true,
"unsubscribe": true,
"processed": true,
"open": true,
"click": true,
"dropped": true
}
"""
r = requests.get(api_url("/user/webhooks/event/settings"),
headers=auth_header(api_key))
if handle_http_error(r):
return
    return _webhook_enabled(r.json(), webhook_post_url)
"""
Use SendGrid API to find out if webhook exists already.
:param api_key: SendGrid API key, should be generated in SendGrid settings
:param webhook_post_url: full url for SendGrid webhook with credentials
Response JSON data example from
https://sendgrid.com/docs/API_Reference/Web_API_v3/Webhooks/event.html
{
"enabled": true,
"url": "url",
"group_resubscribe": true,
"delivered": true,
"group_unsubscribe": true,
"spam_report": true,
"bounce": true,
"deferred": true,
"unsubscribe": true,
"processed": true,
"open": true,
"click": true,
"dropped": true
}
"""
r = requests.get(api_url("/user/webhooks/event/settings"),
headers=auth_header(api_key))
if handle_http_error(r):
return
return _webhook_enabled(r.json(), webhook_post_url) |

def add_webhook(api_key, webhook_post_url):
"""
Use SendGrid API to setup events webhook for given url.
:param api_key: SendGrid API key, should be generated in SendGrid settings
:param webhook_post_url: url for SendGrid events webhook
Note that SendGrid webhooks only support basic HTTP authentication so username
and password should be generated and included in webhook url like this:
http(s)://username:password@domain/foo.php
More on SendGrid webhooks here:
https://sendgrid.com/docs/API_Reference/Webhooks/event.html
"""
webhook_settings = {"enabled": True,
"url": webhook_post_url,
"group_resubscribe": True,
"delivered": True,
"group_unsubscribe": True,
"spam_report": True,
"bounce": True,
"deferred": True,
"unsubscribe": True,
"processed": True,
"open": True,
"click": True,
"dropped": True,
}
r = requests.patch(api_url("/user/webhooks/event/settings"),
data=json.dumps(webhook_settings),
headers=auth_header(api_key))
if handle_http_error(r):
return
    return _webhook_enabled(r.json(), webhook_post_url)
"""
Use SendGrid API to setup events webhook for given url.
:param api_key: SendGrid API key, should be generated in SendGrid settings
:param webhook_post_url: url for SendGrid events webhook
Note that SendGrid webhooks only support basic HTTP authentication so username
and password should be generated and included in webhook url like this:
http(s)://username:password@domain/foo.php
More on SendGrid webhooks here:
https://sendgrid.com/docs/API_Reference/Webhooks/event.html
"""
webhook_settings = {"enabled": True,
"url": webhook_post_url,
"group_resubscribe": True,
"delivered": True,
"group_unsubscribe": True,
"spam_report": True,
"bounce": True,
"deferred": True,
"unsubscribe": True,
"processed": True,
"open": True,
"click": True,
"dropped": True,
}
r = requests.patch(api_url("/user/webhooks/event/settings"),
data=json.dumps(webhook_settings),
headers=auth_header(api_key))
if handle_http_error(r):
return
return _webhook_enabled(r.json(), webhook_post_url) |

def unsubscribe_emails(api_key, endpoint, batch_key="emails", remove_endpoint=None):
"""
Receive list of emails from SendGrid and unsubscribe each email.
:param api_key: SendGrid API key, should be generated in SendGrid settings
:param endpoint: API endpoint to use in order to get list of emails
:param batch_key: key to group emails and perform batch deletion with one request. If
not present - each deletion will become an individual request.
:param remove_endpoint: API endpoint to use in order to remove email from list. If
not present - endpoint for retrieval will be used as removal
endpoint.
"""
if not remove_endpoint:
remove_endpoint = endpoint
header = auth_header(api_key)
r = requests.get(api_url(endpoint), headers=header)
# process errors
if handle_http_error(r):
return
emails = [item["email"] for item in r.json()]
if not emails:
return
# unsubscribe and remove
if batch_key:
# perform batch request
request_data = json.dumps({batch_key: emails})
r = requests.delete(api_url(endpoint), data=request_data, headers=header)
if handle_http_error(r):
return
# mark emails as unsubscribed in erpnext
for email in emails:
global_unsubscribe_and_commit(email)
else:
# perform deletion request for each email
for email in emails:
global_unsubscribe_and_commit(email)
# remove from SendGrid list
email_removal_url = "{}/{}".format(remove_endpoint.rstrip("/"),
urllib.quote_plus(email))
r = requests.delete(api_url(email_removal_url), headers=header)
# check if rate limit reached
if r.status_code == 429:
msg = "SendGrid request rate limit reached for {}".format(
remove_endpoint)
frappe.errprint(msg)
break
# process errors
if handle_http_error(r):
                continue
"""
Receive list of emails from SendGrid and unsubscribe each email.
:param api_key: SendGrid API key, should be generated in SendGrid settings
:param endpoint: API endpoint to use in order to get list of emails
:param batch_key: key to group emails and perform batch deletion with one request. If
not present - each deletion will become an individual request.
:param remove_endpoint: API endpoint to use in order to remove email from list. If
not present - endpoint for retrieval will be used as removal
endpoint.
"""
if not remove_endpoint:
remove_endpoint = endpoint
header = auth_header(api_key)
r = requests.get(api_url(endpoint), headers=header)
# process errors
if handle_http_error(r):
return
emails = [item["email"] for item in r.json()]
if not emails:
return
# unsubscribe and remove
if batch_key:
# perform batch request
request_data = json.dumps({batch_key: emails})
r = requests.delete(api_url(endpoint), data=request_data, headers=header)
if handle_http_error(r):
return
# mark emails as unsubscribed in erpnext
for email in emails:
global_unsubscribe_and_commit(email)
else:
# perform deletion request for each email
for email in emails:
global_unsubscribe_and_commit(email)
# remove from SendGrid list
email_removal_url = "{}/{}".format(remove_endpoint.rstrip("/"),
urllib.quote_plus(email))
r = requests.delete(api_url(email_removal_url), headers=header)
# check if rate limit reached
if r.status_code == 429:
msg = "SendGrid request rate limit reached for {}".format(
remove_endpoint)
frappe.errprint(msg)
break
# process errors
if handle_http_error(r):
continue |

def _get_webhook_credentials():
    """
    Get list of SendGrid webhook credentials for all Email Accounts.
    Reads the values from the db and returns them as a list.
    """
email_accounts = frappe.get_all("Email Account",
fields=["sendgrid_webhook_credentials",
"api_key"],
filters={"enable_outgoing": 1,
"service": "SendGrid"})
webhook_credentials = list()
for account in email_accounts:
if account.sendgrid_webhook_credentials and account.api_key:
webhook_credentials.append(account.sendgrid_webhook_credentials)
if frappe.conf.sendgrid_webhook_credentials:
webhook_credentials.append(
frappe.conf.sendgrid_webhook_credentials)
    return webhook_credentials
"""
Get list of SendGrid webhook credentials for all Email Accounts.
Generator method that gets values from db.
"""
email_accounts = frappe.get_all("Email Account",
fields=["sendgrid_webhook_credentials",
"api_key"],
filters={"enable_outgoing": 1,
"service": "SendGrid"})
webhook_credentials = list()
for account in email_accounts:
if account.sendgrid_webhook_credentials and account.api_key:
webhook_credentials.append(account.sendgrid_webhook_credentials)
if frappe.conf.sendgrid_webhook_credentials:
webhook_credentials.append(
frappe.conf.sendgrid_webhook_credentials)
return webhook_credentials |

def global_unsubscribe_and_commit(email):
"""Set Global Unsubscribe flag for the email and commit to db.
:param email: email address to unsubscribe
"""
try:
unsubscribe_data = {"doctype": "Email Unsubscribe",
"email": email,
"global_unsubscribe": 1}
frappe.get_doc(unsubscribe_data).insert(ignore_permissions=True)
except frappe.DuplicateEntryError:
pass
else:
        frappe.db.commit()
"""Set Global Unsubscribe flag for the email and commit to db.
:param email: email address to unsubscribe
"""
try:
unsubscribe_data = {"doctype": "Email Unsubscribe",
"email": email,
"global_unsubscribe": 1}
frappe.get_doc(unsubscribe_data).insert(ignore_permissions=True)
except frappe.DuplicateEntryError:
pass
else:
frappe.db.commit() |

def unsubscribe_blacklisted():
"""
Get blacklisted emails, unsubscribe them globally and delete them from SendGrid.
Run via Daily Scheduler.
"""
for email_account in frappe.get_all("Email Account",
filters={"service": "SendGrid",
"enable_outgoing": 1},
fields=["api_key",
"sendgrid_webhook_credentials"]):
# don't do it when SendGrid integration is inactive
if not email_account.sendgrid_webhook_credentials or not email_account.api_key:
continue
# process blocked emails
unsubscribe_emails(email_account.api_key,
endpoint="/suppression/blocks")
# process bounced emails
unsubscribe_emails(email_account.api_key,
endpoint="/suppression/bounces")
# process emails that were marked as spam
unsubscribe_emails(email_account.api_key,
endpoint="/suppression/spam_reports")
# process invalid emails
unsubscribe_emails(email_account.api_key,
endpoint="/suppression/invalid_emails")
# process unsubscribed emails
unsubscribe_emails(email_account.api_key,
endpoint="/suppression/unsubscribes",
remove_endpoint="/asm/suppressions/global",
                           batch_key=None)
"""
Get blacklisted emails, unsubscribe them globally and delete them from SendGrid.
Run via Daily Scheduler.
"""
for email_account in frappe.get_all("Email Account",
filters={"service": "SendGrid",
"enable_outgoing": 1},
fields=["api_key",
"sendgrid_webhook_credentials"]):
# don't do it when SendGrid integration is inactive
if not email_account.sendgrid_webhook_credentials or not email_account.api_key:
continue
# process blocked emails
unsubscribe_emails(email_account.api_key,
endpoint="/suppression/blocks")
# process bounced emails
unsubscribe_emails(email_account.api_key,
endpoint="/suppression/bounces")
# process emails that were marked as spam
unsubscribe_emails(email_account.api_key,
endpoint="/suppression/spam_reports")
# process invalid emails
unsubscribe_emails(email_account.api_key,
endpoint="/suppression/invalid_emails")
# process unsubscribed emails
unsubscribe_emails(email_account.api_key,
endpoint="/suppression/unsubscribes",
remove_endpoint="/asm/suppressions/global",
batch_key=None) |

def sync(doc, method=None):
"""Sync Webhook under SendGrid account."""
    if doc.service != "SendGrid":
return
if not (doc.api_key and
doc.enable_outgoing and
doc.smtp_server and
doc.email_id and
doc.password):
frappe.msgprint("Imposible to setup SendGrid webhook (incorrect of settings)")
return
webhook_url = None
if doc.sendgrid_webhook_credentials:
webhook_url = get_webhook_post_url(doc.sendgrid_webhook_credentials)
if webhook_exists(doc.api_key, webhook_url):
frappe.msgprint("SendGrid events webhook already exists")
return
credentials = generate_credentials()
webhook_url = get_webhook_post_url(credentials)
if add_webhook(doc.api_key, webhook_url):
# save webhook credentials in Email Account
doc.sendgrid_webhook_credentials = credentials
doc.db_set("sendgrid_webhook_credentials", credentials)
frappe.db.commit()
frappe.msgprint("SendGrid events webhook created successfuly")
else:
frappe.msgprint("Failed to create SendGrid events webhook")
frappe.errprint("Failed to create SendGrid events webhook")
# always clear key cache
    clear_cache()
"""Sync Webhook under SendGrid account."""
if not doc.service == "SendGrid":
return
if not (doc.api_key and
doc.enable_outgoing and
doc.smtp_server and
doc.email_id and
doc.password):
frappe.msgprint("Imposible to setup SendGrid webhook (incorrect of settings)")
return
webhook_url = None
if doc.sendgrid_webhook_credentials:
webhook_url = get_webhook_post_url(doc.sendgrid_webhook_credentials)
if webhook_exists(doc.api_key, webhook_url):
frappe.msgprint("SendGrid events webhook already exists")
return
credentials = generate_credentials()
webhook_url = get_webhook_post_url(credentials)
if add_webhook(doc.api_key, webhook_url):
# save webhook credentials in Email Account
doc.sendgrid_webhook_credentials = credentials
doc.db_set("sendgrid_webhook_credentials", credentials)
frappe.db.commit()
frappe.msgprint("SendGrid events webhook created successfuly")
else:
frappe.msgprint("Failed to create SendGrid events webhook")
frappe.errprint("Failed to create SendGrid events webhook")
# always clear key cache
clear_cache() |

def generate_string(length=10):
    """
    Generate a string of random letters and digits.
:param length: length of string to generate
"""
symbols = string.ascii_letters + string.digits
return "".join((random.choice(symbols) for i in xrange(length))) | def generate_string(length=10):
"""
Generate string of random symbols and digits.
:param length: length of string to generate
"""
symbols = string.ascii_letters + string.digits
return "".join((random.choice(symbols) for i in xrange(length))) |

def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
    return image_index
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index |

def _get_default_path(self):
    """
    Return the default path where the school lunch dataset is expected to be installed.
    """
    return os.path.join(cfg.DATA_DIR, 'school_lunch_dataset')
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'school_lunch_dataset' ) |

def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
# if not self.config['use_diff']:
# # Exclude the samples labeled as difficult
# non_diff_objs = [
# obj for obj in objs if int(obj.find('difficult').text) == 0]
# # if len(non_diff_objs) != len(objs):
# # print 'Removed {} difficult objects'.format(
# # len(objs) - len(non_diff_objs))
# objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
ishards = np.zeros((num_objs), dtype=np.int32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
diffc = obj.find('difficult')
        difficult = 0 if diffc is None else int(diffc.text)
ishards[ix] = difficult
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_ishard': ishards,
'gt_overlaps': overlaps,
'flipped': False,
'rotated': 0,
'seg_areas': seg_areas,
            }
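
# Illustrative sketch (hypothetical annotation, not taken from the dataset): the
# XML layout this parser expects, per object:
#
#   <object>
#     <name>rice</name>
#     <difficult>0</difficult>
#     <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>110</xmax><ymax>220</ymax></bndbox>
#   </object>
#
# After the 1-pixel shift above, the box becomes [9, 19, 109, 219] and its
# seg_area is (109 - 9 + 1) * (219 - 19 + 1) = 20301.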
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
# if not self.config['use_diff']:
# # Exclude the samples labeled as difficult
# non_diff_objs = [
# obj for obj in objs if int(obj.find('difficult').text) == 0]
# # if len(non_diff_objs) != len(objs):
# # print 'Removed {} difficult objects'.format(
# # len(objs) - len(non_diff_objs))
# objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
ishards = np.zeros((num_objs), dtype=np.int32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
diffc = obj.find('difficult')
difficult = 0 if diffc == None else int(diffc.text)
ishards[ix] = difficult
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_ishard': ishards,
'gt_overlaps': overlaps,
'flipped': False,
'rotated': 0,
'seg_areas': seg_areas,
} |

def batchnorm(input, weight=None, bias=None, running_mean=None, running_var=None, training=True, eps=1e-5, momentum=0.1):
''' momentum = 1 restricts stats to the current mini-batch '''
# This hack only works when momentum is 1 and avoids needing to track running stats
    # by substituting dummy variables
running_mean = torch.zeros(np.prod(np.array(input.data.size()[1]))).cuda()
running_var = torch.ones(np.prod(np.array(input.data.size()[1]))).cuda()
    return F.batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
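
# Illustrative sketch (assumption): with momentum=1 and fresh zero/one running
# stats, the call above reduces to plain standardization of the current
# mini-batch per feature. NumPy reference for the 2D (N, C) case:
import numpy as np

def batchnorm_reference(x, eps=1e-5):
    """Normalize each column of an (N, C) batch by its own mean and variance."""
    return (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + eps)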

def create_inner_imageset(ct, excl_train_mtN):
'''
    inner is the intersection of the train set of excl{dataset} and the trainval set of {dataset}
'''
print("------processing {}-----------".format(ct))
imgsets_path = "../data/Food/Food_{}/ImageSets".format(ct)
anno_path = "../data/Food/Food_{}/Annotations".format(ct)
if excl_train_mtN == 0:
excl_class = get_categories("excl"+ct+"_train")
else:
excl_class = get_categories(
"excl"+ct+"_trainmt{}".format(excl_train_mtN))
    # three ways to filter the XML files via a callback function
# 1. save extra info of callback with class
fx = filter_xml(excl_class)
process_all_xml_files_from_dir(anno_path, fx.process)
print(len(fx.reserver_xmls))
filter_xmls = fx.reserver_xmls
    # save the filtering results
print("saving inner mt {} sets:{}".format(
excl_train_mtN, len(filter_xmls)))
print(imgsets_path)
if excl_train_mtN == 0:
saving_file = "inner.txt"
else:
saving_file = "innermt{}.txt".format(excl_train_mtN)
with open(os.path.join(imgsets_path, saving_file), 'w') as f:
for i in filter_xmls:
x_name = os.path.split(i)[1]
x_name = os.path.splitext(x_name)[0]
            f.write(x_name + '\n')

def create_few_inner_for_cross_domain(ct, imgset, mtN, fewN):
"""select_few_inner_for_train
Selecting few shot training samples and from the val of canteen
:param ct:
:param mtN: N of mt which means the number of training sample is more than N
:param fewN: the number of selected sample for each categories
"""
print("------processing {}-selecting few inner--------".format(ct))
imgsets_path = "../data/Food/Food_{}/ImageSets".format(ct)
anno_path = "../data/Food/Food_{}/Annotations".format(ct)
imset_path = os.path.join(imgsets_path, imgset+'.txt')
if mtN == 0:
excl_classes = get_categories("excl"+ct+"_train")
else:
excl_classes = get_categories("excl"+ct+"_trainmt{}".format(mtN))
cls_sample_count = {}
for ex_cls in excl_classes[1:]:
cls_sample_count[ex_cls] = 0
few_filter = Xml_in_few_sample_filter(cls_sample_count, fewN)
dishes = create_dishes(ct, 'innermt10val')
process_xml_from_file(imset_path, anno_path,
few_filter.process)
    # save the filtering results
def saving_file(xmls, imgset):
print("saving inner few{} mt {} {} sets:{}".format(
fewN, mtN, imgset, len(xmls)))
if mtN == 0:
saving_file = "innermt10valfew{}{}.txt".format(fewN, imgset)
else:
saving_file = "innermt10valfew{}mt{}{}.txt".format(
fewN, mtN, imgset)
with open(os.path.join(imgsets_path, saving_file), 'w') as f:
for x_name in xmls:
f.write(x_name + '\n')
few_filter.clean_discard_by_dishes(dishes)
saving_file(few_filter.reserver_xmls, 'train')
    saving_file(few_filter.discard_xmls, 'val')
"""select_few_inner_for_train
Selecting few shot training samples and from the val of canteen
:param ct:
:param mtN: N of mt which means the number of training sample is more than N
:param fewN: the number of selected sample for each categories
"""
print("------processing {}-selecting few inner--------".format(ct))
imgsets_path = "../data/Food/Food_{}/ImageSets".format(ct)
anno_path = "../data/Food/Food_{}/Annotations".format(ct)
imset_path = os.path.join(imgsets_path, imgset+'.txt')
if mtN == 0:
excl_classes = get_categories("excl"+ct+"_train")
else:
excl_classes = get_categories("excl"+ct+"_trainmt{}".format(mtN))
cls_sample_count = {}
for ex_cls in excl_classes[1:]:
cls_sample_count[ex_cls] = 0
few_filter = Xml_in_few_sample_filter(cls_sample_count, fewN)
dishes = create_dishes(ct, 'innermt10val')
process_xml_from_file(imset_path, anno_path,
few_filter.process)
# 保存筛选信息
def saving_file(xmls, imgset):
print("saving inner few{} mt {} {} sets:{}".format(
fewN, mtN, imgset, len(xmls)))
if mtN == 0:
saving_file = "innermt10valfew{}{}.txt".format(fewN, imgset)
else:
saving_file = "innermt10valfew{}mt{}{}.txt".format(
fewN, mtN, imgset)
with open(os.path.join(imgsets_path, saving_file), 'w') as f:
for x_name in xmls:
f.write(x_name + '\n')
few_filter.clean_discard_by_dishes(dishes)
saving_file(few_filter.reserver_xmls, 'train')
saving_file(few_filter.discard_xmls, 'val') |

def create_inner_imagesets():
'''
    inner is the intersection of the train set of excl{dataset} and the trainval set of {dataset}
'''
    canteens = ['Arts', 'Science', 'TechMixedVeg',
                'TechChicken', 'UTown', 'YIH']
    for ct in canteens:
print("------processing {}-----------".format(ct))
imgsets_path = "../data/Food/Food_{}/ImageSets".format(ct)
anno_path = "../data/Food/Food_{}/Annotations".format(ct)
for N in [0, 10, 30, 50, 100]:
if N == 0:
excl_class = get_categories("excl"+ct+"_train")
else:
excl_class = get_categories("excl"+ct+"_trainmt{}".format(N))
            # three ways to filter the XML files via a callback function
# 1. save extra info of callback with class
# fx = filter_xml(tech_classes)
# process_all_xml_files_from_dir(path, fx.process)
# print(len(fx.reserver_xmls))
            # 2. keep the callback's extra state in a closure
fx_clo = filter_clo(excl_class)
process_all_xml_files_from_dir(anno_path, fx_clo)
            # print(len(fx_clo.__closure__))  # __closure__ is a tuple of cell objects
            filter_xmls = fx_clo.__closure__[
                0].cell_contents  # a cell object exposes its value via cell_contents
            # 3. via a coroutine
# how to implement??
# NotImplemented
            # save the filtering results
print("saving inner mt {} sets:{}".format(N, len(filter_xmls)))
print(imgsets_path)
if N == 0:
saving_file = "inner.txt"
else:
saving_file = "innermt{}.txt".format(N)
with open(os.path.join(imgsets_path, saving_file), 'w') as f:
for i in filter_xmls:
x_name = os.path.split(i)[1]
x_name = os.path.splitext(x_name)[0]
                    f.write(x_name + '\n')

def prepare_roidb(imdb):
"""Enrich the imdb's roidb by adding some derived quantities that
are useful for training. This function precomputes the maximum
overlap, taken over ground-truth boxes, between each ROI and
each ground-truth box. The class with maximum overlap is also
recorded.
"""
roidb = imdb.roidb
if not (imdb.name.startswith('coco')):
# sizes = [PIL.Image.open(imdb.image_path_at(i)).size
# for i in range(imdb.num_images)]
widths = imdb._get_widths() * int(imdb.num_images / imdb.origin_img_len)
heights = imdb._get_heights() * int(imdb.num_images / imdb.origin_img_len)
for i in range(len(imdb.image_index)):
roidb[i]['img_id'] = imdb.image_id_at(i)
roidb[i]['image'] = imdb.image_path_at(i)
if not (imdb.name.startswith('coco')):
if roidb[i]['rotated'] in [90, 270]:
roidb[i]['width'] = heights[i] # sizes[i][1]
roidb[i]['height'] = widths[i] # sizes[i][0]
else:
roidb[i]['width'] = widths[i] # sizes[i][0]
roidb[i]['height'] = heights[i] # sizes[i][1]
# need gt_overlaps as a dense array for argmax
gt_overlaps = roidb[i]['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
roidb[i]['max_classes'] = max_classes
roidb[i]['max_overlaps'] = max_overlaps
# sanity checks
# max overlap of 0 => class should be zero (background)
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
# max overlap > 0 => class should not be zero (must be a fg class)
nonzero_inds = np.where(max_overlaps > 0)[0]
#assert all(max_classes[nonzero_inds] != 0)
if all(max_classes[nonzero_inds] == 0):
            pdb.set_trace()
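
# Illustrative sketch: how max_overlaps / max_classes fall out of a dense
# gt_overlaps matrix (rows = ROIs, columns = classes; column 0 = background):
import numpy as np

toy_overlaps = np.array([[0.0, 0.0, 0.0],   # background ROI
                         [0.0, 0.7, 0.2]])  # foreground ROI of class 1
assert toy_overlaps.max(axis=1).tolist() == [0.0, 0.7]
assert toy_overlaps.argmax(axis=1).tolist() == [0, 1]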
"""Enrich the imdb's roidb by adding some derived quantities that
are useful for training. This function precomputes the maximum
overlap, taken over ground-truth boxes, between each ROI and
each ground-truth box. The class with maximum overlap is also
recorded.
"""
roidb = imdb.roidb
if not (imdb.name.startswith('coco')):
# sizes = [PIL.Image.open(imdb.image_path_at(i)).size
# for i in range(imdb.num_images)]
widths = imdb._get_widths() * int(imdb.num_images / imdb.origin_img_len)
heights = imdb._get_heights() * int(imdb.num_images / imdb.origin_img_len)
for i in range(len(imdb.image_index)):
roidb[i]['img_id'] = imdb.image_id_at(i)
roidb[i]['image'] = imdb.image_path_at(i)
if not (imdb.name.startswith('coco')):
if roidb[i]['rotated'] in [90, 270]:
roidb[i]['width'] = heights[i] # sizes[i][1]
roidb[i]['height'] = widths[i] # sizes[i][0]
else:
roidb[i]['width'] = widths[i] # sizes[i][0]
roidb[i]['height'] = heights[i] # sizes[i][1]
# need gt_overlaps as a dense array for argmax
gt_overlaps = roidb[i]['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
roidb[i]['max_classes'] = max_classes
roidb[i]['max_overlaps'] = max_overlaps
# sanity checks
# max overlap of 0 => class should be zero (background)
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
# max overlap > 0 => class should not be zero (must be a fg class)
nonzero_inds = np.where(max_overlaps > 0)[0]
#assert all(max_classes[nonzero_inds] != 0)
if all(max_classes[nonzero_inds] == 0):
pdb.set_trace() |
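# A small standalone check of the overlap bookkeeping in prepare_roidb: for a
# dense gt_overlaps matrix (one row per ROI, one column per class), max over
# columns gives max_overlaps and argmax gives max_classes, and an all-zero row
# (no gt overlap) correctly maps to class 0, i.e. background.
import numpy as np

gt_overlaps = np.array([[0.0, 0.0, 0.0],   # no overlap -> background (class 0)
                        [0.0, 0.7, 0.2],   # best overlap with class 1
                        [0.0, 0.1, 0.9]])  # best overlap with class 2
max_overlaps = gt_overlaps.max(axis=1)
max_classes = gt_overlaps.argmax(axis=1)
assert list(max_classes) == [0, 1, 2]
assert max_overlaps[0] == 0.0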
Python | def append_rotated_images(self, anchor: int, flipped=False):
"""append_rotate_image
append rotated annotations from the origin gt rois
:param anchor: only support 90, 180, 270 degree, clockwised
:type anchor: int
"""
num_images = self.origin_img_len
        # TODO: the widths and heights used here may lead to errors
widths = self._get_widths()
heights = self._get_heights()
if flipped:
base = (self._augmentations.index("flipped") + 1) * num_images
else:
base = 0
for i in range(base, base+num_images):
boxes = self.roidb[i]['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldy1 = boxes[:, 1].copy()
oldx2 = boxes[:, 2].copy()
oldy2 = boxes[:, 3].copy()
size_i = i - num_images
if anchor == 90:
boxes[:, 0] = oldy1
boxes[:, 1] = widths[size_i] - oldx2
boxes[:, 2] = oldy2
boxes[:, 3] = widths[size_i] - oldx1
elif anchor == 180:
boxes[:, 0] = widths[size_i] - oldx2
boxes[:, 1] = heights[size_i] - oldy2
boxes[:, 2] = widths[size_i] - oldx1
boxes[:, 3] = heights[size_i] - oldy1
elif anchor == 270:
boxes[:, 0] = heights[size_i] - oldy2
boxes[:, 1] = oldx1
boxes[:, 2] = heights[size_i] - oldy1
boxes[:, 3] = oldx2
assert (boxes[:, 2] >= boxes[:, 0]).all()
assert (boxes[:, 3] >= boxes[:, 1]).all()
entry = {'boxes': boxes,
'gt_overlaps': self.roidb[i]['gt_overlaps'],
'gt_classes': self.roidb[i]['gt_classes'],
'flipped': flipped,
'rotated': anchor}
self.roidb.append(entry)
self._image_index += self._image_index[:num_images]
if flipped:
self._augmentations.append("flippedrotated{}".format(anchor))
else:
self._augmentations.append("rotated{}".format(anchor)) | def append_rotated_images(self, anchor: int, flipped=False):
"""append_rotate_image
append rotated annotations from the origin gt rois
:param anchor: only support 90, 180, 270 degree, clockwised
:type anchor: int
"""
num_images = self.origin_img_len
# TODO width and heights may lead errors
widths = self._get_widths()
heights = self._get_heights()
if flipped:
base = (self._augmentations.index("flipped") + 1) * num_images
else:
base = 0
for i in range(base, base+num_images):
boxes = self.roidb[i]['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldy1 = boxes[:, 1].copy()
oldx2 = boxes[:, 2].copy()
oldy2 = boxes[:, 3].copy()
size_i = i - num_images
if anchor == 90:
boxes[:, 0] = oldy1
boxes[:, 1] = widths[size_i] - oldx2
boxes[:, 2] = oldy2
boxes[:, 3] = widths[size_i] - oldx1
elif anchor == 180:
boxes[:, 0] = widths[size_i] - oldx2
boxes[:, 1] = heights[size_i] - oldy2
boxes[:, 2] = widths[size_i] - oldx1
boxes[:, 3] = heights[size_i] - oldy1
elif anchor == 270:
boxes[:, 0] = heights[size_i] - oldy2
boxes[:, 1] = oldx1
boxes[:, 2] = heights[size_i] - oldy1
boxes[:, 3] = oldx2
assert (boxes[:, 2] >= boxes[:, 0]).all()
assert (boxes[:, 3] >= boxes[:, 1]).all()
entry = {'boxes': boxes,
'gt_overlaps': self.roidb[i]['gt_overlaps'],
'gt_classes': self.roidb[i]['gt_classes'],
'flipped': flipped,
'rotated': anchor}
self.roidb.append(entry)
self._image_index += self._image_index[:num_images]
if flipped:
self._augmentations.append("flippedrotated{}".format(anchor))
else:
self._augmentations.append("rotated{}".format(anchor)) |
Python | def parse_rec(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
obj_struct['height'] = int(tree.find('size').find('height').text)
obj_struct['width'] = int(tree.find('size').find('width').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_struct)
    return objects
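# parse_rec takes anything ET.parse accepts, so a minimal in-memory PASCAL VOC
# annotation is enough to see the expected XML layout; this made-up record
# only exercises the fields read above.
import io

voc_xml = """<annotation>
  <size><width>500</width><height>375</height></size>
  <object>
    <name>dog</name><pose>Left</pose>
    <truncated>0</truncated><difficult>0</difficult>
    <bndbox><xmin>48</xmin><ymin>240</ymin><xmax>195</xmax><ymax>371</ymax></bndbox>
  </object>
</annotation>"""
objs = parse_rec(io.StringIO(voc_xml))
assert objs[0]['name'] == 'dog'
assert objs[0]['bbox'] == [48, 240, 195, 371]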
Python | def topk_acc_of_cls_per_dish_2(all_boxes,
annopath,
imagesetfile,
cls_idx,
classname,
cachedir,
threshold=0.5,
ovthresh=0.5,
topk=5
):
"""topk_acc_of_cls_per_dish_2
topk accuracy of the dish whose categrioy is classname
topk accuracy: TP/npos. TP is 1 when topK reuslt has the correct result, otherwise is 0
:param all_boxes:
:param annopath:
:param imagesetfile:
:param classname:
:param cachedir:
:param threshold:
:param ovthresh:
:param topk:
"""
# first load gt
imagenames, recs = get_gt_recs(cachedir, imagesetfile, annopath)
# extract gt objects for this class
class_recs = {}
cls_idx_recs = {}
npos = 0
for img_idx, imagename in enumerate(imagenames):
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
cls_idx_recs[img_idx] = {'bbox': bbox,
'difficult': difficult,
'det': det}
TP = 0
FP = 0
# evaluate on each image
for img_idx in cls_idx_recs:
# get all box of img
img_all_boxes = [b[img_idx] for b in all_boxes]
# add cls_idx to box_cls
bboxes = None
for cls_idx_det, boxes_of_cls in enumerate(img_all_boxes):
if len(boxes_of_cls) != 0:
                cls_column = np.zeros(len(boxes_of_cls)) + cls_idx_det
                # img_all_boxes[cls_idx] = np.c_[boxes_of_cls, cls_column]
                if bboxes is None:
                    bboxes = np.c_[boxes_of_cls, cls_column]
                else:
                    bboxes = np.vstack(
                        (bboxes, np.c_[boxes_of_cls, cls_column]))
# choose topk results which include cls information
if bboxes is None:
continue
else:
# sort results
bboxes = bboxes[bboxes[:, 4].argsort()]
bboxes = bboxes[::-1]
bboxes = bboxes[np.where(bboxes[:, 5] != 1)]
# topk result
for det_box in bboxes[:topk]:
# cls_name = classes[int(det_box[5])]
if int(det_box[5]) != cls_idx:
continue
# gt
bbox = cls_idx_recs[img_idx]['bbox']
if len(bbox) > 0:
omax, jmax = cal_overlap(bbox, det_box)
if omax > ovthresh:
# if int(det_box[6]) == cls_idx
TP += 1
else:
FP += 1
else:
FP += 1
# use np to return nan while the npos is zero
accuracy = TP / np.float32(npos)
falseAlarm = FP / np.float32(TP + FP)
    return accuracy, falseAlarm
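# cal_overlap is not defined in this excerpt; a plausible standalone stand-in,
# for illustration only, computes the IoU between every gt box and a single
# detection and returns the best value and its index, matching how omax/jmax
# are used above.
import numpy as np

def cal_overlap_sketch(gt_boxes, det_box):
    ixmin = np.maximum(gt_boxes[:, 0], det_box[0])
    iymin = np.maximum(gt_boxes[:, 1], det_box[1])
    ixmax = np.minimum(gt_boxes[:, 2], det_box[2])
    iymax = np.minimum(gt_boxes[:, 3], det_box[3])
    iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
    ih = np.maximum(iymax - iymin + 1.0, 0.0)
    inter = iw * ih
    union = ((det_box[2] - det_box[0] + 1.0) * (det_box[3] - det_box[1] + 1.0)
             + (gt_boxes[:, 2] - gt_boxes[:, 0] + 1.0)
             * (gt_boxes[:, 3] - gt_boxes[:, 1] + 1.0) - inter)
    overlaps = inter / union
    return overlaps.max(), overlaps.argmax()

gt = np.array([[0, 0, 10, 10], [20, 20, 30, 30]], dtype=float)
omax, jmax = cal_overlap_sketch(gt, np.array([0, 0, 10, 10], dtype=float))
assert omax == 1.0 and jmax == 0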
Python | def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
# image_set_file = os.path.join(self._data_path, 'ImageSets',
# self._image_set + '.txt')
# assert os.path.exists(image_set_file), \
# 'Path does not exist: {}'.format(image_set_file)
# with open(image_set_file) as f:
# image_index = [x.strip() for x in f.readlines()]
dishes = self.create_dishes()
random.shuffle(dishes)
train_dishes = dishes[0:5]
test_dishes = dishes[5:10]
self.train_index = []
self.test_index = []
for dish in train_dishes:
self.train_index.extend(dish)
for dish in test_dishes:
self.test_index.extend(dish)
# TODO save test_dishes
        return self.train_index
Python | def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
        return os.path.join(cfg.DATA_DIR, 'Food')
Python | def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
#cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
# if os.path.exists(cache_file):
# with open(cache_file, 'rb') as fid:
# roidb = pickle.load(fid)
# print('{} gt roidb loaded from {}'.format(self.name, cache_file))
# return roidb
print("{} gt roidb are reading".format(self.name))
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
# with open(cache_file, 'wb') as fid:
# pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
#print('wrote gt roidb to {}'.format(cache_file))
        return gt_roidb
Python | def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
# if not self.config['use_diff']:
# # Exclude the samples labeled as difficult
# non_diff_objs = [
# obj for obj in objs if int(obj.find('difficult').text) == 0]
# # if len(non_diff_objs) != len(objs):
# # print 'Removed {} difficult objects'.format(
# # len(objs) - len(non_diff_objs))
# objs = non_diff_objs
        # exclude unused classes
ori_num_objs = len(objs)
num_objs = 0
for obj in objs:
            try:
                cls = self._class_to_ind[obj.find('name').text.lower().strip()]
                num_objs += 1
            except KeyError:
                continue
        # every annotation is expected to keep at least one known class
        if num_objs == 0:
            import pdb
            pdb.set_trace()
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
ishards = np.zeros((num_objs), dtype=np.int32)
# Load object bounding boxes into a data frame.
ix = 0
for obj in objs:
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
            # the range of a food label is (0, width), which may be caused by bugs in labelImg 1.4
x1 = max(0.0, float(bbox.find('xmin').text) - 1)
y1 = max(0.0, float(bbox.find('ymin').text) - 1)
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
diffc = obj.find('difficult')
            difficult = 0 if diffc is None else int(diffc.text)
            try:
                cls = self._class_to_ind[obj.find('name').text.lower().strip()]
                # cls = int(obj.find('name').text.strip())
            except KeyError:
                print("Warning: class not found in file:")
                print(filename)
                continue
ishards[ix] = difficult
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
try:
overlaps[ix, cls] = 1.0
except:
print(filename)
raise
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
ix += 1
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_ishard': ishards,
'gt_overlaps': overlaps,
'flipped': False,
'rotated': 0,
                'seg_areas': seg_areas}
Python | def create_excl_ct__softlink():
"""
build softlink of excl canteens from original canteens which we collect
"""
for exclude_ct in canteens + ['All']:
print("canteens: exclude ", exclude_ct)
# create
if exclude_ct == 'All':
exclude_food_root = os.path.join(
food_dataset_dir, 'Food_'+exclude_ct)
else:
exclude_food_root = os.path.join(
food_dataset_dir, 'Food_excl'+exclude_ct)
exclude_food_Anno_dir = os.path.join(exclude_food_root, "Annotations")
exclude_food_ImSet_dir = os.path.join(exclude_food_root, "ImageSets")
exclude_food_JPEG_dir = os.path.join(exclude_food_root, "JPEGImages")
create_dir(exclude_food_Anno_dir)
create_dir(exclude_food_JPEG_dir)
create_dir(exclude_food_ImSet_dir)
exclude_trainval_path = os.path.join(
exclude_food_ImSet_dir, 'trainval.txt')
trainval_content = []
for ct in canteens:
if exclude_ct == ct:
continue
ct_root = os.path.join(food_dataset_dir, 'Food_' + ct)
ct_Anno_dir = os.path.join(ct_root, 'Annotations')
ct_ImSet_dir = os.path.join(ct_root, 'ImageSets')
ct_JPEG_dir = os.path.join(ct_root, 'JPEGImages')
            # handle spaces in file names
            # create soft links for the mixed dataset
for f in os.listdir(ct_Anno_dir):
os.symlink(ct_Anno_dir+'/' + f,
exclude_food_Anno_dir + '/' + f)
for f in os.listdir(ct_JPEG_dir):
os.symlink(ct_JPEG_dir+'/' + f, exclude_food_JPEG_dir+'/' + f)
# trainval.txt
ct_trainval_path = os.path.join(ct_ImSet_dir, 'trainval.txt')
with open(ct_trainval_path) as f:
trainval_content += f.readlines()
print(len(trainval_content))
with open(exclude_trainval_path, 'w') as f:
f.writelines(trainval_content)
#train_content = []
#val_content = []
        # TODO: images of the same dish taken from different angles should be split.
# for i, sample in enumerate(trainval_content):
# if i % 8 == 0 or i % 9 == 0:
# val_content.append(sample)
# else:
# train_content.append(sample)
# with open(os.path.join(exclude_food_ImSet_dir, 'train.txt'), 'w') as f:
# print("len of training set", len(train_content))
# f.writelines(train_content)
# with open(os.path.join(exclude_food_ImSet_dir, 'val.txt'), 'w') as f:
# print("len of val set", len(val_content))
        # f.writelines(val_content)
Python | def rotate_bbox(im_shape, bbox, exif_flag):
"""
rotate one bbox base the exif flag and original image shape.
:param im_shape: tuples, shape of origin image: (width, height)
:param bbox:
    :param exif_flag: EXIF orientation flag (tag 274)
:return:
"""
width, height = im_shape
y = height
x = width
if exif_flag == 0 or exif_flag == 1:
return bbox
# 8 is clockwise 90 degree
# 3 is 180
# 6 is 270
if exif_flag == 6:
x1 = y - bbox[3]
y1 = bbox[0]
x2 = y - bbox[1]
y2 = bbox[2]
elif exif_flag == 8:
x1 = bbox[1]
y1 = x - bbox[2]
x2 = bbox[3]
y2 = x - bbox[0]
elif exif_flag == 3:
x1 = x - bbox[2]
y1 = y - bbox[3]
x2 = x - bbox[0]
y2 = y - bbox[1]
    else:
        raise ValueError("unsupported exif orientation flag: {}".format(exif_flag))
    return [x1, y1, x2, y2]
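# A quick check of the flag-3 (180 degree) branch of rotate_bbox: in a
# 100 x 80 image, the box (10, 20, 30, 40) should map to
# (width - x2, height - y2, width - x1, height - y1) = (70, 40, 90, 60).
assert rotate_bbox((100, 80), [10, 20, 30, 40], 3) == [70, 40, 90, 60]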
Python | def mutate(self, scale=0.01, amount=0.1):
    """
    Change random weights by a small amount.
    params:
        scale: std of the weight deltas
        amount: fraction of the weights that will be modified
    """
for layer in self.layers:
h, w = layer.W.shape
to_mutate = np.random.rand(h+1,w) < amount
change = np.random.normal(loc=0, scale=scale, size=(h+1, w)) * to_mutate
layer.W += change[:-1, :]
        # layer.b += change[-1, :] / 10
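# A minimal sketch exercising mutate as a plain function, assuming layers
# expose a weight matrix W (the Layer/DummyNet classes below are made up).
# With amount=1.0 every weight is selected for mutation, so W should change.
import numpy as np

class Layer(object):
    def __init__(self, h, w):
        self.W = np.zeros((h, w))

class DummyNet(object):
    def __init__(self):
        self.layers = [Layer(3, 2)]

net = DummyNet()
before = net.layers[0].W.copy()
mutate(net, scale=0.5, amount=1.0)  # call the method above with net as self
assert not np.allclose(before, net.layers[0].W)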
Python | def default_context(papers, **kwargs):
""" build a default context for the frontend """
gvars = {'num_papers': len(jall['rels'])}
gvars.update(kwargs) # insert anything else from kwargs into global context
context = {'papers': papers, 'gvars': gvars}
    return context
Python | def main():
"""start the game of life in a new widget
"""
width, height = 20, 20
grid = _make_grid_from_size(width, height)
app = QApplication([])
game_of_life_widget = GameOfLifeWidget(width, height, grid)
game_of_life_widget.show()
    app.exec_()
Python | def project(directory: str, config_file: str, **overrides) -> dict:
"""Returns back the complete configuration - including all sub configuration components
defined below that `portray` was able to determine for the project
"""
if not (
os.path.isfile(os.path.join(directory, config_file))
or os.path.isfile(os.path.join(directory, "setup.py"))
):
raise NoProjectFound(directory)
project_config = {**PORTRAY_DEFAULTS, "directory": directory} # type: Dict[str, Any]
project_config.update(toml(os.path.join(directory, config_file), **overrides))
project_config.setdefault("modules", [os.path.basename(os.getcwd())])
project_config.setdefault("pdoc3", {}).setdefault("modules", project_config["modules"])
project_config["mkdocs"] = mkdocs(directory, **project_config.get("mkdocs", {}))
project_config["pdoc3"] = pdoc3(directory, **project_config.get("pdoc3", {}))
    return project_config
Python | def pdoc3(config: dict) -> None:
"""Render this project using the specified pdoc config passed into pdoc.
This rendering is from code definition to Markdown so that
it will be compatible with MkDocs.
"""
try:
pdoc.cli.main(Namespace(**config))
except TypeError as type_error:
if not "show_type_annotations=True" in config["config"]:
raise
print(type_error)
print("WARNING: A type error was thrown. Attempting graceful degradation to no type hints")
config["config"].remove("show_type_annotations=True")
config["config"].append("show_type_annotations=False")
        pdoc.cli.main(Namespace(**config))
Python | def documentation_in_temp_folder(config: dict):
"""Build documentation within a temp folder, returning that folder name before it is deleted."""
with tempfile.TemporaryDirectory() as input_dir:
input_dir = os.path.join(input_dir, "input")
with tempfile.TemporaryDirectory() as temp_output_dir:
shutil.copytree(config["directory"], input_dir)
if "output_dir" not in config["pdoc3"]:
config["pdoc3"]["output_dir"] = os.path.join(input_dir, "reference")
pdoc3(config["pdoc3"])
if "docs_dir" not in config["mkdocs"]:
config["mkdocs"]["docs_dir"] = input_dir
if "site_dir" not in config["mkdocs"]:
config["mkdocs"]["site_dir"] = temp_output_dir
if "nav" not in config["mkdocs"]:
nav = config["mkdocs"]["nav"] = []
root_docs = sorted(glob(os.path.join(input_dir, "*.md")))
readme_doc = os.path.join(input_dir, "README.md")
if readme_doc in root_docs:
root_docs.remove(readme_doc)
nav.append({"Home": "README.md"})
nav.extend(_doc(doc, input_dir, config) for doc in root_docs)
nav.extend(
_nested_docs(os.path.join(input_dir, config["docs_dir"]), input_dir, config)
)
reference_docs = _nested_docs(config["pdoc3"]["output_dir"], input_dir, config)
nav.append({"Reference": reference_docs}) # type: ignore
mkdocs(config["mkdocs"])
            yield temp_output_dir
Python | def _is_format_endpoint(self, pattern):
"""
Exclude endpoints with a "format" parameter
"""
        return '?P<format>' in pattern._regex
Python | def _set_cor_per_frame(self):
""" Locate the index for the current frame/slice being processed.
Set the centre of rotation (cor) for the current frame.
"""
        if isinstance(self.center, (list, np.ndarray)):
count = self.get_process_frames_counter()
current_idx = self.get_global_frame_index()[count]
self.frame_center = self.center[current_idx]
else:
            self.frame_center = self.center
Python | def _calculate_overlap(self):
""" Use the centre of rotation for the current frame to
calculate the overlap and shift values.
"""
if (self.frame_center <= 0) or (self.frame_center > self.width):
self.frame_center = self.mid_width
center_int = np.int16(np.floor(self.frame_center))
self.subpixel_shift = self.frame_center - center_int
if self.frame_center < self.mid_width:
self.overlap = 2 * center_int
self.cor = self.width + center_int
else:
self.overlap = 2 * (self.width - center_int)
self.cor = center_int
list_wedge = np.linspace(1.0, 0.0, self.overlap)
self.mat_wedge_left[:, -self.overlap:] = np.float32(list_wedge)
        self.mat_wedge_right = np.fliplr(self.mat_wedge_left)
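# A standalone rerun of the overlap arithmetic above, assuming a frame width
# of 2560 (so mid_width = 1280) and a centre of rotation at 700.0, i.e. left
# of centre: the overlap is 2 * 700 and the joint centre sits at width + 700.
import numpy as np

width, mid_width = 2560, 1280
frame_center = 700.0
center_int = int(np.floor(frame_center))
subpixel_shift = frame_center - center_int
if frame_center < mid_width:
    overlap = 2 * center_int
    cor = width + center_int
else:
    overlap = 2 * (width - center_int)
    cor = center_int
assert (overlap, cor, subpixel_shift) == (1400, 3260, 0.0)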
Python | def search_overlap(self, mat1, mat2, win_width, side, denoise=True, norm=False,
use_overlap=False):
"""
        Calculate the correlation metrics between a rectangular region, defined
        by the window width, on the utmost left/right side of image 2 and a
        same-size region in image 1 as that region slides across image 1.
Parameters
----------
mat1 : array_like
2D array. Projection image or sinogram image.
mat2 : array_like
2D array. Projection image or sinogram image.
win_width : int
Width of the searching window.
side : {0, 1}
Only two options: 0 or 1. It is used to indicate the overlap side
respects to image 1. "0" corresponds to the left side. "1" corresponds
to the right side.
denoise : bool, optional
Apply the Gaussian filter if True.
norm : bool, optional
Apply the normalization if True.
use_overlap : bool, optional
Use the combination of images in the overlap area for calculating
correlation coefficients if True.
Returns
-------
list_metric : array_like
1D array. List of the correlation metrics.
offset : int
Initial position of the searching window where the position
corresponds to the center of the window.
"""
if denoise is True:
mat1 = ndi.gaussian_filter(mat1, (2, 2), mode='reflect')
mat2 = ndi.gaussian_filter(mat2, (2, 2), mode='reflect')
(nrow1, ncol1) = mat1.shape
(nrow2, ncol2) = mat2.shape
if nrow1 != nrow2:
raise ValueError("Two images are not at the same height!!!")
win_width = np.int16(np.clip(win_width, 6, min(ncol1, ncol2) // 2 - 1))
offset = win_width // 2
win_width = 2 * offset # Make it even
ramp_down = np.linspace(1.0, 0.0, win_width)
ramp_up = 1.0 - ramp_down
wei_down = np.tile(ramp_down, (nrow1, 1))
wei_up = np.tile(ramp_up, (nrow1, 1))
if side == 1:
mat2_roi = mat2[:, 0:win_width]
mat2_roi_wei = mat2_roi * wei_up
else:
mat2_roi = mat2[:, ncol2 - win_width:]
mat2_roi_wei = mat2_roi * wei_down
list_mean2 = np.mean(np.abs(mat2_roi), axis=1)
list_pos = np.arange(offset, ncol1 - offset)
num_metric = len(list_pos)
list_metric = np.ones(num_metric, dtype=np.float32)
for i, pos in enumerate(list_pos):
mat1_roi = mat1[:, pos - offset:pos + offset]
if use_overlap is True:
if side == 1:
mat1_roi_wei = mat1_roi * wei_down
else:
mat1_roi_wei = mat1_roi * wei_up
if norm is True:
list_mean1 = np.mean(np.abs(mat1_roi), axis=1)
list_fact = list_mean2 / list_mean1
mat_fact = np.transpose(np.tile(list_fact, (win_width, 1)))
mat1_roi = mat1_roi * mat_fact
if use_overlap is True:
mat1_roi_wei = mat1_roi_wei * mat_fact
if use_overlap is True:
mat_comb = mat1_roi_wei + mat2_roi_wei
list_metric[i] = (self.correlation_metric(mat1_roi, mat2_roi)
+ self.correlation_metric(mat1_roi, mat_comb)
+ self.correlation_metric(mat2_roi, mat_comb)) / 3.0
else:
list_metric[i] = self.correlation_metric(mat1_roi, mat2_roi)
min_metric = np.min(list_metric)
if min_metric != 0.0:
list_metric = list_metric / min_metric
        return list_metric, offset
Python | def calculate_curvature(self, list_metric):
"""
Calculate the curvature of a fitted curve going through the minimum
value of a metric list.
Parameters
----------
list_metric : array_like
1D array. List of metrics.
Returns
-------
curvature : float
Quadratic coefficient of the parabola fitting.
min_pos : float
Position of the minimum value with sub-pixel accuracy.
"""
radi = 2
num_metric = len(list_metric)
min_pos = np.clip(
np.argmin(list_metric), radi, num_metric - radi - 1)
list1 = list_metric[min_pos - radi:min_pos + radi + 1]
(afact1, _, _) = np.polyfit(np.arange(0, 2 * radi + 1), list1, 2)
list2 = list_metric[min_pos - 1:min_pos + 2]
(afact2, bfact2, _) = np.polyfit(
np.arange(min_pos - 1, min_pos + 2), list2, 2)
curvature = np.abs(afact1)
if afact2 != 0.0:
num = - bfact2 / (2 * afact2)
if (num >= min_pos - 1) and (num <= min_pos + 1):
min_pos = num
        return curvature, np.float32(min_pos)
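# A standalone sanity check of the curvature fit above on an exact parabola
# y = 2 * (x - 5)^2 sampled at x = 0..10: the quadratic coefficient recovered
# around the minimum should be 2 and the sub-pixel minimum should be 5.
import numpy as np

x = np.arange(11)
list_metric = 2.0 * (x - 5) ** 2
radi = 2
min_pos = np.clip(np.argmin(list_metric), radi, len(list_metric) - radi - 1)
list1 = list_metric[min_pos - radi:min_pos + radi + 1]
(afact1, _, _) = np.polyfit(np.arange(0, 2 * radi + 1), list1, 2)
list2 = list_metric[min_pos - 1:min_pos + 2]
(afact2, bfact2, _) = np.polyfit(np.arange(min_pos - 1, min_pos + 2), list2, 2)
assert abs(afact1 - 2.0) < 1e-6
assert abs(-bfact2 / (2 * afact2) - 5.0) < 1e-6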
Python | def _link_nexus_file(self, data_obj, name, plugin_list):
"""Link phantom + synthetic projection data h5 files to a single nexus file containing both."""
if name == 'phantom':
data_obj.exp.meta_data.set(['group_name', 'phantom'], 'phantom')
data_obj.exp.meta_data.set(['link_type', 'phantom'], 'final_result')
data_obj.meta_data.set(["meta_data", "PLACEHOLDER", "VOLUME_XZ"], [10])
else:
data_obj.exp.meta_data.set(['group_name', 'synth_proj_data'], 'entry1/tomo_entry/data')
data_obj.exp.meta_data.set(['link_type', 'synth_proj_data'], 'entry1')
self._populate_nexus_file(data_obj)
        self._link_datafile_to_nexus_file(data_obj)
Python | def choices_from_queryset(queryset):
"""
Makes choices from a QuerySet in a format that is usable by the
django.forms.widget.Select widget.
queryset: An instance of django.db.models.query.QuerySet
"""
return chain(
[EMPTY_CHOICE],
[(o.pk, str(o)) for o in queryset],
    )
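# choices_from_queryset only needs objects exposing pk and a str() form, so a
# couple of stand-in records are enough to see the output shape; EMPTY_CHOICE
# is assumed to be defined elsewhere in this module (e.g. ('', '---------')).
class FakeRecord(object):
    def __init__(self, pk, name):
        self.pk, self.name = pk, name

    def __str__(self):
        return self.name

choices = list(choices_from_queryset([FakeRecord(1, 'Acme'), FakeRecord(2, 'Initech')]))
assert choices[1:] == [(1, 'Acme'), (2, 'Initech')]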
Python | def choices_from_instance(instance, widget):
"""
Builds choices from a model instance using the widgets queryset() method.
If any of the widgets trigger_field fields is not defined on the instance
or the instance itself is None, None is returned.
instance: An instance of the model used on the current admin page.
widget: A widget instance given to the FlexSelectWidget.
"""
try:
for trigger_field in widget.trigger_fields:
getattr(instance, trigger_field)
except (ObjectDoesNotExist, AttributeError):
return [('', widget.empty_choices_text(instance))]
    return choices_from_queryset(widget.queryset(instance))
Python | def details_from_instance(instance, widget):
"""
Builds html from a model instance using the widgets details() method. If
any of the widgets trigger_field fields is not defined on the instance or
the instance itself is None, None is returned.
instance: An instance of the model used on the current admin page.
widget: A widget instance given to the FlexSelectWidget.
"""
try:
for trigger_field in widget.trigger_fields:
getattr(instance, trigger_field)
related_instance = getattr(instance, widget.base_field.name)
except (ObjectDoesNotExist, AttributeError):
return ''
    return widget.details(related_instance, instance)
Python | def instance_from_request(request, widget):
"""
Returns a partial instance of the widgets model loading it with values
from a POST request.
"""
items = dict(request.POST.items())
values = {}
for f in widget.base_field.model._meta.fields:
if f.name in items:
try:
value = f.formfield().to_python(items[f.name])
if value is not None:
values[f.name] = value
except ValidationError:
pass
    return widget.base_field.model(**values)
Python | def _hashed_name(self):
"""
Each widget will be unique by the name of the field and the class name
of the model admin.
"""
salted_string = "".join([
settings.SECRET_KEY,
self.base_field.name,
self.modeladmin.__class__.__name__,
]).encode('utf-8')
return "_%s" % hashlib.sha1(salted_string).hexdigest() | def _hashed_name(self):
"""
Each widget will be unique by the name of the field and the class name
of the model admin.
"""
salted_string = "".join([
settings.SECRET_KEY,
self.base_field.name,
self.modeladmin.__class__.__name__,
]).encode('utf-8')
return "_%s" % hashlib.sha1(salted_string).hexdigest() |
Python | def _get_instance(self):
"""
Returns a model instance from the url in the admin page.
"""
if self.request.method == 'POST':
return instance_from_request(self.request, self)
else:
try:
path = self.request.META['PATH_INFO'].strip('/')
object_id = int(path.split('/').pop())
return self.modeladmin.get_object(self.request, object_id)
except ValueError:
                return None
Python | def _build_js(self):
"""
Adds the widgets hashed_name as the key with an array of its
trigger_fields as the value to flexselect.selects.
"""
return """\
<script>
var flexselect = flexselect || {};
flexselect.fields = flexselect.fields || {};
flexselect.fields.%s = %s;
</script>""" % (
self.hashed_name, json.dumps({
'base_field': self.base_field.name,
'trigger_fields': self.trigger_fields, })
        )
Python | def render(self, name, value, attrs=None, choices=(), *args, **kwargs):
"""
Overrides. Reduces the choices by calling the widgets queryset()
method and adds a details <span> that is filled with the widgets
details() method.
"""
instance = self._get_instance()
self.choices = choices_from_instance(instance, self)
html = []
html.append(super(FlexSelectWidget, self).render(
name, value, attrs=attrs,
*args, **kwargs
))
html.append(self._build_js())
html.append('<span class="flexselect_details">')
html.append(details_from_instance(instance, self))
html.append('</span>')
return mark_safe("".join(html)) | def render(self, name, value, attrs=None, choices=(), *args, **kwargs):
"""
Overrides. Reduces the choices by calling the widgets queryset()
method and adds a details <span> that is filled with the widgets
details() method.
"""
instance = self._get_instance()
self.choices = choices_from_instance(instance, self)
html = []
html.append(super(FlexSelectWidget, self).render(
name, value, attrs=attrs,
*args, **kwargs
))
html.append(self._build_js())
html.append('<span class="flexselect_details">')
html.append(details_from_instance(instance, self))
html.append('</span>')
return mark_safe("".join(html)) |
Python | def clean(self):
"""
Make sure that the company for client is the same as the company for
the company contact person.
"""
        if self.client.company != self.company_contact_person.company:
            raise ValidationError("The client's and the contact's companies"
                                  " do not match.")
Python | def details(self, base_field_instance, instance):
"""
HTML appended to the base_field.
- base_field_instance: An instance of the field that this widget is
applied to.
- instance: A partial instance of the parent model loaded from the
request.
Returns a unicoded string.
"""
return """\
<div>
<dl>
<dt>%s</dt><dd>%s</dd>
<dt>%s</dt><dd>%s</dd>
</dl>
</div>
""" % ('Company', base_field_instance.company,
               'Email', base_field_instance.email,)
Python | def queryset(self, instance):
"""
Returns the QuerySet populating the base field. If either of the
trigger_fields is None, this function will not be called.
- instance: A partial instance of the parent model loaded from the
request.
"""
company = instance.client.company
        return CompanyContactPerson.objects.filter(company=company)
Python | def empty_choices_text(self, instance):
"""
If either of the trigger_fields is None this function will be called
to get the text for the empty choice in the select box of the base
field.
- instance: A partial instance of the parent model loaded from the
request.
"""
return "Please update the client field" | def empty_choices_text(self, instance):
"""
If either of the trigger_fields is None this function will be called
to get the text for the empty choice in the select box of the base
field.
- instance: A partial instance of the parent model loaded from the
request.
"""
return "Please update the client field" |
Python | def formfield_for_foreignkey(self, db_field, request, **kwargs):
"""
Alters the widget displayed for the base field.
"""
if db_field.name == "company_contact_person":
kwargs['widget'] = CompanyContactPersonWidget(
base_field=db_field,
modeladmin=self,
request=request,
)
kwargs['label'] = 'Contact'
return super(CaseAdmin, self).\
            formfield_for_foreignkey(db_field, request, **kwargs)
Python | def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data': None, 'rois': None}
blobs['data'], im_scale_factors = _get_image_blob(im)
if not cfg.TEST.HAS_RPN:
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
    return blobs, im_scale_factors
Python | def im_detect(net, im, boxes=None):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals or None (for RPN)
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
blobs, im_scales = _get_blobs(im, boxes)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True,
return_inverse=True)
blobs['rois'] = blobs['rois'][index, :]
boxes = boxes[index, :]
if cfg.TEST.HAS_RPN:
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
if cfg.TEST.HAS_RPN:
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
else:
net.blobs['rois'].reshape(*(blobs['rois'].shape))
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
if cfg.TEST.HAS_RPN:
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
else:
forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
blobs_out = net.forward(**forward_kwargs)
if cfg.TEST.HAS_RPN:
assert len(im_scales) == 1, "Only single-image batch implemented"
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
    # bounding-box regression is disabled here (originally: if cfg.TEST.BBOX_REG:)
    if False:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
    fc7 = None  # net.blobs['fc7'].data
    return None, scores, None, pred_boxes
Python | def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
blobs = self.get_minibatch()
for blob_name, blob in blobs.iteritems():
top_ind = self._name_to_top_map[blob_name]
# Reshape net's input blobs
top[top_ind].reshape(*(blob.shape))
# Copy data into net's input blobs
        top[top_ind].data[...] = blob.astype(np.float32, copy=False)
"""Get blobs and copy them into this layer's top blob vector."""
blobs = self.get_minibatch()
for blob_name, blob in blobs.iteritems():
top_ind = self._name_to_top_map[blob_name]
# Reshape net's input blobs
top[top_ind].reshape(*(blob.shape))
# Copy data into net's input blobs
top[top_ind].data[...] = blob.astype(np.float32, copy=False) |
Python | def demo_orig(net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
im_detect(net, im)
im_info = net.blobs['im_info'].data
rois = net.blobs['rois'].data
    rois = rois / im_info[0, 2]  # unscale ROIs back to raw image coordinates
roi_scores = net.blobs['rois_score'].data
attention = net.blobs['attention'].data.squeeze()
ind = np.argsort(attention)[::-1]
attention = attention[ind]
rois_all = np.hstack((rois[:,1:],roi_scores))
rois_all = rois_all[ind]
for i in xrange(5):
ascore = attention[i]
roi = rois_all[i]
        cv2.rectangle(im, (int(roi[0]), int(roi[1])), (int(roi[2]), int(roi[3])), (255, 0, 0), 1)
cv2.imshow('im',im)
cv2.waitKey(0)
    timer.toc()
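
# --- Illustrative sketch, not part of the original source: the ranking
# pattern demo_orig() uses -- argsort attention in descending order and keep
# the top-scoring ROIs for drawing. Values below are fabricated.
def _demo_topk_by_attention():
    import numpy as np
    attention = np.array([0.10, 0.70, 0.05, 0.15])
    rois = np.array([[0, 0, 10, 10],
                     [5, 5, 20, 20],
                     [1, 1, 2, 2],
                     [8, 8, 30, 30]], dtype=np.float32)
    order = np.argsort(attention)[::-1]  # indices by descending attention
    top2 = rois[order[:2]]               # demo_orig() draws order[:5]
    assert (top2[0] == rois[1]).all() and (top2[1] == rois[3]).all()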
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
im_detect(net, im)
im_info = net.blobs['im_info'].data
rois = net.blobs['rois'].data
rois = rois/net.blobs['im_info'].data[0,2]
roi_scores = net.blobs['rois_score'].data
attention = net.blobs['attention'].data.squeeze()
ind = np.argsort(attention)[::-1]
attention = attention[ind]
rois_all = np.hstack((rois[:,1:],roi_scores))
rois_all = rois_all[ind]
for i in xrange(5):
ascore = attention[i]
roi = rois_all[i]
cv2.rectangle(im,(roi[0],roi[1]),(roi[2],roi[3]),(255,0,0),1)
cv2.imshow('im',im)
cv2.waitKey(0)
timer.toc() |
Python | def demo(net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
im_detect(net, im)
cls_score = net.blobs['cls_score'].data.copy()
    cls_score_reindexed_caffe = net.blobs['cls_score_reindexed'].data.copy()
    vatt_caffe = net.blobs['vatt'].data.copy()
    cls_score_tiled_caffe = net.blobs['cls_score_tiled'].data.copy()
    cls_score_tiled_transposed_caffe = net.blobs['cls_score_tiled_transposed'].data.copy()
    vatt_raw_caffe = net.blobs['vatt_raw'].data.copy()
    attention_caffe = net.blobs['attention'].data.copy()
    attention_tiled_caffe = net.blobs['attention_tiled'].data.copy()
cls_score_transposed = cls_score.transpose((1,0,2,3))
cls_score_reindexed = cls_score_transposed[15,...]
attention = softmax(cls_score_reindexed.squeeze())
rois = net.blobs['rois'].data
rois = rois/net.blobs['im_info'].data[0,2]
roi_scores = net.blobs['rois_score'].data
vatt = np.zeros((rois.shape[0],21,1,1),np.float32)
for i in xrange(vatt.shape[0]):
vatt[i] += attention[i] * cls_score[i]
#vatt = vatt.sum(axis=0)
vatt_summed= vatt.sum(axis=0)
attention = net.blobs['attention'].data[:,0].squeeze()
ind = np.argsort(attention)[::-1]
attention = attention[ind]
rois_all = np.hstack((rois[:,1:],roi_scores))
rois_all = rois_all[ind]
for i in xrange(5):
ascore = attention[i]
roi = rois_all[i]
        cv2.rectangle(im, (int(roi[0]), int(roi[1])), (int(roi[2]), int(roi[3])), (255, 0, 0), 1)
cv2.imshow('im',im)
cv2.waitKey(0)
    timer.toc()
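
# --- Illustrative sketch, not part of the original source. demo() relies on
# a softmax() helper defined elsewhere in this codebase; the version below is
# only an assumed stand-in (numerically stable, 1-D). The second function
# checks that the explicit per-ROI loop in demo() equals a vectorized
# attention-weighted sum of class scores. Shapes (4 ROIs, 21 classes) are
# made up.
def _softmax_1d(x):
    import numpy as np
    e = np.exp(x - np.max(x))
    return e / e.sum()

def _demo_attention_pooling():
    import numpy as np
    cls_score = np.random.randn(4, 21, 1, 1).astype(np.float32)
    att = _softmax_1d(cls_score[:, 15, 0, 0])  # attention from class-15 scores
    vatt_loop = np.zeros((21, 1, 1), np.float32)
    for i in range(cls_score.shape[0]):
        vatt_loop += att[i] * cls_score[i]
    vatt_vec = (att[:, None, None, None] * cls_score).sum(axis=0)
    assert np.allclose(vatt_loop, vatt_vec, atol=1e-5)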
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
im_detect(net, im)
cls_score = net.blobs['cls_score'].data.copy()
cls_score_reindexed_caffe= net.blobs['cls_score_reindexed'].data.copy()
vatt_caffe = net.blobs['vatt'].data.copy()
cls_score_tiled_caffe= net.blobs['cls_score_tiled'].data.copy()
cls_score_tiled_transposed_caffe = net.blobs['cls_score_tiled_transposed'].data.copy()
vatt_raw_caffe = net.blobs['vatt_raw'].data.copy()
attention_caffe = net.blobs['attention'].data.copy()
attention_tiled_caffe = net.blobs['attention_tiled'].data.copy()
cls_score_tiled_caffe = net.blobs['cls_score_tiled'].data.copy()
cls_score_transposed = cls_score.transpose((1,0,2,3))
cls_score_reindexed = cls_score_transposed[15,...]
attention = softmax(cls_score_reindexed.squeeze())
rois = net.blobs['rois'].data
rois = rois/net.blobs['im_info'].data[0,2]
roi_scores = net.blobs['rois_score'].data
vatt = np.zeros((rois.shape[0],21,1,1),np.float32)
for i in xrange(vatt.shape[0]):
vatt[i] += attention[i] * cls_score[i]
#vatt = vatt.sum(axis=0)
vatt_summed= vatt.sum(axis=0)
attention = net.blobs['attention'].data[:,0].squeeze()
ind = np.argsort(attention)[::-1]
attention = attention[ind]
rois_all = np.hstack((rois[:,1:],roi_scores))
rois_all = rois_all[ind]
for i in xrange(5):
ascore = attention[i]
roi = rois_all[i]
cv2.rectangle(im,(roi[0],roi[1]),(roi[2],roi[3]),(255,0,0),1)
cv2.imshow('im',im)
cv2.waitKey(0)
timer.toc() |
Python | def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
# todo: modify mini_batch
blobs = self.get_minibatch()
labels = blobs['labels']
sub_boxes = blobs['sub_boxes']
obj_boxes = blobs['obj_boxes']
union_boxes = blobs['union_boxes']
#rlp_labels = blobs['rlp_labels']
top[0].reshape(*blobs['conv_new_1'].shape)
top[0].data[...] = blobs['conv_new_1']
top[1].reshape(sub_boxes.shape[0],5,1,1)
top[1].data[...] = sub_boxes.reshape((-1,5,1,1))
top[2].reshape(obj_boxes.shape[0],5,1,1)
top[2].data[...] = obj_boxes.reshape((-1,5,1,1))
top[3].reshape(union_boxes.shape[0],5,1,1)
top[3].data[...] = union_boxes.reshape((-1,5,1,1))
top[4].reshape(labels.shape[0])
    top[4].data[...] = labels.astype(np.float32)
"""Get blobs and copy them into this layer's top blob vector."""
# todo: modify mini_batch
blobs = self.get_minibatch()
labels = blobs['labels']
sub_boxes = blobs['sub_boxes']
obj_boxes = blobs['obj_boxes']
union_boxes = blobs['union_boxes']
#rlp_labels = blobs['rlp_labels']
top[0].reshape(*blobs['conv_new_1'].shape)
top[0].data[...] = blobs['conv_new_1']
top[1].reshape(sub_boxes.shape[0],5,1,1)
top[1].data[...] = sub_boxes.reshape((-1,5,1,1))
top[2].reshape(obj_boxes.shape[0],5,1,1)
top[2].data[...] = obj_boxes.reshape((-1,5,1,1))
top[3].reshape(union_boxes.shape[0],5,1,1)
top[3].data[...] = union_boxes.reshape((-1,5,1,1))
top[4].reshape(labels.shape[0])
top[4].data[...] = labels.astype(np.float32) |
Python | def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb."""
if cfg.TRAIN.ASPECT_GROUPING:
widths = np.array([r['width'] for r in self._roidb])
heights = np.array([r['height'] for r in self._roidb])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
inds = np.hstack((
np.random.permutation(horz_inds),
np.random.permutation(vert_inds)))
print inds.shape
inds = np.reshape(inds, (-1, 2))
print inds.shape
row_perm = np.random.permutation(np.arange(inds.shape[0]))
inds = np.reshape(inds[row_perm, :], (-1,))
self._perm = inds
else:
self._perm = np.random.permutation(np.arange(len(self._roidb)))
    self._cur = 0
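
# --- Illustrative sketch, not part of the original source: the aspect
# grouping above. Landscape and portrait indices are permuted separately and
# reshaped to (-1, 2) so each 2-image batch shares an orientation. Like the
# original, this assumes both orientation counts are even.
def _demo_aspect_grouping():
    import numpy as np
    widths  = np.array([640, 480, 800, 500, 600, 700])
    heights = np.array([480, 640, 600, 800, 400, 500])
    horz = widths >= heights
    inds = np.hstack((np.random.permutation(np.where(horz)[0]),
                      np.random.permutation(np.where(~horz)[0])))
    pairs = inds.reshape(-1, 2)
    # every pair is homogeneous in orientation
    assert all(horz[a] == horz[b] for a, b in pairs)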
"""Randomly permute the training roidb."""
if cfg.TRAIN.ASPECT_GROUPING:
widths = np.array([r['width'] for r in self._roidb])
heights = np.array([r['height'] for r in self._roidb])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
inds = np.hstack((
np.random.permutation(horz_inds),
np.random.permutation(vert_inds)))
print inds.shape
inds = np.reshape(inds, (-1, 2))
print inds.shape
row_perm = np.random.permutation(np.arange(inds.shape[0]))
inds = np.reshape(inds[row_perm, :], (-1,))
self._perm = inds
else:
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0 |
Python | def _get_next_minibatch(self):
"""Return the blobs to be used for the next minibatch.
If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a
separate process and made available through self._blob_queue.
"""
if cfg.TRAIN.USE_PREFETCH:
return self._blob_queue.get()
else:
#db_inds = self._get_next_minibatch_inds()
#minibatch_db = [self._roidb[i] for i in db_inds]
minibatch_db=[]
if 'ilsvrc' in self._roidb[0]['db_name']:
while len(minibatch_db)<cfg.TRAIN.IMS_PER_BATCH:
if not cfg.TRAIN.USE_PERF_PROB:
#uniform distribution
c = random.choice(self._roidb_class.keys())
else:
#distribution based on performance
c = np.random.choice(self._roidb_class.keys(),p=self._p)
#print 'grabbing class %i'%c
if len(self._roidb_class[c])>0:
minibatch_db.append(random.choice(self._roidb_class[c]))
else:
db_inds = self._get_next_minibatch_inds()
minibatch_db = [self._roidb[i] for i in db_inds]
        return get_minibatch(minibatch_db, self._num_classes)
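
# --- Illustrative sketch, not part of the original source: the
# class-balanced sampling branch above. Draw a class first (uniformly or from
# performance-derived weights p), then an image of that class; empty classes
# are simply skipped. The roidb and weights below are fabricated.
def _demo_class_balanced_sampling():
    import random
    import numpy as np
    roidb_class = {0: ['im_a', 'im_b'], 1: ['im_c'], 2: []}
    p = np.array([0.4, 0.3, 0.3])  # assumed per-class probabilities
    minibatch, ims_per_batch = [], 2
    while len(minibatch) < ims_per_batch:
        c = np.random.choice(list(roidb_class.keys()), p=p)
        if len(roidb_class[c]) > 0:
            minibatch.append(random.choice(roidb_class[c]))
    assert len(minibatch) == ims_per_batch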
"""Return the blobs to be used for the next minibatch.
If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a
separate process and made available through self._blob_queue.
"""
if cfg.TRAIN.USE_PREFETCH:
return self._blob_queue.get()
else:
#db_inds = self._get_next_minibatch_inds()
#minibatch_db = [self._roidb[i] for i in db_inds]
minibatch_db=[]
if 'ilsvrc' in self._roidb[0]['db_name']:
while len(minibatch_db)<cfg.TRAIN.IMS_PER_BATCH:
if not cfg.TRAIN.USE_PERF_PROB:
#uniform distribution
c = random.choice(self._roidb_class.keys())
else:
#distribution based on performance
c = np.random.choice(self._roidb_class.keys(),p=self._p)
#print 'grabbing class %i'%c
if len(self._roidb_class[c])>0:
minibatch_db.append(random.choice(self._roidb_class[c]))
else:
db_inds = self._get_next_minibatch_inds()
minibatch_db = [self._roidb[i] for i in db_inds]
return get_minibatch(minibatch_db, self._num_classes) |
Python | def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
# todo: modify mini_batch
blobs = self.get_minibatch()
labels = blobs['labels']
sub_boxes = blobs['sub_boxes']
obj_boxes = blobs['obj_boxes']
top[0].reshape(*blobs['conv_new_1'].shape)
top[0].data[...] = blobs['conv_new_1']
top[1].reshape(sub_boxes.shape[0],5,1,1)
top[1].data[...] = sub_boxes.reshape((-1,5,1,1))
top[2].reshape(obj_boxes.shape[0],5,1,1)
top[2].data[...] = obj_boxes.reshape((-1,5,1,1))
top[3].reshape(*labels.shape)
    top[3].data[...] = labels
"""Get blobs and copy them into this layer's top blob vector."""
# todo: modify mini_batch
blobs = self.get_minibatch()
labels = blobs['labels']
sub_boxes = blobs['sub_boxes']
obj_boxes = blobs['obj_boxes']
top[0].reshape(*blobs['conv_new_1'].shape)
top[0].data[...] = blobs['conv_new_1']
top[1].reshape(sub_boxes.shape[0],5,1,1)
top[1].data[...] = sub_boxes.reshape((-1,5,1,1))
top[2].reshape(obj_boxes.shape[0],5,1,1)
top[2].data[...] = obj_boxes.reshape((-1,5,1,1))
top[3].reshape(*labels.shape)
top[3].data[...] = labels |
Python | def run_single(imid):
caffe.set_mode_gpu()
caffe.set_device(0)
m = h5py.File('/home/zawlin/Dropbox/proj/sg_vrd_meta.h5', 'r', 'core')
net = caffe.Net('models/sg_vrd/vgg16/faster_rcnn_end2end/test.prototxt',
'output/faster_rcnn_end2end/sg_vrd_2016_train/vgg16_faster_rcnn_iter_80000.caffemodel',
caffe.TEST)
net2 = caffe.Net('models/sg_vrd/vgg16/faster_rcnn_end2end/test.prototxt',
'output/faster_rcnn_end2end/sg_vrd_2016_train/vgg16_faster_rcnn_finetune_iter_95000.caffemodel',
caffe.TEST)
# net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
net.name = 'sgvrd'
imdb = get_imdb('sg_vrd_2016_test')
imdb.competition_mode(0)
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
root = 'data/sg_vrd_2016/Data/sg_test_images/'
cnt = 0
fpath = root+imid+'.jpg'
im_orig = cv2.imread(fpath)
    if im_orig is None:
        print fpath
        return
cv2.namedWindow('ctrl')
cv2.createTrackbar('t1','ctrl',200,1000,nothing)
cv2.createTrackbar('t2','ctrl',200,1000,nothing)
dets1 = detect(im_orig,net,0.1,imid,m)
dets2 = detect(im_orig,net2,0.1,imid,m)
print_text = True
while True:
im = im_orig.copy()
t1 = cv2.getTrackbarPos('t1','ctrl')/1000.
t2 = cv2.getTrackbarPos('t2','ctrl')/1000.
# t1idx = []
# t2idx = []
t1idx = [2,10]
t2idx = [1,9]
for i in xrange(len(dets1)):
#if t1idx != -1 and t1idx!=i: continue
if len(t1idx)>0 and i not in t1idx:continue
d = dets1[i]
score = d['score']
if score <t1:continue
cls_name = d['cls_name']+'.'+str(i)
di = [d['x1'],d['y1'],d['x2'],d['y2']]
x, y = int(di[0]), int(di[1])
if x < 10:
x = 15
if y < 10:
y = 15
if print_text:
cv2.putText(im, cls_name, (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 1)
            cv2.rectangle(im, (int(di[0]), int(di[1])), (int(di[2]), int(di[3])), (0, 0, 255), 1)
#print '%s %f %d %d %d %f\n' % (imid, score, di[0], di[1], di[2], di[3])
for i in xrange(len(dets2)):
if len(t2idx)>0 and i not in t2idx:continue
d = dets2[i]
score = d['score']
if score <t2:continue
cls_name = d['cls_name']+'.'+str(i)
di = [d['x1'],d['y1'],d['x2'],d['y2']]
x, y = int(di[0]), int(di[1])
if x < 10:
x = 15
if y < 10:
y = 15
if print_text:
cv2.putText(im, cls_name, (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 1)
            cv2.rectangle(im, (int(di[0]), int(di[1])), (int(di[2]), int(di[3])), (0, 255, 0), 1)
#print '%s %f %d %d %d %f\n' % (imid, score, di[0], di[1], di[2], di[3])
cv2.imshow('im',im)
c = cv2.waitKey(100)&0xFF
if c == ord('s'):
cv2.imwrite('output/results/'+imid+'.jpg',im)
print 'written'
elif c == ord('b'):
print_text=not print_text
elif c== ord(' '):
break
elif c == 27:
            exit(0)
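
# --- Illustrative sketch, not part of the original source: the trackbar
# handling in run_single(). Integer slider positions in [0, 1000] become
# float score thresholds in [0, 1]; waitKey results are masked with 0xFF so
# key codes compare correctly on 64-bit builds.
def _trackbar_to_threshold(pos, max_pos=1000):
    return pos / float(max_pos)

assert _trackbar_to_threshold(200) == 0.2
assert _trackbar_to_threshold(1000) == 1.0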

def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
# todo: modify mini_batch
blobs = self._get_next_minibatch()
labels = blobs['labels']
gt_boxes = blobs['gt_boxes']
im_info = blobs['im_info']
#gt_boxes = gt_boxes
#blobs['labels']=blobs['gt_boxes']
#blobs['word_embeddings'] = np.zeros((1,1,1,1),np.float32)
#blobs['regression_targets'] = np.zeros((1,20,1,1),np.float32)
#for blob_name, blob in blobs.iteritems():
# top_ind = self._name_to_top_map[blob_name]
# shape = blob.shape
# top[top_ind].reshape(*(blob.shape))
# # Copy data into net's input blobs
# top[top_ind].data[...] = blob.astype(np.float32, copy=False)
top[0].reshape(*blobs['data'].shape)
top[0].data[...] = blobs['data']
gt_boxes = gt_boxes.reshape((gt_boxes.shape[0],gt_boxes.shape[1],1,1))
zeros = np.zeros((gt_boxes.shape[0],1,1, 1), dtype=gt_boxes.dtype)
all_rois = np.concatenate((zeros, gt_boxes),axis=1)
top[1].reshape(*all_rois.shape)
top[1].data[...] = all_rois
#labels = np.concatenate((labels,np.array([0,0,0])))
labels = np.unique(labels)
labels_v = np.zeros((20))
labels_v[...]=-1
labels_v[labels.astype(np.int32)-1]=1
#labels_v = np.tile(labels_v,labels.shape[0])
labels_v = labels_v.reshape((1,20))
#for i in xrange(labels.shape[0]):
# labels_v[i,labels[i]]=1
labels = labels.reshape((labels.shape[0],1,1,1))
#labels[-1,0,0,0]=0
top[2].reshape(*labels.shape)
top[2].data[...] = labels
top[3].reshape(*labels_v.shape)
    top[3].data[...] = labels_v
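
# --- Illustrative sketch, not part of the original source: the label
# encoding above. Unique 1-based class labels become a 20-dim vector with +1
# at present classes and -1 elsewhere, a typical target format for
# hinge-style multi-label objectives.
def _demo_label_vector():
    import numpy as np
    labels = np.unique(np.array([3.0, 7.0, 3.0]))  # -> [3., 7.]
    labels_v = -np.ones(20)
    labels_v[labels.astype(np.int32) - 1] = 1
    assert labels_v[2] == 1 and labels_v[6] == 1 and labels_v.sum() == -16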
"""Get blobs and copy them into this layer's top blob vector."""
# todo: modify mini_batch
blobs = self._get_next_minibatch()
labels = blobs['labels']
gt_boxes = blobs['gt_boxes']
im_info = blobs['im_info']
#gt_boxes = gt_boxes
#blobs['labels']=blobs['gt_boxes']
#blobs['word_embeddings'] = np.zeros((1,1,1,1),np.float32)
#blobs['regression_targets'] = np.zeros((1,20,1,1),np.float32)
#for blob_name, blob in blobs.iteritems():
# top_ind = self._name_to_top_map[blob_name]
# shape = blob.shape
# top[top_ind].reshape(*(blob.shape))
# # Copy data into net's input blobs
# top[top_ind].data[...] = blob.astype(np.float32, copy=False)
top[0].reshape(*blobs['data'].shape)
top[0].data[...] = blobs['data']
gt_boxes = gt_boxes.reshape((gt_boxes.shape[0],gt_boxes.shape[1],1,1))
zeros = np.zeros((gt_boxes.shape[0],1,1, 1), dtype=gt_boxes.dtype)
all_rois = np.concatenate((zeros, gt_boxes),axis=1)
top[1].reshape(*all_rois.shape)
top[1].data[...] = all_rois
#labels = np.concatenate((labels,np.array([0,0,0])))
labels = np.unique(labels)
labels_v = np.zeros((20))
labels_v[...]=-1
labels_v[labels.astype(np.int32)-1]=1
#labels_v = np.tile(labels_v,labels.shape[0])
labels_v = labels_v.reshape((1,20))
#for i in xrange(labels.shape[0]):
# labels_v[i,labels[i]]=1
labels = labels.reshape((labels.shape[0],1,1,1))
#labels[-1,0,0,0]=0
top[2].reshape(*labels.shape)
top[2].data[...] = labels
top[3].reshape(*labels_v.shape)
top[3].data[...] =labels_v |
Python | def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
# todo: modify mini_batch
blobs = self.get_minibatch()
labels = blobs['labels']
sub_boxes = blobs['sub_boxes']
obj_boxes = blobs['obj_boxes']
rlp_labels = blobs['rlp_labels']
top[0].reshape(*blobs['data'].shape)
top[0].data[...] = blobs['data']
top[1].reshape(sub_boxes.shape[0],5,1,1)
top[1].data[...] = sub_boxes.reshape((-1,5,1,1))
top[2].reshape(obj_boxes.shape[0],5,1,1)
top[2].data[...] = obj_boxes.reshape((-1,5,1,1))
top[3].reshape(labels.shape[0],1)
top[3].data[...] = labels.reshape((-1,1))
top[4].reshape(*rlp_labels.shape)
    top[4].data[...] = rlp_labels
"""Get blobs and copy them into this layer's top blob vector."""
# todo: modify mini_batch
blobs = self.get_minibatch()
labels = blobs['labels']
sub_boxes = blobs['sub_boxes']
obj_boxes = blobs['obj_boxes']
rlp_labels = blobs['rlp_labels']
top[0].reshape(*blobs['data'].shape)
top[0].data[...] = blobs['data']
top[1].reshape(sub_boxes.shape[0],5,1,1)
top[1].data[...] = sub_boxes.reshape((-1,5,1,1))
top[2].reshape(obj_boxes.shape[0],5,1,1)
top[2].data[...] = obj_boxes.reshape((-1,5,1,1))
top[3].reshape(labels.shape[0],1)
top[3].data[...] = labels.reshape((-1,1))
top[4].reshape(*rlp_labels.shape)
top[4].data[...] =rlp_labels |
Python | def im_detect(net, im, boxes=None):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals or None (for RPN)
    Returns:
        score_raw (ndarray): raw cls_score activations (pre-softmax)
        scores (ndarray): R x K array of object class scores (K includes
            background as object category 0)
        fc7 (ndarray): fc7 features for each ROI
        boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
blobs, im_scales = _get_blobs(im, boxes)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
# if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
# v = np.array([1, 1e3, 1e6, 1e9, 1e12])
# hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
# _, index, inv_index = np.unique(hashes, return_index=True,
# return_inverse=True)
# blobs['rois'] = blobs['rois'][index, :]
# boxes = boxes[index, :]
if cfg.TEST.HAS_RPN:
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
if cfg.TEST.HAS_RPN:
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
else:
net.blobs['rois'].reshape(*(blobs['rois'].shape))
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
if cfg.TEST.HAS_RPN:
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
else:
forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
blobs_out = net.forward(**forward_kwargs)
if cfg.TEST.HAS_RPN:
assert len(im_scales) == 1, "Only single-image batch implemented"
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
    # bounding-box regression disabled in this path
    # (original condition: if cfg.TEST.BBOX_REG:)
    if False:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
# if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
# # Map scores and predictions back to the original set of boxes
# scores = scores[inv_index, :]
# pred_boxes = pred_boxes[inv_index, :]
fc7 = net.blobs['fc7'].data
    return net.blobs['cls_score'].data[:, :], scores, fc7, pred_boxes
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals or None (for RPN)
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
blobs, im_scales = _get_blobs(im, boxes)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
# if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
# v = np.array([1, 1e3, 1e6, 1e9, 1e12])
# hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
# _, index, inv_index = np.unique(hashes, return_index=True,
# return_inverse=True)
# blobs['rois'] = blobs['rois'][index, :]
# boxes = boxes[index, :]
if cfg.TEST.HAS_RPN:
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
if cfg.TEST.HAS_RPN:
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
else:
net.blobs['rois'].reshape(*(blobs['rois'].shape))
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
if cfg.TEST.HAS_RPN:
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
else:
forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
blobs_out = net.forward(**forward_kwargs)
if cfg.TEST.HAS_RPN:
assert len(im_scales) == 1, "Only single-image batch implemented"
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
# if cfg.TEST.BBOX_REG:
if False:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
# if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
# # Map scores and predictions back to the original set of boxes
# scores = scores[inv_index, :]
# pred_boxes = pred_boxes[inv_index, :]
fc7 = net.blobs['fc7'].data
return net.blobs['cls_score'].data[:, :], scores, fc7, pred_boxes |
Python | def prep_jointbox_train():
caffe.set_mode_gpu()
caffe.set_device(0)
rdata = sio.loadmat('data/meta/vrd/annotation_train.mat', struct_as_record=False,squeeze_me=True)
# map im_id to annotation
r_annos = {}
for i in xrange(len(rdata['annotation_train'])):
anno = rdata['annotation_train'][i]
im_id = anno.filename.split('.')[0]
r_annos[im_id] = anno
m = h5py.File('/home/zawlin/Dropbox/proj/sg_vrd_meta.h5', 'r', 'core')
net = caffe.Net('models/sg_vrd/vgg16/faster_rcnn_end2end/test_jointbox.prototxt',
'output/faster_rcnn_end2end/sg_vrd_2016_train/vgg16_faster_rcnn_finetune_iter_40000.caffemodel',caffe.TEST)
# net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
net.name = 'sgvrd'
imdb = get_imdb('sg_vrd_2016_train')
imdb.competition_mode(0)
cfg.TEST.HAS_RPN=False
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
h5path = 'output/' + imdb.name + '_predicate_exp_train.hdf5'
    h5f = h5py.File(h5path, 'a')  # explicit mode; older h5py defaulted to append
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
root = 'data/sg_vrd_2016/Data/sg_train_images/'
_t = {'im_detect': Timer(), 'misc': Timer()}
cnt = 0
thresh = .01
for path, subdirs, files in os.walk(root):
for name in files:
cnt += 1
if cnt %100==0:
print cnt
im_id = name.split('.')[0]
fpath = os.path.join(path, name)
im = cv2.imread(fpath)
            if im is None:
                print fpath
                continue
r_anno = r_annos[im_id]
sub_boxes = []
obj_boxes = []
joint_boxes = []
boxes_batch = []
b_type = {}
b_idx = 0
sub_visual = []
obj_visual = []
joint_visual = []
pre_label = []
if hasattr(r_anno, 'relationship'):
if not isinstance(r_anno.relationship, np.ndarray):
r_anno.relationship = [r_anno.relationship]
for r in xrange(len(r_anno.relationship)):
if not hasattr(r_anno.relationship[r], 'phrase'):
continue
predicate = r_anno.relationship[r].phrase[1]
pre_idx = int(str(m['meta/pre/name2idx/' + predicate][...]))
pre_label.append(pre_idx)
sub_lbl = r_anno.relationship[r].phrase[0]
obj_lbl = r_anno.relationship[r].phrase[2]
#print sub_lbl,predicate,obj_lbl
ymin, ymax, xmin, xmax = r_anno.relationship[r].subBox
sub_bbox = [xmin, ymin, xmax, ymax]
ymin, ymax, xmin, xmax = r_anno.relationship[r].objBox
obj_bbox= [xmin, ymin, xmax, ymax]
joint_bbox = [min(sub_bbox[0],obj_bbox[0]), min(sub_bbox[1],obj_bbox[1]),max(sub_bbox[2],obj_bbox[2]),max(sub_bbox[3],obj_bbox[3])]
joint_boxes.append(joint_bbox)
sub_boxes.append(sub_bbox)
obj_boxes.append(obj_bbox)
# cv2.rectangle(im,(joint_bbox[0],joint_bbox[1]),(joint_bbox[2],joint_bbox[3]),(255,255,255),4)
# cv2.rectangle(im,(sub_bbox[0],sub_bbox[1]),(sub_bbox[2],sub_bbox[3]),(0,0,255),2)
# cv2.rectangle(im,(obj_bbox[0],obj_bbox[1]),(obj_bbox[2],obj_bbox[3]),(255,0,0),2)
for i in xrange(len(sub_boxes)):
boxes_batch.append(sub_boxes[i])
b_type[b_idx]='s'
b_idx += 1
for i in xrange(len(obj_boxes)):
boxes_batch.append(obj_boxes[i])
b_type[b_idx]='o'
b_idx += 1
for i in xrange(len(joint_boxes)):
boxes_batch.append(joint_boxes[i])
b_type[b_idx]='j'
b_idx += 1
box_proposals = None
_t['im_detect'].tic()
score_raw, scores, fc7, boxes = im_detect(net, im, np.array(boxes_batch))
for i in xrange(scores.shape[0]):
s_idx = np.argmax(scores[i,1:])+1
cls_box=None
cls_box = boxes[i, s_idx * 4:(s_idx + 1) * 4]
if b_type[i] == 's':
sub_visual.append(fc7[i])
if b_type[i] == 'o':
obj_visual.append(fc7[i])
if b_type[i] == 'j':
joint_visual.append(fc7[i])
# cls_name = str(m['meta/cls/idx2name/' + str(s_idx)][...])
# if b_type[i] == 's':
# print str(m['meta/pre/idx2name/'+str(pre_label[i])][...])
# cv2.rectangle(im,(cls_box[0],cls_box[1]),(cls_box[2],cls_box[3]),(255,0,0),2)
# cv2.imshow('im',im)
_t['im_detect'].toc()
_t['misc'].tic()
sub_visual= np.array(sub_visual).astype(np.float16)
obj_visual= np.array(obj_visual).astype(np.float16)
joint_visual= np.array(joint_visual).astype(np.float16)
pre_label = np.array(pre_label).astype(np.int32)
h5f.create_dataset(im_id + '/sub_visual', dtype='float16', data=sub_visual)
h5f.create_dataset(im_id + '/obj_visual', dtype='float16', data=obj_visual)
h5f.create_dataset(im_id + '/joint_visual', dtype='float16', data=joint_visual)
h5f.create_dataset(im_id + '/pre_label', dtype='float16', data=pre_label)
_t['misc'].toc()
# rpn_rois = net.blobs['rois'].data
# pool5 = net.blobs['pool5'].data
# _t['misc'].tic()
# cnt += 1
print 'im_detect: {:d} {:.3f}s {:.3f}s' \
.format(cnt, _t['im_detect'].average_time,
                _t['misc'].average_time)
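
# --- Illustrative sketch, not part of the original source: the joint box
# computed above for predicate features is simply the tightest axis-aligned
# box covering both the subject and object boxes.
def _union_box(a, b):
    # a, b: [xmin, ymin, xmax, ymax]
    return [min(a[0], b[0]), min(a[1], b[1]),
            max(a[2], b[2]), max(a[3], b[3])]

assert _union_box([10, 10, 50, 50], [40, 5, 90, 30]) == [10, 5, 90, 50]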

def insert_srv6_header(pkt, sid_list):
"""Applies SRv6 insert transformation to the given packet.
"""
# Set IPv6 dst to first SID...
pkt[IPv6].dst = sid_list[0]
# Insert SRv6 header between IPv6 header and payload
sid_len = len(sid_list)
srv6_hdr = IPv6ExtHdrSegmentRouting(
nh=pkt[IPv6].nh,
addresses=sid_list[::-1],
len=sid_len * 2,
segleft=sid_len - 1,
lastentry=sid_len - 1)
pkt[IPv6].nh = 43 # next IPv6 header is SR header
pkt[IPv6].payload = srv6_hdr / pkt[IPv6].payload
    return pkt
"""Applies SRv6 insert transformation to the given packet.
"""
# Set IPv6 dst to first SID...
pkt[IPv6].dst = sid_list[0]
# Insert SRv6 header between IPv6 header and payload
sid_len = len(sid_list)
srv6_hdr = IPv6ExtHdrSegmentRouting(
nh=pkt[IPv6].nh,
addresses=sid_list[::-1],
len=sid_len * 2,
segleft=sid_len - 1,
lastentry=sid_len - 1)
pkt[IPv6].nh = 43 # next IPv6 header is SR header
pkt[IPv6].payload = srv6_hdr / pkt[IPv6].payload
return pkt |
Python | def pop_srv6_header(pkt):
"""Removes SRv6 header from the given packet.
"""
pkt[IPv6].nh = pkt[IPv6ExtHdrSegmentRouting].nh
    pkt[IPv6].payload = pkt[IPv6ExtHdrSegmentRouting].payload
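
# --- Illustrative usage sketch, not part of the original source: a round
# trip through the two SRv6 helpers above, assuming a scapy environment and
# documentation-prefix addresses. Note the SRH length math in
# insert_srv6_header(): each 128-bit SID occupies two 8-octet units, so
# len = 2 * len(sid_list), and nh=43 is the IPv6 routing-header protocol
# number.
def _demo_srv6_roundtrip():
    from scapy.all import IPv6, UDP, IPv6ExtHdrSegmentRouting
    sid_list = ["2001:db8::a", "2001:db8::b", "2001:db8::c"]
    pkt = IPv6(src="2001:db8::1", dst="2001:db8::2") / UDP(dport=1234)
    pkt = insert_srv6_header(pkt, sid_list)
    assert pkt[IPv6].dst == sid_list[0]
    assert pkt[IPv6ExtHdrSegmentRouting].segleft == len(sid_list) - 1
    pop_srv6_header(pkt)
    assert IPv6ExtHdrSegmentRouting not in pkt
    assert pkt[IPv6].nh == 17  # payload is UDP again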
"""Removes SRv6 header from the given packet.
"""
pkt[IPv6].nh = pkt[IPv6ExtHdrSegmentRouting].nh
pkt[IPv6].payload = pkt[IPv6ExtHdrSegmentRouting].payload |
Python | def generate_min_card_cost_arrangement(self):
"""Generate a ensemble with minimum cost_cards_only
takes entries with lowest index.
Assumes cards are sorted by price in ascending order"""
    return [np.argmin(np.sum(np.array(e), 1)) for e in self.ensembles]
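
# --- Illustrative sketch, not part of the original source: what the argmin
# above selects. Each ensemble holds candidate entries (lists of per-card
# costs); the entry with the smallest total wins, ties going to the lowest
# index exactly as np.argmin does. Data below is fabricated.
import numpy as np
_ensembles = [
    [[3, 4], [2, 2], [2, 2]],  # totals 7, 4, 4 -> index 1 (first minimum)
    [[1, 1], [5, 0]],          # totals 2, 5    -> index 0
]
assert [np.argmin(np.sum(np.array(e), 1)) for e in _ensembles] == [1, 0]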
"""Generate a ensemble with minimum cost_cards_only
takes entries with lowest index.
Assumes cards are sorted by price in ascending order"""
return [np.argmin(np.sum(np.array(e),1)) for e in self.ensembles] |
Subsets and Splits