Python

def formatByteList(byteList):
    '''
    \brief Format a byte list into a string, which can then be printed.

    For example:
       [0x00,0x11,0x22] -> '(3 bytes) 001122'

    \param[in] byteList A list of integers, each representing a byte.

    \return A string representing the byte list.
    '''
    return '({0} bytes) {1}'.format(len(byteList), ''.join(['%02x' % b for b in byteList]))
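A quick sanity check of the formatter (a sketch added for illustration, not part of the original source); the expected string follows the docstring example:

assert formatByteList([0x00, 0x11, 0x22]) == '(3 bytes) 001122'
print(formatByteList([0x00, 0x11, 0x22]))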
Python

def carry_around_add(a, b):
    '''
    \brief Helper function for checksum calculation.
    '''
    c = a + b
    return (c & 0xffff) + (c >> 16)
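To illustrate the carry-around behaviour (an added sketch): any bit that overflows out of the low 16 bits is wrapped back into the sum, which is what one's-complement addition requires.

assert carry_around_add(0x1234, 0x0001) == 0x1235   # no overflow
assert carry_around_add(0xffff, 0x0001) == 0x0001   # overflow bit wraps around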
Python

def checksum(byteList):
    '''
    \brief Calculate the checksum over a byte list.

    This is the checksum calculation used in e.g. the ICMPv6 header.

    \return The checksum, a 2-byte integer.
    '''
    s = 0
    for i in range(0, len(byteList), 2):
        w = byteList[i] + (byteList[i+1] << 8)
        s = carry_around_add(s, w)
    return ~s & 0xffff
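A worked example (added sketch). Note that the loop consumes two bytes per iteration, so the byte list is expected to have even length. The two 16-bit words below are 0x1100 and 0x3322, their one's-complement sum is 0x4422, and its complement is 0xbbdd:

assert checksum([0x00, 0x11, 0x22, 0x33]) == 0xbbdd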
Python

def _createEchoRequest(self):
    '''
    \brief Create an echo request.

    This function alternates between IPv4 and IPv6 echo requests.
    '''
    # toggle createIPv6 flag
    self.createIPv6 = not self.createIPv6

    # create IPv4 or IPv6 echo request
    if self.createIPv6:
        print('Transmitting IPv6 echo request')
        return self._createIPv6echoRequest()
    else:
        print('Transmitting IPv4 echo request')
        return self._createIPv4echoRequest()
Python

def _createIPv4echoRequest(self):
    '''
    \brief Create a valid IPv4 echo request.
    '''
    echoRequest  = []

    # IPv4 header
    echoRequest += [0x45]                          # Version | IHL
    echoRequest += [0x00]                          # DSCP | ECN
    echoRequest += [0x00,60]                       # Total Length (20 for IPv4 + 40 ICMPv4)
    echoRequest += [0x00,0x00]                     # Identification
    echoRequest += [0x00,0x00]                     # Flags | Fragment Offset
    echoRequest += [128]                           # TTL
    echoRequest += [1]                             # Protocol (1==ICMP)
    echoRequest += [0x00,0x00]                     # Header Checksum (to be filled out later)
    echoRequest += [10,2,0,5]                      # Source IP
    echoRequest += [10,2,0,1]                      # Destination IP

    # calculate IPv4 header checksum
    crc = checksum(echoRequest)
    echoRequest[10] = (crc&0x00ff)>>0
    echoRequest[11] = (crc&0xff00)>>8

    # ICMPv4 header
    echoRequest += [8]                             # Type (8==echo request)
    echoRequest += [0]                             # Code
    echoRequest += [0x00,0x00]                     # Checksum (to be filled out later)
    echoRequest += [0x00,0x00]                     # Identifier
    echoRequest += [0x00,0x00]                     # Sequence Number

    # ICMPv4 payload
    echoRequest += [ord('a')+b for b in range(32)]

    # calculate ICMPv4 checksum
    crc = checksum(echoRequest[20:])
    echoRequest[22] = (crc&0x00ff)>>0
    echoRequest[23] = (crc&0xff00)>>8

    return echoRequest
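Since `self` is never used by `_createIPv4echoRequest`, the function can be exercised standalone. A useful property of the Internet checksum is that recomputing it over bytes that already contain the checksum yields zero; the verification below is an added sketch, not part of the original source:

ipv4 = _createIPv4echoRequest(None)      # self is unused, so None is sufficient here
print(formatByteList(ipv4))
assert checksum(ipv4[:20]) == 0          # IPv4 header verifies
assert checksum(ipv4[20:]) == 0          # ICMPv4 header + payload verifies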
Python

def _createIPv6echoRequest(self):
    '''
    \brief Create an IPv6 echo request.
    '''
    echoRequest  = []

    # IPv6 header
    echoRequest += [0x60,0x00,0x00,0x00]           # Version | Traffic Class | Flow Label
    echoRequest += [0x00, 40]                      # Payload Length
    echoRequest += [58]                            # Next Header (58==ICMPv6)
    echoRequest += [128]                           # Hop Limit
    echoRequest += [0xbb, 0xbb, 0x00, 0x00,
                    0x00, 0x00, 0x00, 0x00,
                    0x00, 0x00, 0x00, 0x00,
                    0x00, 0x00, 0x00, 0x05,]       # source
    echoRequest += [0xbb, 0xbb, 0x00, 0x00,
                    0x00, 0x00, 0x00, 0x00,
                    0x00, 0x00, 0x00, 0x00,
                    0x00, 0x00, 0x00, 0x01,]       # destination

    # ICMPv6 header
    echoRequest += [128]                           # Type (128==echo request)
    echoRequest += [0]                             # Code
    echoRequest += [0x00,0x00]                     # Checksum (to be filled out later)
    echoRequest += [0x00,0x04]                     # Identifier
    echoRequest += [0x00,0x12]                     # Sequence Number

    # ICMPv6 payload
    echoRequest += [ord('a')+b for b in range(32)]

    # calculate ICMPv6 checksum over the IPv6 pseudo-header
    pseudo  = []
    pseudo += echoRequest[8:24]                    # source address
    pseudo += echoRequest[24:40]                   # destination address
    pseudo += [0x00]*3+[len(echoRequest[40:])]     # upper-layer packet length
    pseudo += [0x00]*3                             # zero
    pseudo += [58]                                 # next header
    pseudo += echoRequest[40:]                     # ICMPv6 header+payload

    crc = checksum(pseudo)
    echoRequest[42] = (crc&0x00ff)>>0
    echoRequest[43] = (crc&0xff00)>>8

    return echoRequest
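The same zero-sum check works for the ICMPv6 checksum if the pseudo-header is rebuilt with the checksum field now filled in (an added verification sketch, not part of the original source):

ipv6 = _createIPv6echoRequest(None)              # self is unused here as well
pseudo  = ipv6[8:24] + ipv6[24:40]               # source + destination address
pseudo += [0x00]*3 + [len(ipv6[40:])]            # upper-layer packet length
pseudo += [0x00]*3 + [58]                        # zero + next header
pseudo += ipv6[40:]                              # ICMPv6 header+payload, checksum included
assert checksum(pseudo) == 0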
Python

def forward(
    self,
    input_ids: Optional[Tensor] = None,
    attention_mask: Optional[Tensor] = None,
    token_type_ids: Optional[Tensor] = None,
    inputs_embeds: Optional[Tensor] = None,
    output_attentions=None,
    output_hidden_states=None,
) -> Tensor:
    r"""
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.DPRConfig`) and inputs:
        pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, embeddings_size)`):
            The DPR encoder outputs the `pooler_output` that corresponds to the context representation.
            Last layer hidden-state of the first token of the sequence (classification token),
            further processed by a Linear layer. This output is to be used to embed contexts for
            nearest neighbor queries with question embeddings.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the
            self-attention heads.

    Examples::

        from transformers import DPRContextEncoder, DPRContextEncoderTokenizer
        tokenizer = DPRContextEncoderTokenizer.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base')
        model = DPRContextEncoder.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base')
        input_ids = tokenizer("Hello, is my dog cute ?", return_tensors='pt')["input_ids"]
        embeddings = model(input_ids)[0]  # the embeddings of the given context.
    """
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )

    if input_ids is not None and inputs_embeds is not None:
        raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
    elif input_ids is not None:
        input_shape = input_ids.size()
    elif inputs_embeds is not None:
        input_shape = inputs_embeds.size()[:-1]
    else:
        raise ValueError("You have to specify either input_ids or inputs_embeds")

    device = input_ids.device if input_ids is not None else inputs_embeds.device

    if attention_mask is None:
        attention_mask = (
            torch.ones(input_shape, device=device)
            if input_ids is None
            else (input_ids != self.config.pad_token_id)
        )
    if token_type_ids is None:
        token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

    outputs = self.ctx_encoder(
        input_ids=input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
    )
    sequence_output, pooled_output = outputs[:2]
    return (pooled_output,) + outputs[2:]
Python

def forward(
    self,
    input_ids: Optional[Tensor] = None,
    attention_mask: Optional[Tensor] = None,
    token_type_ids: Optional[Tensor] = None,
    inputs_embeds: Optional[Tensor] = None,
    output_attentions=None,
    output_hidden_states=None,
) -> Tensor:
    r"""
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.DPRConfig`) and inputs:
        pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, embeddings_size)`):
            The DPR encoder outputs the `pooler_output` that corresponds to the question representation.
            Last layer hidden-state of the first token of the sequence (classification token),
            further processed by a Linear layer. This output is to be used to embed questions for
            nearest neighbor queries with context embeddings.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the
            self-attention heads.

    Examples::

        from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer
        tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        model = DPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        input_ids = tokenizer("Hello, is my dog cute ?", return_tensors='pt')["input_ids"]
        embeddings = model(input_ids)[0]  # the embeddings of the given question.
    """
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )

    if input_ids is not None and inputs_embeds is not None:
        raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
    elif input_ids is not None:
        input_shape = input_ids.size()
    elif inputs_embeds is not None:
        input_shape = inputs_embeds.size()[:-1]
    else:
        raise ValueError("You have to specify either input_ids or inputs_embeds")

    device = input_ids.device if input_ids is not None else inputs_embeds.device

    if attention_mask is None:
        attention_mask = (
            torch.ones(input_shape, device=device)
            if input_ids is None
            else (input_ids != self.config.pad_token_id)
        )
    if token_type_ids is None:
        token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

    outputs = self.question_encoder(
        input_ids=input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
    )
    sequence_output, pooled_output = outputs[:2]
    return (pooled_output,) + outputs[2:]
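The context and question encoders are meant to be used together: DPR scores a passage by the dot product between the question embedding and the context embedding. An end-to-end sketch follows (checkpoint names as in the docstrings above; the example sentences are illustrative, not from the original source):

import torch
from transformers import (DPRContextEncoder, DPRContextEncoderTokenizer,
                          DPRQuestionEncoder, DPRQuestionEncoderTokenizer)

q_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
q_encoder   = DPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
c_tokenizer = DPRContextEncoderTokenizer.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base')
c_encoder   = DPRContextEncoder.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base')

question = "Who recorded the song 'What Is Love'?"
contexts = [
    "'What Is Love' is a song recorded by the artist Haddaway",
    "Paris is the capital and most populous city of France",
]

q_emb = q_encoder(q_tokenizer(question, return_tensors='pt')["input_ids"])[0]                # (1, hidden)
c_emb = c_encoder(c_tokenizer(contexts, return_tensors='pt', padding=True)["input_ids"])[0]  # (2, hidden)

scores = q_emb @ c_emb.T               # dot-product relevance scores, shape (1, 2)
best_passage = scores.argmax(dim=1)    # index of the most relevant context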
Python

def forward(
    self,
    input_ids: Optional[Tensor] = None,
    attention_mask: Optional[Tensor] = None,
    inputs_embeds: Optional[Tensor] = None,
    output_attentions: bool = None,
    output_hidden_states: bool = None,
) -> Tuple[Tensor, ...]:
    r"""
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.DPRConfig`) and inputs:
        input_ids (:obj:`torch.FloatTensor` of shape :obj:`(n_passages, sequence_length)`):
            They correspond to the combined `input_ids` from (question + context title + context content).
        start_logits (:obj:`torch.FloatTensor` of shape :obj:`(n_passages, sequence_length)`):
            Logits of the start index of the span for each passage.
        end_logits (:obj:`torch.FloatTensor` of shape :obj:`(n_passages, sequence_length)`):
            Logits of the end index of the span for each passage.
        relevance_logits (:obj:`torch.FloatTensor` of shape :obj:`(n_passages, )`):
            Outputs of the QA classifier of the DPRReader that correspond to the scores of each passage
            to answer the question, compared to all the other passages.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the
            self-attention heads.

    Examples::

        from transformers import DPRReader, DPRReaderTokenizer
        tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base')
        model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base')
        encoded_inputs = tokenizer(
            questions=["What is love ?"],
            titles=["Haddaway"],
            texts=["'What Is Love' is a song recorded by the artist Haddaway"],
            return_tensors='pt'
        )
        outputs = model(**encoded_inputs)
        start_logits = outputs[0]      # The logits of the start of the spans
        end_logits = outputs[1]        # The logits of the end of the spans
        relevance_logits = outputs[2]  # The relevance scores of the passages
    """
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )

    if input_ids is not None and inputs_embeds is not None:
        raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
    elif input_ids is not None:
        input_shape = input_ids.size()
    elif inputs_embeds is not None:
        input_shape = inputs_embeds.size()[:-1]
    else:
        raise ValueError("You have to specify either input_ids or inputs_embeds")

    device = input_ids.device if input_ids is not None else inputs_embeds.device

    if attention_mask is None:
        attention_mask = torch.ones(input_shape, device=device)

    span_outputs = self.span_predictor(
        input_ids,
        attention_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
    )
    start_logits, end_logits, relevance_logits = span_outputs[:3]
    return (start_logits, end_logits, relevance_logits) + span_outputs[3:]
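Continuing the docstring example above, one rough way (an added sketch, not the library's span-decoding helper) to turn the logits into answer text is to take the argmax start position and the best end position at or after it:

start = start_logits[0].argmax().item()
end = start + end_logits[0][start:].argmax().item()
answer_ids = encoded_inputs["input_ids"][0][start:end + 1]
print(tokenizer.decode(answer_ids.tolist()))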
Python

def dtype(self) -> dtype:
    """
    Get torch.dtype from module, assuming that the whole module has one dtype.
    """
    try:
        return next(self.parameters()).dtype
    except StopIteration:
        # For nn.DataParallel compatibility in PyTorch 1.5
        def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
            tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
            return tuples

        gen = self._named_members(get_members_fn=find_tensor_attributes)
        first_tuple = next(gen)
        return first_tuple[1].dtype
"""
Get torch.dtype from module, assuming that the whole module has one dtype.
"""
try:
return next(self.parameters()).dtype
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype |
Python

def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
    """ Tie or clone module weights depending on whether we are using TorchScript or not
    """
    if self.config.torchscript:
        output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
    else:
        output_embeddings.weight = input_embeddings.weight

    if getattr(output_embeddings, "bias", None) is not None:
        output_embeddings.bias.data = torch.nn.functional.pad(
            output_embeddings.bias.data,
            (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],),
            "constant",
            0,
        )
    if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
        output_embeddings.out_features = input_embeddings.num_embeddings
""" Tie or clone module weights depending of whether we are using TorchScript or not
"""
if self.config.torchscript:
output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
else:
output_embeddings.weight = input_embeddings.weight
if getattr(output_embeddings, "bias", None) is not None:
output_embeddings.bias.data = torch.nn.functional.pad(
output_embeddings.bias.data,
(0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],),
"constant",
0,
)
if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
output_embeddings.out_features = input_embeddings.num_embeddings |
Python

def resize_token_embeddings(self, new_num_tokens: Optional[int] = None):
    """ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
    Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.

    Arguments:
        new_num_tokens: (`optional`) int:
            New number of tokens in the embedding matrix. Increasing the size will add newly initialized
            vectors at the end. Reducing the size will remove vectors from the end.
            If not provided or None: does nothing and just returns a pointer to the input tokens
            ``torch.nn.Embedding`` Module of the model.

    Return: ``torch.nn.Embedding``
        Pointer to the input tokens Embedding Module of the model
    """
    base_model = getattr(self, self.base_model_prefix, self)  # get the base model if needed
    model_embeds = base_model._resize_token_embeddings(new_num_tokens)
    if new_num_tokens is None:
        return model_embeds

    # Update base model and current model config
    self.config.vocab_size = new_num_tokens
    base_model.vocab_size = new_num_tokens

    # Tie weights again if needed
    self.tie_weights()

    return model_embeds
""" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
Arguments:
new_num_tokens: (`optional`) int:
New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end.
If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model.
Return: ``torch.nn.Embeddings``
Pointer to the input tokens Embeddings Module of the model
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
self.tie_weights()
return model_embeds |
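A typical calling pattern (a sketch with an assumed BERT checkpoint): add new tokens to the tokenizer, then resize the model's input embeddings so the vocabulary sizes match again.

from transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')

num_added = tokenizer.add_tokens(['[NEW_TOK1]', '[NEW_TOK2]'])
embeddings = model.resize_token_embeddings(len(tokenizer))
assert embeddings.num_embeddings == model.config.vocab_size == len(tokenizer)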
Python

def _get_resized_embeddings(
    self, old_embeddings: torch.nn.Embedding, new_num_tokens: Optional[int] = None
) -> torch.nn.Embedding:
    """ Build a resized Embedding Module from a provided token Embedding Module.
    Increasing the size will add newly initialized vectors at the end.
    Reducing the size will remove vectors from the end.

    Args:
        old_embeddings: ``torch.nn.Embedding``
            Old embeddings to be resized.
        new_num_tokens: (`optional`) int
            New number of tokens in the embedding matrix.
            Increasing the size will add newly initialized vectors at the end.
            Reducing the size will remove vectors from the end.
            If not provided or None: return the provided token Embedding Module.

    Return: ``torch.nn.Embedding``
        Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
    """
    if new_num_tokens is None:
        return old_embeddings

    old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
    if old_num_tokens == new_num_tokens:
        return old_embeddings

    # Build new embeddings
    new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
    new_embeddings.to(old_embeddings.weight.device)

    # initialize all new embeddings (in particular added tokens)
    self._init_weights(new_embeddings)

    # Copy token embeddings from the previous weights
    num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
    new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]

    return new_embeddings
Python

def prune_heads(self, heads_to_prune: Dict):
    """ Prunes heads of the base model.

    Arguments:
        heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being
            the list of heads to prune in said layer (list of `int`).
            E.g. {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
    """
    # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
    for layer, heads in heads_to_prune.items():
        union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
        self.config.pruned_heads[layer] = list(union_heads)  # Unfortunately we have to store it as list for JSON

    self.base_model._prune_heads(heads_to_prune)
""" Prunes heads of the base model.
Arguments:
heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`).
E.g. {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
for layer, heads in heads_to_prune.items():
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON
self.base_model._prune_heads(heads_to_prune) |
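Usage sketch (assumed BERT checkpoint): prune two heads in layer 0 and one in layer 2; the pruned heads are recorded in `config.pruned_heads` so they stay pruned after saving and reloading.

from transformers import BertModel

model = BertModel.from_pretrained('bert-base-uncased')
model.prune_heads({0: [0, 2], 2: [5]})
print(model.config.pruned_heads)   # e.g. {0: [0, 2], 2: [5]}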
Python

def save_pretrained(self, save_directory):
    """ Save a model and its configuration file to a directory, so that it
    can be re-loaded using the :func:`~transformers.PreTrainedModel.from_pretrained` class method.

    Arguments:
        save_directory: directory to which to save.
    """
    if os.path.isfile(save_directory):
        logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
        return
    os.makedirs(save_directory, exist_ok=True)

    # Only save the model itself if we are using distributed training
    model_to_save = self.module if hasattr(self, "module") else self

    # Attach architecture to the config
    model_to_save.config.architectures = [model_to_save.__class__.__name__]

    # If we save using the predefined names, we can load using `from_pretrained`
    output_model_file = os.path.join(save_directory, WEIGHTS_NAME)

    if getattr(self.config, "xla_device", False):
        import torch_xla.core.xla_model as xm

        if xm.is_master_ordinal():
            # Save configuration file
            model_to_save.config.save_pretrained(save_directory)
        # xm.save takes care of saving only from master
        xm.save(model_to_save.state_dict(), output_model_file)
    else:
        model_to_save.config.save_pretrained(save_directory)
        torch.save(model_to_save.state_dict(), output_model_file)

    logger.info("Model weights saved in {}".format(output_model_file))
""" Save a model and its configuration file to a directory, so that it
can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method.
Arguments:
save_directory: directory to which to save.
"""
if os.path.isfile(save_directory):
logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
if getattr(self.config, "xla_device", False):
import torch_xla.core.xla_model as xm
if xm.is_master_ordinal():
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# xm.save takes care of saving only from master
xm.save(model_to_save.state_dict(), output_model_file)
else:
model_to_save.config.save_pretrained(save_directory)
torch.save(model_to_save.state_dict(), output_model_file)
logger.info("Model weights saved in {}".format(output_model_file)) |
Python

def forward(
    self,
    input_ids=None,
    past=None,
    attention_mask=None,
    token_type_ids=None,
    position_ids=None,
    head_mask=None,
    inputs_embeds=None,
    use_cache=None,
    output_attentions=None,
    output_hidden_states=None,
):
    r"""
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.CTRLConfig`) and inputs:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the last layer of the model.
        past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the
            self-attention heads.
    """
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    use_cache = use_cache if use_cache is not None else self.config.use_cache
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )

    if input_ids is not None and inputs_embeds is not None:
        raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
    elif input_ids is not None:
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])
        batch_size = input_ids.shape[0]
    elif inputs_embeds is not None:
        input_shape = inputs_embeds.size()[:-1]
        batch_size = inputs_embeds.shape[0]
    else:
        raise ValueError("You have to specify either input_ids or inputs_embeds")

    if past is None:
        past_length = 0
        past = [None] * len(self.h)
    else:
        past_length = past[0][0].size(-2)
    if position_ids is None:
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
        position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

    # Attention mask.
    if attention_mask is not None:
        assert batch_size > 0, "batch_size has to be defined and > 0"
        attention_mask = attention_mask.view(batch_size, -1)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # This attention mask is simpler than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        attention_mask = (1.0 - attention_mask) * -10000.0

    # Prepare head mask if needed
    head_mask = self.get_head_mask(head_mask, self.config.n_layer)

    if token_type_ids is not None:
        token_type_ids = token_type_ids.view(-1, input_shape[-1])
        token_type_embeds = self.w(token_type_ids)
        token_type_embeds *= np.sqrt(self.d_model_size)
    else:
        token_type_embeds = 0
    position_ids = position_ids.view(-1, input_shape[-1])

    if inputs_embeds is None:
        inputs_embeds = self.w(input_ids)
    # inputs_embeds = embedded.unsqueeze(0) if len(input_ids.shape)<2 else embedded
    seq_len = input_shape[-1]
    mask = torch.triu(torch.ones(seq_len + past_length, seq_len + past_length), 1).to(inputs_embeds.device)

    inputs_embeds *= np.sqrt(self.d_model_size)

    pos_embeds = self.pos_encoding[position_ids, :].to(inputs_embeds.device)

    hidden_states = inputs_embeds + pos_embeds + token_type_embeds

    hidden_states = self.dropout(hidden_states)

    output_shape = input_shape + (inputs_embeds.size(-1),)
    presents = ()
    all_hidden_states = ()
    all_attentions = []
    for i, (h, layer_past) in enumerate(zip(self.h, past)):
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
        outputs = h(
            hidden_states,
            mask,
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask[i],
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        hidden_states, present = outputs[:2]
        if use_cache is True:
            presents = presents + (present,)

        if output_attentions:
            all_attentions.append(outputs[2])

    hidden_states = self.layernorm(hidden_states)
    hidden_states = hidden_states.view(*output_shape)
    if output_hidden_states:
        all_hidden_states = all_hidden_states + (hidden_states,)

    outputs = (hidden_states,)
    if use_cache is True:
        outputs = outputs + (presents,)
    if output_hidden_states:
        outputs = outputs + (all_hidden_states,)
    if output_attentions:
        # let the number of heads free (-1) so we can extract attention even after head pruning
        attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
        all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
        outputs = outputs + (all_attentions,)
    return outputs
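A sketch of how `past` and `use_cache` speed up sequential decoding with the base CTRL model: run the prompt once, then feed only the newest token together with the cached key/value states. The checkpoint name follows the library's published `ctrl` weights; the prompt text is illustrative.

import torch
from transformers import CTRLTokenizer, CTRLModel

tokenizer = CTRLTokenizer.from_pretrained('ctrl')
model = CTRLModel.from_pretrained('ctrl')

input_ids = tokenizer("Links Hello, my dog is cute", return_tensors='pt')["input_ids"]
hidden_states, past = model(input_ids, use_cache=True)[:2]

next_token = torch.tensor([[tokenizer.encode(" and")[0]]])
hidden_states, past = model(next_token, past=past, use_cache=True)[:2]   # only the new token is processed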
Python

def forward(
    self,
    input_ids=None,
    past=None,
    attention_mask=None,
    token_type_ids=None,
    position_ids=None,
    head_mask=None,
    inputs_embeds=None,
    labels=None,
    use_cache=None,
    output_attentions=None,
    output_hidden_states=None,
):
    r"""
    labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
        Labels for language modeling.
        Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``.
        Indices are selected in ``[-100, 0, ..., config.vocab_size]``.
        All labels set to ``-100`` are ignored (masked); the loss is only
        computed for labels in ``[0, ..., config.vocab_size]``.

    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.CTRLConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
            Language modeling loss.
        prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the
            self-attention heads.
    """
    transformer_outputs = self.transformer(
        input_ids,
        past=past,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
    )

    hidden_states = transformer_outputs[0]
    lm_logits = self.lm_head(hidden_states)

    outputs = (lm_logits,) + transformer_outputs[1:]

    if labels is not None:
        # Shift so that tokens < n predict n
        shift_logits = lm_logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()
        # Flatten the tokens
        loss_fct = CrossEntropyLoss()
        loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
        outputs = (loss,) + outputs

    return outputs
self,
input_ids=None,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.CTRLConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
Language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
transformer_outputs = self.transformer(
input_ids,
past=past,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
outputs = (lm_logits,) + transformer_outputs[1:]
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs |
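A minimal sketch of the label-shifting step in the loss computation above, on toy tensors; the vocabulary size and logits below are arbitrary and only illustrate how positions 0..n-2 of the logits are scored against tokens 1..n-1 of the labels.

import torch
from torch.nn import CrossEntropyLoss

vocab_size = 5
input_ids = torch.tensor([[1, 2, 3, 4]])            # batch of 1, sequence of 4 tokens
lm_logits = torch.randn(1, 4, vocab_size)           # stand-in for the model's prediction scores
labels = input_ids.clone()                          # labels == input_ids; the shift happens below

shift_logits = lm_logits[..., :-1, :].contiguous()  # (1, 3, vocab_size): predictions for tokens 1..3
shift_labels = labels[..., 1:].contiguous()         # (1, 3): the tokens being predicted

loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss.item())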
Python

def cached_path(
url_or_filename,
cache_dir=None,
force_download=False,
proxies=None,
resume_download=False,
user_agent: Union[Dict, str, None] = None,
extract_compressed_file=False,
force_extract=False,
local_files_only=False,
) -> Optional[str]:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
Args:
cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).
force_download: if True, re-download the file even if it's already cached in the cache dir.
resume_download: if True, resume the download if an incompletely received file is found.
user_agent: Optional string or dict that will be appended to the user-agent on remote requests.
extract_compressed_file: if True and the path points to a zip or tar file, extract the compressed
file in a folder alongside the archive.
force_extract: if True, when extract_compressed_file is True and the archive was already extracted,
re-extract the archive and override the folder where it was extracted.
Return:
None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
Local path (string) otherwise
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if is_remote_url(url_or_filename):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
url_or_filename,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
user_agent=user_agent,
local_files_only=local_files_only,
)
elif os.path.exists(url_or_filename):
# File, and it exists.
output_path = url_or_filename
elif urlparse(url_or_filename).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
if extract_compressed_file:
if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
output_dir, output_file = os.path.split(output_path)
output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lock_path = output_path + ".lock"
with FileLock(lock_path):
shutil.rmtree(output_path_extracted, ignore_errors=True)
os.makedirs(output_path_extracted)
if is_zipfile(output_path):
with ZipFile(output_path, "r") as zip_file:
zip_file.extractall(output_path_extracted)
zip_file.close()
elif tarfile.is_tarfile(output_path):
tar_file = tarfile.open(output_path)
tar_file.extractall(output_path_extracted)
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
return output_path_extracted
return output_path
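A hypothetical usage sketch of cached_path; the import path follows older transformers releases where this helper lives in file_utils, and the URL and cache directory below are placeholders.

from transformers.file_utils import cached_path  # import location assumed

config_url = "https://example.com/some-model/config.json"  # placeholder URL
local_file = cached_path(config_url, cache_dir="/tmp/my_cache")
print(local_file)            # path of the cached copy inside /tmp/my_cache

# An existing local path is returned unchanged.
assert cached_path(local_file) == local_file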
Python

def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int):
"""Matrix multiplication of query and key tensors using with a sliding window attention pattern.
This implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer)
with an overlap of size window_overlap"""
batch_size, seq_len, num_heads, head_dim = query.size()
assert (
seq_len % (window_overlap * 2) == 0
), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}"
assert query.size() == key.size()
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
chunked_query = self._chunk(query, window_overlap)
chunked_key = self._chunk(key, window_overlap)
# matrix multiplication
# bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcxy: batch_size * num_heads x chunks x 2window_overlap x window_overlap
chunked_attention_scores = torch.einsum("bcxd,bcyd->bcxy", (chunked_query, chunked_key)) # multiply
# convert diagonals into columns
diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(
chunked_attention_scores, padding=(0, 0, 0, 1)
)
# allocate space for the overall attention matrix where the chunks are combined. The last dimension
# has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to
# window_overlap previous words). The following column is attention score from each word to itself, then
# followed by window_overlap columns for the upper triangle.
diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty(
(batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1)
)
# copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
# - copying the main diagonal and the upper triangle
diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, :, :window_overlap, : window_overlap + 1
]
diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, -1, window_overlap:, : window_overlap + 1
]
# - copying the lower triangle
diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[
:, :, -(window_overlap + 1) : -1, window_overlap + 1 :
]
diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[
:, 0, : window_overlap - 1, 1 - window_overlap :
]
# separate batch_size and num_heads dimensions again
diagonal_attention_scores = diagonal_attention_scores.view(
batch_size, num_heads, seq_len, 2 * window_overlap + 1
).transpose(2, 1)
self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
return diagonal_attention_scores
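The _chunk helper used above is not shown here. As an illustration of the overlapping chunking it is expected to perform, the sketch below builds chunks of length 2 * window_overlap with a stride of window_overlap using torch.Tensor.unfold; the helper name and exact mechanics are assumptions, not the library's implementation.

import torch

def overlapping_chunks(x: torch.Tensor, window_overlap: int) -> torch.Tensor:
    # x: (batch_size * num_heads, seq_len, head_dim); a guess at what _chunk produces
    chunks = x.unfold(1, 2 * window_overlap, window_overlap)  # strided view over seq_len
    return chunks.transpose(-1, -2)  # (batch*heads, n_chunks, 2*window_overlap, head_dim)

bh, seq_len, head_dim, w = 2, 8, 3, 2
x = torch.arange(bh * seq_len * head_dim, dtype=torch.float32).view(bh, seq_len, head_dim)
print(overlapping_chunks(x, w).shape)  # torch.Size([2, 3, 4, 3]): seq_len // w - 1 = 3 chunks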
Python

def _sliding_chunks_matmul_attn_probs_value(
self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int
):
"""Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors.
Returned tensor will be of the same shape as `value`, i.e. (batch_size, seq_len, num_heads, head_dim)"""
batch_size, seq_len, num_heads, head_dim = value.size()
assert seq_len % (window_overlap * 2) == 0
assert attn_probs.size()[:3] == value.size()[:3]
assert attn_probs.size(3) == 2 * window_overlap + 1
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap
chunked_attn_probs = attn_probs.transpose(1, 2).reshape(
batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1
)
# group batch_size and num_heads dimensions into one
value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
# pad seq_len with window_overlap at the beginning of the sequence and another window_overlap at the end
padded_value = F.pad(value, (0, 0, window_overlap, window_overlap), value=-1)
# chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim)
chunked_value_stride = padded_value.stride()
chunked_value_stride = (
chunked_value_stride[0],
window_overlap * chunked_value_stride[1],
chunked_value_stride[1],
chunked_value_stride[2],
)
chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride)
chunked_attn_probs = self._pad_by_window_overlap_except_last_row(chunked_attn_probs)
context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value))
return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2)
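A tiny numeric illustration of the strided chunking of padded_value above (the same size/stride construction, with batch_size * num_heads = 1, seq_len = 8, head_dim = 1 and window_overlap = 2); the values are arbitrary.

import torch
import torch.nn.functional as F

bh, seq_len, head_dim, w = 1, 8, 1, 2
value = torch.arange(float(seq_len)).view(bh, seq_len, head_dim)
padded = F.pad(value, (0, 0, w, w), value=-1)                 # pad seq_len by w on both sides
chunks_count = seq_len // w - 1
size = (bh, chunks_count + 1, 3 * w, head_dim)
stride = padded.stride()
stride = (stride[0], w * stride[1], stride[1], stride[2])
print(padded.as_strided(size=size, stride=stride).squeeze(-1))
# tensor([[[-1., -1.,  0.,  1.,  2.,  3.],
#          [ 0.,  1.,  2.,  3.,  4.,  5.],
#          [ 2.,  3.,  4.,  5.,  6.,  7.],
#          [ 4.,  5.,  6.,  7., -1., -1.]]])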
Python

def _pad_to_window_size(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
token_type_ids: torch.Tensor,
position_ids: torch.Tensor,
inputs_embeds: torch.Tensor,
pad_token_id: int,
):
"""A helper function to pad tokens and mask to work with implementation of Longformer selfattention."""
# padding
attention_window = (
self.config.attention_window
if isinstance(self.config.attention_window, int)
else max(self.config.attention_window)
)
assert attention_window % 2 == 0, f"`attention_window` should be an even value. Given {attention_window}"
input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
batch_size, seq_len = input_shape[:2]
padding_len = (attention_window - seq_len % attention_window) % attention_window
if padding_len > 0:
logger.info(
"Input ids are automatically padded from {} to {} to be a multiple of `config.attention_window`: {}".format(
seq_len, seq_len + padding_len, attention_window
)
)
if input_ids is not None:
input_ids = F.pad(input_ids, (0, padding_len), value=pad_token_id)
if position_ids is not None:
# pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings
position_ids = F.pad(position_ids, (0, padding_len), value=pad_token_id)
if inputs_embeds is not None:
input_ids_padding = inputs_embeds.new_full(
(batch_size, padding_len), self.config.pad_token_id, dtype=torch.long,
)
inputs_embeds_padding = self.embeddings(input_ids_padding)
inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)
attention_mask = F.pad(attention_mask, (0, padding_len), value=False) # no attention on the padding tokens
token_type_ids = F.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0
return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
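A quick check of the padding arithmetic used above: padding_len rounds seq_len up to the next multiple of attention_window and is zero when seq_len already is one (the values below are arbitrary).

attention_window = 512
for seq_len in (100, 512, 513, 1024):
    padding_len = (attention_window - seq_len % attention_window) % attention_window
    assert (seq_len + padding_len) % attention_window == 0
    print(seq_len, "->", seq_len + padding_len)   # 100 -> 512, 512 -> 512, 513 -> 1024, 1024 -> 1024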
Python

def objective(nodes: list, adj_matrix: np.ndarray, diff: np.ndarray) -> tuple:
""" Compute the difference of the subnetwork associated to the nodes received as argument
between the group Gmax and the group Gmin (the difference has been precomputed in the variable
diff) returning the average. """
subnetwork = np.zeros(adj_matrix.shape, dtype=np.int8)
subnetwork[tuple(np.meshgrid(nodes, nodes))] = 1
subnetwork = np.multiply(subnetwork, adj_matrix)
coords = np.where(subnetwork == 1)
return float(np.mean(diff[coords])),
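A small worked example of objective on a 4-node toy graph, assuming the function above is in scope; the adjacency matrix and the precomputed diff values are made up.

import numpy as np

adj_matrix = np.array([[0, 1, 0, 0],
                       [1, 0, 1, 0],
                       [0, 1, 0, 1],
                       [0, 0, 1, 0]], dtype=np.int8)   # chain 0-1-2-3, both directions
diff = np.arange(16, dtype=float).reshape(4, 4)        # Gmax - Gmin difference per edge

score, = objective([0, 1, 2], adj_matrix, diff)
print(score)  # edges (0,1), (1,0), (1,2), (2,1) -> (1 + 4 + 6 + 9) / 4 = 5.0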
Python

def pheromoneConvergence(report: Report, output: str = 'PheromoneConvergence.mp4',
color: str = 'Oranges'):
"""
Method that generates a video showing how the values of the pheromone matrix evolve throughout
the iterations of the algorithm.
Parameters
----------
report: antco.Report
antco.report.Report instance returned by the antco.run() function.
output: str
Name of the output file under which the video will be saved.
color: str
Matplotlib colormap used to represent the pheromone values.
"""
# Check that the parameters necessary to represent convergence can be obtained.
try:
report.get('pheromones')
except Exception:
raise Exception(
'The Report instance does not have the "pheromones" value, make sure you have saved '
'the "pheromones" value throughout the interactions of the algorithm using the method '
'report.save("pheromones").')
pheromone_evolution = report.get('pheromones')
# Get max and min value
min_val, max_val = None, None
for values in pheromone_evolution.values():
p_min = np.min(values)
p_max = np.max(values)
if min_val is None or p_min < min_val:
min_val = p_min
if max_val is None or p_max > max_val:
max_val = p_max
cmap = plt.cm.get_cmap(color)
fig, ax = plt.subplots()
ims = []
for it, values in pheromone_evolution.items():
im = ax.imshow(values, vmin=min_val, vmax=max_val, cmap=cmap, animated=True)
if it == 1: # Initial frame
ax.imshow(values, vmin=min_val, vmax=max_val, cmap=cmap)
ims.append([im])
ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True, repeat_delay=1000)
# Save animation
ani.save(output)
plt.show()
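A hypothetical usage sketch of pheromoneConvergence. It builds a throwaway Report filled with random matrices just to exercise the plotting code (real reports come from antco.run()); the Report constructor argument mirrors the one used later in this file, the import path is assumed from the antco.report.Report references, and saving to mp4 requires ffmpeg.

import numpy as np
from antco.report import Report  # import path assumed

report = Report({'BranchingFactor': {'lambda_values': [0.5]}})
for it in range(1, 11):
    report.save(it, pheromones=np.random.rand(5, 5))   # fake pheromone matrices

pheromoneConvergence(report, output='pheromones.mp4', color='Blues')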
Python

def basic(aco_obj: ACO, metaheuristic: MetaHeuristic = None, apply_meta_each: int = 1,
scores_decay: DecaySchedule = None, evaporation_decay: DecaySchedule = None,
report: Report = None, save_pheromones: bool = True, verbose: bool = True) -> Report:
"""
Function that executes a simple Ant Colony Optimization algorithm based on the parameters
defined in the ACO object.
Parameters
----------
aco_obj: antco.ACO
Initialised antco.ACO instance, for more information on this type of object use:
help(antco.ACO)
metaheuristic: antco.hybrid.base.MetaHeuristic subclass, default=None
Instance following the interface defined in antco.extensions.MetaHeuristic. For more info
use help(antco.extensions.MetaHeuristic).
apply_meta_each: int, default=1
Parameter indicating how often (every how many generations) the metaheuristic will be
applied to refine the solutions given by the ants.
scores_decay: antco.optim.DecaySchedule subclass, default=None
Instance of an antco.optim.DecaySchedule subclass representing a decay schedule that will be
applied to the score values with which the pheromone values are updated in each iteration.
For more info use help(antco.optim.DecaySchedule).
evaporation_decay: antco.optim.DecaySchedule subclass, default=None
Same as scores_decay, in this case used to update the evaporation rate value.
For more info use help(antco.optim.DecaySchedule).
report: antco.report.Report, default=None
antco.report.Report instance, by default a report will be initialised.
save_pheromones: bool, default=True
Value indicating whether to store in the report the values of the pheromone matrix at each
iteration. If True these may be retrieved by:
>>> report.get('pheromones')
verbose: bool, default=True
Indicates whether to show convergence information during the execution of the algorithm.
Returns
-------
:antco.report.Report
Returns a antco.report.Report instance with information about the convergence of the
algorithm. The convergence parameters can be accessed via:
>>> report = antco.algorithm.basic(...)
>>> report.get('min_cost')
>>> report.get('mean_cost')
>>> report.get('max_cost')
For more info about Report instances use help(antco.report.Report).
"""
# Get parameters from aco_obj
seed = aco_obj.seed
graph = aco_obj.graph
iterations = aco_obj.iterations
n_jobs = aco_obj.n_jobs
ants = aco_obj.ants
H = aco_obj.heuristic
P = aco_obj.pheromones
alpha = aco_obj.alpha
beta = aco_obj.beta
if aco_obj.precompute_heuristic:
H = np.power(H, beta)
pheromone_update_kw = aco_obj.pheromone_update_kw
rho = pheromone_update_kw['rho']  # Get evaporation parameter
del pheromone_update_kw['rho']
updatePheromones = aco_obj.updatePheromones
objectiveFunction = aco_obj.objectiveFunction
tol = aco_obj.tol
fixed = aco_obj.fixed_positions
accessory_node = aco_obj.accessory_node
Q = aco_obj.Q
R = aco_obj.R
scaleScores = aco_obj.scaleScores
if report is None: # Initialize a Report instance (if not provided)
report = Report({'BranchingFactor': {'lambda_values': [0.5]}})
if seed is not None: # Set random state
random.seed(seed)
np.random.seed(seed)
if fixed and accessory_node is None: # Put ants in fixed positions (if specified)
fixedPositions(ants, graph)
# If an accessory node has been specified, place the ants on that node
if accessory_node is not None:
for ant in ants:
ant.setInitialPosition(accessory_node)
# Pre-compute the array to store the ant scores
ant_scores, norm_scores = np.empty(shape=len(ants)), np.empty(shape=len(ants))
current_iteration = 1
it_without_improvements = 0
best_score = -float('inf')
while current_iteration <= iterations:
seeds = np.random.randint(np.iinfo(np.int32).max, size=len(ants))
ants = generatePaths( # Graph exploration
ants=ants, graph=graph, H=H, P=P, alpha=alpha, beta=beta, Q=Q, R=R, n_jobs=n_jobs,
exp_heuristic=False, seeds=seeds)
if metaheuristic is None or current_iteration % apply_meta_each != 0:
# Evaluate ant paths using the objectiveFunction function (it will be maximized)
ant_scores = evaluateAnts(ants=ants, objectiveFunction=objectiveFunction)
else:
ants, ant_scores = metaheuristic(ants)
# Update best score and save best solution
new_best = updateReportWithBest(
ants=ants, scores=ant_scores, best_score=best_score, report=report,
iteration=current_iteration)
if new_best > best_score:
best_score = new_best
it_without_improvements = 0
else:
it_without_improvements += 1
# Scale scores
if scaleScores is not None:
norm_scores = deepcopy(ant_scores) # Copy scores
norm_scores = scaleScores(norm_scores, best_score)
else:
norm_scores = ant_scores
# Update pheromones according to the scores
updatePheromones(
paths=np.array([ant.adj_matrix for ant in ants], dtype=np.int8), P=P,
ant_scores=norm_scores if scores_decay is None else norm_scores * scores_decay(
current_iteration),
rho=rho if evaporation_decay is None else rho * evaporation_decay(current_iteration),
**pheromone_update_kw)
# Compute convergence statistics
mean_scores = np.mean(ant_scores)
min_score = np.min(ant_scores)
max_score = np.max(ant_scores)
report.save(current_iteration, mean_cost=mean_scores, max_cost=max_score)
if save_pheromones: # Save pheromone values
report.save(current_iteration, pheromones=deepcopy(P))
# Compute monitoring metrics
report.computeMetrics(current_iteration, P, graph)
if metaheuristic is None:
if not fixed and accessory_node is None: # Restart ants initial position
deleteInitialPosition(ants)
else:
# Reposition the ants
ants = aco_obj.ants
if fixed and accessory_node is None: # Put ants in fixed positions (if specified)
fixedPositions(ants, graph)
# If an accessory node has been specified, place the ants on that node
if accessory_node is not None:
for ant in ants:
ant.setInitialPosition(accessory_node)
# After several generations without improvements, do an early stopping of the algorithm.
if it_without_improvements > tol: break
if verbose:
sys.stdout.write('\rCost: Mean: %.4f (Min: %.4f Max: %.4f) (iteration %d)' %
(float(mean_scores), min_score, max_score, current_iteration))
current_iteration += 1
return report
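A hypothetical end-to-end sketch of running basic(). The keyword names mirror the attributes read from aco_obj above and the required arguments validated by checkParameters further down, but the exact antco.ACO constructor signature is an assumption, and my_objective stands in for a user-defined antco ObjectiveFunction implementation.

import numpy as np
import antco

n_nodes = 20
graph = (np.random.rand(n_nodes, n_nodes) > 0.5).astype(np.int8)   # placeholder adjacency matrix
heuristic = np.random.rand(n_nodes, n_nodes)                       # placeholder edge desirability

aco = antco.ACO(graph=graph, heuristic=heuristic, objectiveFunction=my_objective,  # hypothetical call
                n_ants=30, iterations=100)
report = antco.algorithm.basic(aco, verbose=True)
print(report.get('max_cost'))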
Python

def bagOfAnts(aco_obj: ACO, bag_size: int, out_of_bag_size: int = 0,
metaheuristic: MetaHeuristic = None, apply_meta_each: int = 1,
scores_decay: DecaySchedule = None, evaporation_decay: DecaySchedule = None,
report: Report = None, save_pheromones: bool = True, verbose: bool = True) -> Report:
"""
Function that executes an Ant Colony Optimization algorithm based on the parameters defined in
the ACO object. This version of the algorithm maintains a bag of ants (of the size specified in
the execution parameters) in which the best ants found so far are preserved. This bag is the one
used to update the values of the pheromone matrix (therefore, note that if an elitist strategy
is used to update the pheromone values, the bag_size passed as an argument to this function and
the bag size used by the pheromone update strategy must be congruent).
Parameters
----------
aco_obj: antco.ACO
Initialised antco.ACO instance, for more information on this type of object use:
help(antco.ACO)
bag_size: int
Elite size maintained and used to update pheromone matrix values.
out_of_bag_size: int, default=0
Number of non-elite ants from the current iteration to include in the bag of ants.
metaheuristic: antco.hybrid.base.MetaHeuristic subclass, default=None
Instance following the interface defined in antco.extensions.MetaHeuristic. For more info
use help(antco.extensions.MetaHeuristic).
apply_meta_each: int, default=1
Parameter indicating how often (every how many generations) the metaheuristic will be
applied to refine the solutions given by the ants.
scores_decay: antco.optim.DecaySchedule subclass, default=None
Instance of an antco.optim.DecaySchedule subclass representing a decay schedule that will be
applied to the score values with which the pheromone values are updated in each iteration.
For more info use help(antco.optim.DecaySchedule).
evaporation_decay: antco.optim.DecaySchedule subclass, default=None
Same as scores_decay, in this case used to update the evaporation rate value.
For more info use help(antco.optim.DecaySchedule).
report: antco.report.Report, default=None
antco.report.Report instance, by default a report will be initialised.
save_pheromones: bool, default=True
Value indicating whether to store in the report the values of the pheromone matrix at each
iteration. If True these may be retrieved by:
>>> report.get('pheromones')
verbose: bool, default=True
Indicates whether to show convergence information during the execution of the algorithm.
Returns
-------
:antco.report.Report
Returns a antco.report.Report instance with information about the convergence of the
algorithm. The convergence parameters can be accessed via:
>>> report = bagOfAnts(...)
>>> report.get('min_cost')
>>> report.get('mean_cost')
>>> report.get('max_cost')
For more info about Report instances use help(antco.report.Report).
"""
# Get parameters from aco_obj
seed = aco_obj.seed
graph = aco_obj.graph
iterations = aco_obj.iterations
n_jobs = aco_obj.n_jobs
ants = aco_obj.ants
H = aco_obj.heuristic
P = aco_obj.pheromones
alpha = aco_obj.alpha
beta = aco_obj.beta
if aco_obj.precompute_heuristic:
H = np.power(H, beta)
pheromone_update_kw = aco_obj.pheromone_update_kw
rho = pheromone_update_kw['rho']  # Get evaporation parameter
del pheromone_update_kw['rho']
updatePheromones = aco_obj.updatePheromones
objectiveFunction = aco_obj.objectiveFunction
tol = aco_obj.tol
fixed = aco_obj.fixed_positions
accessory_node = aco_obj.accessory_node
Q = aco_obj.Q
R = aco_obj.R
scaleScores = aco_obj.scaleScores
if report is None: # Initialize a Report instance (if not provided)
report = Report({'BranchingFactor': {'lambda_values': [0.5]}})
if seed is not None: # Set random state
random.seed(seed)
np.random.seed(seed)
if fixed and accessory_node is None: # Put ants in fixed positions (if specified)
fixedPositions(ants, graph)
# If an accessory node has been specified, place the ants on that node
if accessory_node is not None:
for ant in ants:
ant.setInitialPosition(accessory_node)
# Pre-compute the array to store the ant scores
ant_scores, norm_scores = np.empty(shape=len(ants)), np.empty(shape=bag_size+out_of_bag_size)
bag_of_ants = None
boa_scores = np.full(shape=bag_size+out_of_bag_size, fill_value=-999_999.999)
current_iteration = 1
it_without_improvements = 0
best_score = -float('inf')
while current_iteration <= iterations:
seeds = np.random.randint(np.iinfo(np.int32).max, size=len(ants))
ants = generatePaths( # Graph exploration
ants=ants, graph=graph, H=H, P=P, alpha=alpha, beta=beta, Q=Q, R=R,
n_jobs=n_jobs, exp_heuristic=False, seeds=seeds)
if metaheuristic is None or current_iteration % apply_meta_each != 0:
# Evaluate ant paths using the objectiveFunction function (it will be maximized)
ant_scores = evaluateAnts(ants=ants, objectiveFunction=objectiveFunction)
else:
ants, ant_scores = metaheuristic(ants)
assert len(ants) >= bag_size + out_of_bag_size, \
'The number of ants returned by the metaheuristic strategy is lower than the ' \
'number of ants that make up the bag of best ants. '
# HACK: Elite upgrade (the current implementation is inefficient)
if bag_of_ants is None:
ordered_scores = np.argsort(ant_scores)[::-1] # Descending order (best first)
bag_of_ants = [ants[ordered_scores[i]] for i in range(bag_size + out_of_bag_size)]
boa_scores = np.array(ant_scores)[ordered_scores][:bag_size + out_of_bag_size]
boa_scores = np.array(boa_scores)
else:
all_scores = np.append(boa_scores[:bag_size], ant_scores)
all_ants = bag_of_ants[:bag_size] + ants
ordered_scores = np.argsort(all_scores)[::-1] # Descending order (best first)
bag_of_ants = [all_ants[ordered_scores[i]] for i in range(bag_size + out_of_bag_size)]
boa_scores = np.array(all_scores)[ordered_scores][:bag_size + out_of_bag_size]
# Update best score and save best solution
new_best = updateReportWithBest(
ants=bag_of_ants, scores=boa_scores, best_score=best_score, report=report,
iteration=current_iteration)
if new_best > best_score:
best_score = new_best
it_without_improvements = 0
else:
it_without_improvements += 1
# Scale scores
if scaleScores is not None:
norm_scores[:] = boa_scores[:] # Copy scores
norm_scores = scaleScores(norm_scores, best_score)
else:
norm_scores = boa_scores
# Update pheromones according to the scores
updatePheromones(
paths=np.array([ant.adj_matrix for ant in bag_of_ants], dtype=np.int8), P=P,
ant_scores=norm_scores if scores_decay is None else norm_scores * scores_decay(current_iteration),
rho=rho if evaporation_decay is None else rho * evaporation_decay(current_iteration),
**pheromone_update_kw)
# Compute convergence statistics
mean_scores = np.mean(boa_scores)
min_score = boa_scores[bag_size + out_of_bag_size - 1]
max_score = boa_scores[0]
report.save(current_iteration, mean_cost=mean_scores, max_cost=max_score)
if save_pheromones: # Save pheromone values
report.save(current_iteration, pheromones=deepcopy(P))
# Compute monitoring metrics
report.computeMetrics(current_iteration, P, graph)
if metaheuristic is None:
if not fixed and accessory_node is None: # Restart ants initial position
deleteInitialPosition(ants)
else:
# Reposition the ants
ants = aco_obj.ants
if fixed and accessory_node is None: # Put ants in fixed positions (if specified)
fixedPositions(ants, graph)
# If an accessory node has been specified, place the ants on that node
if accessory_node is not None:
for ant in ants:
ant.setInitialPosition(accessory_node)
# After several generations without improvements, do an early stopping of the algorithm.
if it_without_improvements > tol: break
if verbose:
sys.stdout.write('\rCost: Mean: %.4f (Min: %.4f Max: %.4f) (iteration %d)' %
(float(mean_scores), min_score, max_score, current_iteration))
current_iteration += 1
return report
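The elite-merge step marked HACK above boils down to keeping the highest-scoring bag_size + out_of_bag_size entries when the previous bag is pooled with the current iteration's ants; a standalone numpy illustration with made-up scores:

import numpy as np

bag_size, out_of_bag_size = 3, 1
boa_scores = np.array([9.0, 7.5, 6.0, 2.0])        # previous bag, best first
ant_scores = np.array([8.0, 1.0, 6.5, 3.0, 7.0])   # current iteration's ants

all_scores = np.append(boa_scores[:bag_size], ant_scores)
order = np.argsort(all_scores)[::-1]               # descending order
print(all_scores[order][:bag_size + out_of_bag_size])  # [9.  8.  7.5 7. ]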
Python

def checkParameters(args: dict, optional_args: dict):
""" Function that checks the validity of the received arguments. """
def _checkType(name: str, value: object, required: type):
assert isinstance(value, required), 'Parameter: %s must be %s.' % (name, str(required))
def _intType(name: str, value: int, min_val: int = None, max_val: int = None):
_checkType(name, value, int)
if min_val is not None:
assert value > min_val, 'Parameter: %s must be greater than %d' % (name, min_val)
if max_val is not None:
assert value < max_val, 'Parameter: %s must be less than %d' % (name, max_val)
def _floatType(name: str, value: float, min_val: float = None, max_val: float = None):
_checkType(name, value, float)
if min_val is not None:
assert value > min_val, 'Parameter: %s must be greater than %.5f' % (name, min_val)
if max_val is not None:
assert value < max_val, 'Parameter: %s must be less than %.5f' % (name, max_val)
# Check types
_checkType('graph', args['graph'], np.ndarray)
_checkType('heuristic', args['heuristic'], np.ndarray)
_checkType('objectiveFunction', args['objectiveFunction'], ObjectiveFunction)
_intType('n_ants', args['n_ants'], min_val=0)
_intType('iterations', args['iterations'], min_val=0)
# Check graph and heuristic dimensions (n_nodes, n_nodes)
assert len(args['graph'].shape) == 2, \
'Parameter graph must be a square matrix (n_nodes, n_nodes). Provided shape: {}'.\
format(args['graph'].shape)
assert len(args['heuristic'].shape) == 2, \
'Parameter heuristic must be a square matrix (n_nodes, n_nodes). Provided shape: {}'.\
format(args['heuristic'].shape)
assert args['graph'].shape[0] == args['graph'].shape[1], \
'Parameter graph must be a square matrix (n_nodes, n_nodes). Provided shape: {}'.\
format(args['graph'].shape)
assert args['heuristic'].shape[0] == args['heuristic'].shape[1], \
'Parameter heuristic must be a square matrix (n_nodes, n_nodes). Provided shape: {}'.\
format(args['heuristic'].shape)
assert args['graph'].shape[0] == args['heuristic'].shape[0], \
'The dimensions of the graph and heuristic parameters must match. Provided shapes: ' \
'graph: %d; heuristic: %d' % (args['graph'].shape[0], args['heuristic'].shape[0])
# Check optional arguments
if optional_args.get('evaporation', None) is not None:
_floatType('evaporation', optional_args['evaporation'], min_val=0.0)
if optional_args.get('alpha', None) is not None:
_floatType('alpha', optional_args['alpha'], min_val=0.0)
if optional_args.get('beta', None) is not None:
_floatType('beta', optional_args['beta'], min_val=0.0)
if optional_args.get('fixed_position', None) is not None:
_checkType('fixed_position', optional_args['fixed_position'], bool)
if optional_args.get('Q', None) is not None:
_floatType('Q', optional_args['Q'], min_val=0.0, max_val=1.0)
if optional_args.get('R', None) is not None:
_floatType('R', optional_args['R'], min_val=0.0, max_val=1.0)
if optional_args.get('graph_type', None) is not None:
assert (optional_args['graph_type'] == 'd') or (optional_args['graph_type'] == 'u') or \
(optional_args['graph_type'] == 'directed') or (optional_args['graph_type'] == 'undirected'), \
'The accepted values for the graph_type parameter are: "directed" or "d" for directed ' \
'graphs and "undirected" or "u" for undirected graphs.'
if optional_args.get('path_limits', None) is not None:
assert isinstance(optional_args['path_limits'], list) or \
isinstance(optional_args['path_limits'], tuple), 'Parameter: path_limits must be a tuple or a list'
assert len(optional_args['path_limits']) == 2, \
'The limits of the length of the path travelled by the ants must be defined by two integers.'
if optional_args.get('pheromone_init', None) is not None:
_floatType('pheromone_init', optional_args['pheromone_init'])
if optional_args.get('tol', None) is not None:
_intType('tol', optional_args['tol'], min_val=5)
if optional_args.get('pheromone_update', None) is not None:
assert isinstance(optional_args['pheromone_update'], str) or \
isinstance(optional_args['pheromone_update'], dict), \
'The pheromone_update parameter must be provided as a string or a dictionary as ' \
'specified in the documentation.'
if isinstance(optional_args['pheromone_update'], str):
assert optional_args['pheromone_update'].lower() in PHEROMONE_UPDATE_STRATEGIES, \
'Pheromone update strategy %s not recognised. Available strategies: %r' % \
(optional_args['pheromone_update'], PHEROMONE_UPDATE_STRATEGIES)
else:
if optional_args['pheromone_update'].get('bag_size', None) is not None:
_intType('pheromone_update "bag_size"', optional_args['pheromone_update']['bag_size'],
min_val=1, max_val=args['n_ants'])
assert 'strategy' in optional_args['pheromone_update'], \
'Pheromone update strategy not defined using the "strategy" key. Available strategies: %r' % \
PHEROMONE_UPDATE_STRATEGIES
assert optional_args['pheromone_update']['strategy'].lower() in PHEROMONE_UPDATE_STRATEGIES, \
'Pheromone update strategy %s not recognised. Available strategies: %r' % \
(optional_args['pheromone_update']['strategy'], PHEROMONE_UPDATE_STRATEGIES)
if optional_args.get('scaleScores', None) is not None:
_checkType('scaleScores', optional_args['scaleScores'], ScoreScaler)
if optional_args.get('seed', None) is not None:
        _intType('seed', optional_args['seed'], min_val=0)
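
For reference, a minimal sketch (not part of antco) of graph and heuristic matrices that satisfy the dimensional checks performed above; names and values are illustrative.

import numpy as np

n_nodes = 5
graph = (np.random.uniform(size=(n_nodes, n_nodes)) > 0.5).astype(np.int8)  # square binary adjacency matrix
heuristic = np.random.uniform(0.0, 1.0, size=(n_nodes, n_nodes))            # square, same number of nodes

# The same conditions enforced by the assertions above:
assert graph.ndim == 2 and graph.shape[0] == graph.shape[1]
assert heuristic.ndim == 2 and heuristic.shape[0] == heuristic.shape[1]
assert graph.shape[0] == heuristic.shape[0]
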
Python | def _selectPheromoneUpdate(kwargs: dict):
""" Method that selects the pheromone update strategy provided by the user. """
unrecognised_strategy = \
'Pheromone update strategy not recognized, please check the parameter ' \
'pheromone_update. Available strategies: %r' % PHEROMONE_UPDATE_STRATEGIES
# Default pheromone update strategy
if kwargs.get('pheromone_update', None) is None:
update_strategy_ = updateAS('D')
elif isinstance(kwargs['pheromone_update'], str):
if kwargs['pheromone_update'].lower() == 'as':
update_strategy_ = updateAS('D')
elif kwargs['pheromone_update'].lower() == 'mmas':
update_strategy_ = updateMMAS('D')
elif kwargs['pheromone_update'].lower() == 'acs':
update_strategy_ = updateACS('D')
else:
assert False, unrecognised_strategy
elif isinstance(kwargs['pheromone_update'], dict):
if 'strategy' not in kwargs['pheromone_update']:
assert False, 'The key "strategy" in charge of specifying the update strategy is ' \
                          'not present in the pheromone_update parameter.'
if kwargs['pheromone_update']['strategy'].lower() == 'as':
update_strategy_ = updateAS(kwargs.get('graph_type', 'D'),
kwargs['pheromone_update'].get('elite', False))
elif kwargs['pheromone_update']['strategy'].lower() == 'mmas':
update_strategy_ = updateMMAS(kwargs.get('graph_type', 'D'),
kwargs['pheromone_update'].get('elite', False))
elif kwargs['pheromone_update']['strategy'].lower() == 'acs':
update_strategy_ = updateACS(kwargs.get('graph_type', 'D'))
else:
assert False, unrecognised_strategy
else:
assert False, 'Unrecognised error in colony.ACO._selectPheromoneUpdate()'
    return update_strategy_
Python | def _selectPheromoneUpdateKW(kwargs: dict):
""" Method that selects the optional arguments associated with the pheromone update and
returns them as a dictionary. """
if kwargs.get('pheromone_update', None) is None:
return {'weight': 1.0}
if isinstance(kwargs.get('pheromone_update', None), str):
if kwargs['pheromone_update'].lower() == 'as':
return {'weight': 1.0}
elif kwargs['pheromone_update'].lower() == 'mmas':
return {'limits': (0, 1), 'weight': 1.0}
elif kwargs['pheromone_update'].lower() == 'acs':
return {'decay': 0.1, 'weight': 1.0}
elif isinstance(kwargs.get('pheromone_update', None), dict):
if kwargs['pheromone_update']['strategy'].lower() == 'as':
if kwargs['pheromone_update'].get('elite', None) is not None:
return {'elite': kwargs['pheromone_update']['elite'],
'weight': kwargs['pheromone_update'].get('weight', 1.0)}
return {'weight': 1.0}
elif kwargs['pheromone_update']['strategy'].lower() == 'mmas':
if kwargs['pheromone_update'].get('elite', None) is not None:
return {'limits': tuple(kwargs['pheromone_update'].get('limits', (0, 1))),
'elite': kwargs['pheromone_update']['elite'],
'weight': kwargs['pheromone_update'].get('weight', 1.0)}
return {'limits': tuple(kwargs['pheromone_update'].get('limits', (0, 1))),
'weight': kwargs['pheromone_update'].get('weight', 1.0)}
elif kwargs['pheromone_update']['strategy'].lower() == 'acs':
return {'decay': kwargs['pheromone_update'].get('decay', 0.1),
'weight': kwargs['pheromone_update'].get('weight', 1.0)}
else:
        assert False, 'Unrecognised pheromone_update in antco.colony.ACO._selectPheromoneUpdateKW()'
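
To make the accepted formats concrete, a few illustrative pheromone_update values that the two selector functions above would resolve (a sketch; keys such as 'elite' and 'bag_size' are also read, and their exact semantics depend on the update implementations):

pheromone_update = 'as'                               # plain string: Ant System, weight 1.0
pheromone_update = 'mmas'                             # MIN-MAX Ant System with default limits (0, 1)
pheromone_update = 'acs'                              # Ant Colony System with default decay 0.1
pheromone_update = {'strategy': 'mmas',               # dictionary form: the 'strategy' key is mandatory
                    'limits': (0.0, 1.0),
                    'weight': 1.0}
pheromone_update = {'strategy': 'acs', 'decay': 0.1}  # ACS with an explicit local decay
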
Python | def graph(self, input_val: tuple):
""" Select a new graph matrix, to prevent accidental changes this must be received as a
tuple where the first element will be the new graph matrix and the second a boolean true
value. """
assert input_val[1], 'Incorrect graph assignation protocol.'
    self._graph = input_val[0]
Python | def heuristic(self, input_val: tuple):
""" Select a new heuristic matrix, to prevent accidental changes this must be received as a
tuple where the first element will be the new heuristic matrix and the second a boolean
true value. """
assert input_val[1], 'Incorrect heuristic assignation protocol.'
    self._H = input_val[0]
Python | def pheromones(self, input_val: tuple):
""" Select a new pheromone matrix, to prevent accidental changes this must be received as
a tuple where the first element will be the new pheromone matrix and the second a boolean
true value. """
assert input_val[1], 'Incorrect pheromone assignation protocol.'
    self._P = input_val[0]
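
As an illustration of the assignment protocol described in these setters, overwriting a matrix on an already configured ACO instance requires the explicit confirmation flag (the `aco` instance below is hypothetical):

import numpy as np

n = aco.pheromones.shape[0]                      # assumes the getter returns the current matrix
aco.pheromones = (np.full((n, n), 0.5), True)    # accepted: (matrix, True) tuple
# aco.pheromones = np.full((n, n), 0.5)          # rejected: the confirmation flag is missing
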
Python | def step(ant: Ant, adjacency_matrix: np.ndarray, heuristic: np.ndarray, pheromone: np.ndarray,
alpha: float, beta: float, seed: int, exp_heuristic: bool = True, Q: float = None,
R: float = None) -> Ant:
"""
Basic step function which ensures that an ant makes a path around the graph. The steps carried
out by this function include:
1. Initial positioning of the ant in the graph to be explored.
2. Calling the getRandomWalk function so that the ant traverses the network.
3. Processing of the path returned by the function.
Parameters
----------
ant: Ant
        Ant. Important note: for parallelization reasons this function will not modify the internal
        state of the received ant; it returns a new ant instead.
adjacency_matrix: np.ndarray (nodes, nodes), dtype=np.int8
Binary adjacency matrix that defines the structure of the graph to be covered.
heuristic: np.ndarray (nodes, nodes), dtype=np.float64
        Heuristic information matrix used by the stochastic policy to select the nodes towards which
the ant will move.
pheromone: np.ndarray (nodes, nodes), dtype=np.float64
        Pheromone information matrix used by the stochastic policy to select the edges towards which
the ant will move. This matrix will be optimised over successive iterations of the algorithm.
alpha: float
        Parameter that references the influence of pheromones when the ant makes a decision on the
path through the walk being constructed.
beta: float
        Analogous to the alpha parameter, the beta parameter references the importance given
to the heuristic information.
seed: int
Random seed.
    exp_heuristic: bool, default=True
        Boolean value indicating whether to exponentiate the heuristic matrix to beta. By
        default the exponentiation is performed inside this function. Set this parameter to
        False when the heuristic matrix has already been exponentiated beforehand; keep it
        True for problems where the values of the heuristic matrix change throughout the
        iterations of the algorithm.
Q: float, default=None
Parameter that determines the probability of selecting the next move deterministically
by selecting the move to the node that has the highest probability. By default this
parameter will not be considered.
R: float, default=None
Parameter that determines the probability of selecting the next move randomly without
taking into account the computation of the pheromone matrix and heuristics. By default
this parameter will not be considered.
Returns
-------
:Ant
Returns the ant that has travelled along the network.
"""
if exp_heuristic:
heuristic = np.power(heuristic, beta)
random.seed(seed)
np.random.seed(seed)
new_ant = Ant(l_min=ant.min_length, l_max=ant.max_length, graph_type=ant.representation,
check_params=False)
new_ant.initAdjMatrix(n_nodes=adjacency_matrix.shape[0])
if ant.initial_position is None: # Random initial position
initial_position = randomInit(adjacency_matrix)
else:
initial_position = ant.initial_position
new_ant.setInitialPosition(initial_position)
ant_path = getRandomWalk(
new_ant.initial_position, new_ant.visited_nodes, adjacency_matrix, heuristic, pheromone,
alpha, new_ant.max_length, Q, R)
new_ant.visited_nodes = ant_path
    return new_ant
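
For intuition, a self-contained sketch of the transition rule that the stochastic policy described above relies on (pheromone raised to alpha times the pre-exponentiated heuristic, restricted to unvisited neighbours); the actual sampling is performed inside getRandomWalk, whose implementation is not shown here:

import numpy as np

def transition_probabilities(current: int, visited: np.ndarray, adjacency: np.ndarray,
                             heuristic_beta: np.ndarray, pheromone: np.ndarray, alpha: float):
    # Candidate moves: adjacent nodes that have not been visited yet.
    candidates = np.where((adjacency[current] == 1) & (~visited))[0]
    # Pheromone weighted by alpha times heuristic already raised to beta.
    weights = (pheromone[current, candidates] ** alpha) * heuristic_beta[current, candidates]
    return candidates, weights / weights.sum()
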
Python | def generatePaths(ants: list, graph: np.ndarray, H: np.ndarray, P: np.ndarray, alpha: float,
beta: float, Q: float, R: float, n_jobs: int, seeds: np.ndarray,
exp_heuristic: bool = True) -> list:
"""
Function that performs the exploration of the network according to the values of the heuristic
and pheromone matrix.
Parameters
----------
ants: list
List of ant instances.
graph: np.ndarray (nodes, nodes), dtype=np.int8
Graph to be explored.
H: np.ndarray (nodes, nodes), dtype=np.float64
Heuristic information.
P: np.ndarray (nodes, nodes), dtype=np.float64
Pheromone information.
alpha: float
        The alpha parameter references the influence of pheromones when the ant makes a decision
on the path through the walk being constructed.
beta: float
        Analogous to the alpha parameter, the beta parameter references the importance given
to the heuristic information received in H.
Q: float, default=None
Parameter that determines the probability of selecting the next move deterministically
by selecting the move to the node that has the highest probability. By default this
parameter will not be considered.
R: float, default=None
Parameter that determines the probability of selecting the next move randomly without
taking into account the computation of the pheromone matrix and heuristics. By default
this parameter will not be considered.
n_jobs: int
Number of processes to run in parallel.
seeds: np.ndarray
Random seeds.
exp_heuristic: bool, default=True
Parameter indicating whether to exponentiate the heuristic matrix to the beta value. By
        default the exponentiation will be performed (it is not assumed to have been precomputed).
Returns
-------
:list
List of ant instances that have traversed the graph to be explored.
"""
if n_jobs == 1:
ants = [
step(ant, graph, H, P, alpha, beta, exp_heuristic=exp_heuristic, Q=Q, R=R, seed=seed)
for ant, seed in zip(ants, seeds)]
else:
ants = joblib.Parallel(n_jobs=n_jobs, backend='loky')(
joblib.delayed(step)(
ant, graph, H, P, alpha, beta, exp_heuristic=exp_heuristic, Q=Q, R=R, seed=seed)
for ant, seed in zip(ants, seeds))
    return ants
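
A small, self-contained illustration of the dispatch pattern used above: because each ant receives its own seed, the sequential branch and the parallel 'loky' branch produce identical results (the toy function below merely stands in for step):

import joblib
import numpy as np

def toy_step(idx: int, seed: int) -> int:
    # Stand-in for step(): produces a deterministic result for a given seed.
    rng = np.random.default_rng(seed)
    return int(rng.integers(0, 100))

seeds = np.random.randint(0, 2 ** 31 - 1, size=8)
serial = [toy_step(i, s) for i, s in enumerate(seeds)]
parallel = joblib.Parallel(n_jobs=4, backend='loky')(
    joblib.delayed(toy_step)(i, s) for i, s in enumerate(seeds))
assert serial == list(parallel)   # per-ant seeding keeps both execution modes equivalent
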
Python | def generatePathsACS(ants: list, graph: np.ndarray, H: np.ndarray, P: np.ndarray, alpha: float,
beta: float, decay: float, pher_init: float, Q: float,
exp_heuristic: bool = True) -> list:
"""
Function that performs the exploration of the graph using the Ant Colony System strategy
proposed in
Dorigo, M., & Gambardella, L. M. (1997). Ant colony system: a cooperative learning
approach to the traveling salesman problem. IEEE Transactions on evolutionary computation,
1(1), 53-66.
Parameters
----------
ants: list
List of ant instances.
graph: np.ndarray (nodes, nodes), dtype=np.int8
Graph to be explored.
H: np.ndarray (nodes, nodes), dtype=np.float64
Heuristic information.
P: np.ndarray (nodes, nodes), dtype=np.float64
Pheromone information.
alpha: float
        The alpha parameter references the influence of pheromones when the ant makes a decision
on the path through the walk being constructed.
beta: float
        Analogous to the alpha parameter, the beta parameter references the importance given
to the heuristic information received in H.
decay: float
Decay to be applied during the local update of the pheromone matrix values after an ant
has made the tour. This parameter is used in the local pheromone update given by equation
P[i,j] = (1 - decay) * P[i,j] + decay * pher_init
pher_init: float
Parameter involved in the local update of the pheromone matrix values according to the
equation
P[i,j] = (1 - decay) * P[i,j] + decay * pher_init
Q: float, default=None
Parameter that determines the probability of selecting the next move deterministically
by selecting the move to the node that has the highest probability. By default this
parameter will not be considered.
exp_heuristic: bool, default=True
Parameter indicating whether to exponentiate the heuristic matrix to the beta value. By
        default the exponentiation will be performed (it is not assumed to have been precomputed).
Returns
-------
:list
List of ant instances that have traversed the graph to be explored.
"""
if exp_heuristic:
H_beta = np.power(H, beta)
else:
H_beta = H
new_ants = []
for ant in ants:
new_ant = Ant(l_min=ant.min_length, l_max=ant.max_length, graph_type=ant.representation,
check_params=False)
new_ant.initAdjMatrix(n_nodes=graph.shape[0])
        # Respect the initial position configured on the incoming ant (e.g. fixed or accessory node)
        init_pos = randomInit(graph) if ant.initial_position is None else ant.initial_position
new_ant.setInitialPosition(init_pos)
# Generate random walk
new_ant.visited_nodes = getRandomWalk(
initial_position=new_ant.initial_position, current_path=new_ant.visited_nodes,
adjacency_matrix=graph, heuristic=H_beta, pheromone=P, alpha=alpha,
max_lim=new_ant.max_length, Q=Q, R=None)
# Local pheromone update
if new_ant.representation == 'u':
updateUndLocalPher(ant=new_ant, P=P, decay=decay, init_val=pher_init)
else:
updateDirLocalPher(ant=new_ant, P=P, decay=decay, init_val=pher_init)
new_ants.append(new_ant)
    return new_ants
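
A worked example of the local pheromone update equation quoted in the docstring above, applied to a single traversed edge (the values are made up):

import numpy as np

decay, pher_init = 0.1, 0.2
P = np.full((4, 4), 1.0)                              # current pheromone matrix
i, j = 0, 2                                           # edge (i, j) just traversed by an ant
P[i, j] = (1 - decay) * P[i, j] + decay * pher_init   # local update
print(P[i, j])                                        # 0.92 = 0.9 * 1.0 + 0.1 * 0.2
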
Python | def evaluateAnts(ants: list, objectiveFunction: ObjectiveFunction) -> np.ndarray:
"""
Function that performs the evaluation of the paths traversed by the ants using the defined cost
function.
Parameters
----------
ants: list
List of ant instances.
objectiveFunction: antco.optim.ObjectiveFunction
Subclass of antco.optim.ObjectiveFunction defined by the user. This function will be
maximized.
Returns
-------
:np.ndarray (len(ants)), dtype=np.float64
Scores associated with the ants.
"""
ant_scores = np.array([
objectiveFunction((ant, idx)) for idx, ant in enumerate(ants)], dtype=np.float64)[:, 0]
    return ant_scores
Python | def apply(aco_obj: ACO, **kwargs):
"""
    Function that applies the specified transformations (passed as keyword arguments) to the ACO
    instance.
Parameters
----------
aco_obj: antco.ACO
Ant Colony Optimization (aka ACO) instance on which the transformations are to be applied.
**kwargs
Transformations to be applied to the ACO instance. Only the indicated transformations will
be applied.
scale_heuristic: dict
Scales the heuristic matrix to a range of values defined by [min_val, max_val]
>>> apply(aco_obj, scale_heuristic={'min_val': 1.0, 'max_val': 2.0})
accessory_node: bool
Indicates whether to add an accessory node densely connected to all nodes in the
network. This way all the ants will start from this accessory node, optimising the
initial positioning in the network.
>>> apply(aco_obj, accessory_node=True)
"""
if 'scale_heuristic' in kwargs:
_scaleHeuristic(aco_obj, **kwargs['scale_heuristic'])
if 'accessory_node' in kwargs and kwargs['accessory_node'] is True:
        _addAccessoryNode(aco_obj)
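
The _scaleHeuristic helper is not shown here; as a rough sketch, a min-max rescale of the heuristic matrix to [min_val, max_val], which is what the scale_heuristic keyword describes, can be written as:

import numpy as np

def scale_to_range(matrix: np.ndarray, min_val: float, max_val: float) -> np.ndarray:
    # Map the matrix linearly so that its minimum becomes min_val and its maximum becomes max_val.
    normalised = (matrix - matrix.min()) / (matrix.max() - matrix.min())
    return normalised * (max_val - min_val) + min_val

H = np.random.uniform(-3.0, 5.0, size=(10, 10))
H_scaled = scale_to_range(H, min_val=1.0, max_val=2.0)
assert np.isclose(H_scaled.min(), 1.0) and np.isclose(H_scaled.max(), 2.0)
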
Python | def _addAccessoryNode(aco_obj: ACO):
""" Method that adds an accessory node to the heuristics, pheromone and connectivity graph
matrices. """
def _add_node(matrix: np.ndarray):
""" Adds an extra densely connected node to all nodes of the matrix in the last position. """
new_shape = matrix.shape[0] + 1, matrix.shape[1] + 1
new_matrix = np.empty(new_shape, dtype=matrix.dtype)
# Copy matrix values
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
new_matrix[i, j] = matrix[i, j]
# Add node using the maximum value
new_matrix[:, -1] = np.max(matrix)
new_matrix[-1, :] = np.max(matrix)
return new_matrix
graph = _add_node(aco_obj.graph)
heuristic = _add_node(aco_obj.heuristic)
pheromones = _add_node(aco_obj.pheromones)
assert graph.shape[0] == graph.shape[1] == heuristic.shape[0] == heuristic.shape[1] == pheromones.shape[0] == \
pheromones.shape[1], 'Incorrect shapes adding accessory node.'
# Accessory node will be in the last position
aco_obj.accessory_node = graph.shape[0] - 1
aco_obj.graph = (graph, True)
aco_obj.heuristic = (heuristic, True)
aco_obj.pheromones = (pheromones, True)
    aco_obj.objectiveFunction.accessory_node = True
Python | def _add_node(matrix: np.ndarray):
""" Adds an extra densely connected node to all nodes of the matrix in the last position. """
new_shape = matrix.shape[0] + 1, matrix.shape[1] + 1
new_matrix = np.empty(new_shape, dtype=matrix.dtype)
# Copy matrix values
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
new_matrix[i, j] = matrix[i, j]
# Add node using the maximum value
new_matrix[:, -1] = np.max(matrix)
new_matrix[-1, :] = np.max(matrix)
    return new_matrix
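
For reference, the same densely connected node can be appended with a single np.pad call; the result matches the loop-based construction above:

import numpy as np

def add_node_padded(matrix: np.ndarray) -> np.ndarray:
    # Append one row and one column filled with the matrix maximum (a densely connected node).
    return np.pad(matrix, ((0, 1), (0, 1)), mode='constant', constant_values=matrix.max())

m = np.arange(9, dtype=np.float64).reshape(3, 3)
padded = add_node_padded(m)
assert np.array_equal(padded[:3, :3], m)
assert np.all(padded[-1, :] == m.max()) and np.all(padded[:, -1] == m.max())
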
Python | def decay(self, iteration: int) -> float:
"""
Method that must return a numeric value that will act as a decay on the scores or
evaporation parameter. In this way the scores or evaporation will be multiplied by the
        value received. This allows scores or evaporation to be smoothed as a function of the
        current iteration: depending on which iteration the algorithm is in, the pheromones
        will be updated in a more abrupt or a smoother way according to the value returned.
Parameters
----------
iteration: int
Algorithm iteration.
Returns
-------
:float
Decay value
"""
    raise NotImplementedError
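
A minimal sketch of a possible subclass, assuming that overriding decay() is all the DecaySchedule interface requires (the constructor details below are illustrative, not part of antco):

import math

class ExponentialDecay(DecaySchedule):
    """ Multiplies scores/evaporation by a factor that shrinks with the iteration number. """
    def __init__(self, rate: float = 0.05):
        self.rate = rate

    def decay(self, iteration: int) -> float:
        return math.exp(-self.rate * iteration)
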
Python | def scale(self, ant_scores: np.ndarray, best_historic: float):
"""
Method that receives as arguments an array with the scores given by the cost function and
the best score seen by the algorithm and must return an array of the same size as the one
received with the scores scaled as decided by the user.
Parameters
----------
ant_scores: np.ndarray (n_ants), dtype=np.float64
Scores given by the objective function.
best_historic: float
Best score found until the current iteration.
Returns
-------
:np.ndarray (n_ants), dtype=np.float64
Scaled scores.
"""
    raise NotImplementedError
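
Analogously, a minimal sketch of a possible ScoreScaler subclass (the interface is inferred from the abstract method above; the handling of degenerate best scores is illustrative):

import numpy as np

class RelativeScaler(ScoreScaler):
    """ Expresses every ant score as a fraction of the best score seen so far. """
    def scale(self, ant_scores: np.ndarray, best_historic: float) -> np.ndarray:
        if best_historic == 0 or not np.isfinite(best_historic):
            return ant_scores
        return ant_scores / best_historic
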
Python | def updateAS(topology: str, elite: bool = False):
"""
Function that returns the Ant System pheromone value update strategy optimised on the basis of
the graph topology received as an argument.
Parameters
----------
topology: str
Graph topology: 'directed' or 'D' and 'undirected' or 'U'.
elite: bool (default False)
Indicates whether to use only the best ants for the pheromone update.
Returns
-------
:function
Algorithm optimised to work with the type of topology specified.
"""
assert isinstance(topology, str), 'topology argument must be a string.'
topology_ = topology.lower()
if topology_ == 'd' or topology_ == 'directed':
if elite:
return updateDirEliteAS
return updateDirAS
elif topology_ == 'u' or topology_ == 'undirected':
if elite:
return updateUndEliteAS
return updateUndAS
else:
assert False, 'Unrecognised topology parameter. Available options are: "directed" or "D" ' \
'and "undirected" or "U".' | def updateAS(topology: str, elite: bool = False):
"""
Function that returns the Ant System pheromone value update strategy optimised on the basis of
the graph topology received as an argument.
Parameters
----------
topology: str
Graph topology: 'directed' or 'D' and 'undirected' or 'U'.
elite: bool (default False)
Indicates whether to use only the best ants for the pheromone update.
Returns
-------
:function
Algorithm optimised to work with the type of topology specified.
"""
assert isinstance(topology, str), 'topology argument must be a string.'
topology_ = topology.lower()
if topology_ == 'd' or topology_ == 'directed':
if elite:
return updateDirEliteAS
return updateDirAS
elif topology_ == 'u' or topology_ == 'undirected':
if elite:
return updateUndEliteAS
return updateUndAS
else:
assert False, 'Unrecognised topology parameter. Available options are: "directed" or "D" ' \
'and "undirected" or "U".' |
Python | def updateMMAS(topology: str, elite: bool = False):
"""
Function that returns the MIN-MAX Ant System pheromone value update strategy optimised on the
basis of the graph topology received as an argument.
Parameters
----------
topology: str
Graph topology: 'directed' or 'D' and 'undirected' or 'U'.
elite: bool (default False)
Indicates whether to use only the best ants for the pheromone update.
Returns
-------
:function
Algorithm optimised to work with the type of topology specified.
"""
assert isinstance(topology, str), 'topology argument must be a string.'
topology_ = topology.lower()
if topology_ == 'd' or topology_ == 'directed':
if elite:
return updateDirEliteMMAS
return updateDirMMAS
elif topology_ == 'u' or topology_ == 'undirected':
if elite:
return updateUndEliteMMAS
return updateUndMMAS
else:
assert False, 'Unrecognised topology parameter. Available options are: "directed" or "D" ' \
'and "undirected" or "U".' | def updateMMAS(topology: str, elite: bool = False):
"""
Function that returns the MIN-MAX Ant System pheromone value update strategy optimised on the
basis of the graph topology received as an argument.
Parameters
----------
topology: str
Graph topology: 'directed' or 'D' and 'undirected' or 'U'.
elite: bool (default False)
Indicates whether to use only the best ants for the pheromone update.
Returns
-------
:function
Algorithm optimised to work with the type of topology specified.
"""
assert isinstance(topology, str), 'topology argument must be a string.'
topology_ = topology.lower()
if topology_ == 'd' or topology_ == 'directed':
if elite:
return updateDirEliteMMAS
return updateDirMMAS
elif topology_ == 'u' or topology_ == 'undirected':
if elite:
return updateUndEliteMMAS
return updateUndMMAS
else:
assert False, 'Unrecognised topology parameter. Available options are: "directed" or "D" ' \
'and "undirected" or "U".' |
Python | def updateACS(topology: str):
"""
Function that returns the Ant Colony System pheromone update strategy optimised for the
graph representation to be explored.
Parameters
----------
topology: str
Graph topology: 'directed' or 'D' and 'undirected' or 'U'.
Returns
-------
:function
Algorithm optimised to work with the type of topology specified.
"""
assert isinstance(topology, str), 'topology argument must be a string.'
topology_ = topology.lower()
if topology_ == 'd' or topology_ == 'directed':
return updateDirACS
elif topology_ == 'u' or topology_ == 'undirected':
return updateUndACS
else:
assert False, 'Unrecognised topology parameter. Available options are: "directed" or "D" ' \
'and "undirected" or "U".' | def updateACS(topology: str):
"""
Function that returns the Ant Colony System pheromone update strategy optimised for the
graph representation to be explored.
Parameters
----------
topology: str
Graph topology: 'directed' or 'D' and 'undirected' or 'U'.
Returns
-------
:function
Algorithm optimised to work with the type of topology specified.
"""
assert isinstance(topology, str), 'topology argument must be a string.'
topology_ = topology.lower()
if topology_ == 'd' or topology_ == 'directed':
return updateDirACS
elif topology_ == 'u' or topology_ == 'undirected':
return updateUndACS
else:
assert False, 'Unrecognised topology parameter. Available options are: "directed" or "D" ' \
'and "undirected" or "U".' |
Python | def _evaluate(self, ants: list):
""" Method to assess the quality of the received ant list using the defined objectiveFunction
function."""
assert len(ants) > 0, 'At least one ant must be passed to the hybrid'
scores = np.array([
self.evaluate((ant, idx)) for idx, ant in enumerate(ants)], dtype=np.float64)[:, 0]
    return scores
Python | def optimise(self, ants: list, scores: list) -> tuple:
"""
Method that using the paths travelled by the ants must return a list with an arbitrary
number of ants (note that the number of ants returned does not have to coincide with the
number of ants received).
Parameters
----------
ants: list
List of antco.Ant instances.
scores: list
List of scores associated with each ant.
Returns
-------
:tuple
Returns a tuple where the first element corresponds to the list of improved ants and
the second element to the scores given by the cost function. Note that if the evaluation
procedure of the cost function defined in the hybrid strategy does not coincide with
the cost function defined in the ACO algorithm, strange behaviours may occur.
"""
    raise NotImplementedError
Python | def antColonySystem(aco_obj: ACO, scores_decay: DecaySchedule = None,
evaporation_decay: DecaySchedule = None, report: Report = None,
save_pheromones: bool = True, verbose: bool = True) -> Report:
"""
Function that executes the Ant Colony System algorithm based on
Dorigo, M., & Gambardella, L. M. (1997). Ant colony system: a cooperative learning
approach to the traveling salesman problem. IEEE Transactions on evolutionary computation,
1(1), 53-66.
Parameters
----------
aco_obj: antco.ACO
Initialised antco.ACO instance, for more information on this type of object use:
help(antco.ACO)
scores_decay: antco.optim.DecaySchedule subclass, default=None
        Instance of an antco.optim.DecaySchedule subclass representing a decay schedule that will
        be applied to the score values with which the pheromone values are updated in each iteration.
For more info use help(antco.optim.DecaySchedule).
evaporation_decay: antco.optim.DecaySchedule subclass, default=None
Same as scores_decay, in this case used to update the evaporation rate value.
For more info use help(antco.optim.DecaySchedule).
report: antco.report.Report, default=None
antco.report.Report instance, by default a report will be initialised.
save_pheromones: bool, default=True
Value indicating whether to store in the report the values of the pheromone matrix at each
iteration. If True these may be retrieved by:
>>> report.get('pheromones')
verbose: bool, default=True
Indicates whether to show convergence information during the execution of the algorithm.
Returns
-------
:antco.report.Report
Returns a antco.report.Report instance with information about the convergence of the
algorithm. The convergence parameters can be accessed via:
        >>> report = antco.algorithm.antColonySystem(...)
>>> report.get('min_cost')
>>> report.get('mean_cost')
>>> report.get('max_cost')
For more info about Report instances use help(antco.report.Report).
"""
# Get parameters from aco_obj
seed = aco_obj.seed
graph = aco_obj.graph
iterations = aco_obj.iterations
n_jobs = aco_obj.n_jobs
ants = aco_obj.ants
H = aco_obj.heuristic
P = aco_obj.pheromones
alpha = aco_obj.alpha
beta = aco_obj.beta
if aco_obj.precompute_heuristic:
H = np.power(H, beta)
pheromone_update_kw = aco_obj.pheromone_update_kw
rho = pheromone_update_kw['rho']; del pheromone_update_kw['rho'] # Get evaporation parameter
decay = pheromone_update_kw['decay']; del pheromone_update_kw['decay'] # Get decay parameter
pher_init_val = aco_obj.pher_init_val
updatePheromones = aco_obj.updatePheromones
objectiveFunction = aco_obj.objectiveFunction
tol = aco_obj.tol
fixed = aco_obj.fixed_positions
accessory_node = aco_obj.accessory_node
Q = aco_obj.Q
scaleScores = aco_obj.scaleScores
if report is None: # Initialize a Report instance (if not provided)
report = Report({'BranchingFactor': {'lambda_values': [0.5]}})
if seed is not None: # Set random state
random.seed(seed)
np.random.seed(seed)
if fixed and accessory_node is None: # Put ants in fixed positions (if specified)
fixedPositions(ants, graph)
# If an accessory node has been specified, place the ants on that node
if accessory_node is not None:
for ant in ants:
ant.setInitialPosition(accessory_node)
# Pre-compute the array to store the ant scores
ant_scores, norm_scores = np.empty(shape=len(ants)), np.empty(shape=len(ants))
current_iteration = 1
it_without_improvements = 0
best_score = -float('inf')
while current_iteration <= iterations:
ants = generatePathsACS( # Graph exploration
ants=ants, graph=graph, H=H, P=P, alpha=alpha, beta=beta, decay=decay,
pher_init=pher_init_val, Q=Q, exp_heuristic=False)
# Evaluate ant paths using the objectiveFunction function (it will be maximized)
ant_scores = evaluateAnts(ants=ants, objectiveFunction=objectiveFunction)
# Update best score and save best solution
new_best = updateReportWithBest(
ants=ants, scores=ant_scores, best_score=best_score, report=report,
iteration=current_iteration)
if new_best > best_score:
best_score = new_best
it_without_improvements = 0
else:
it_without_improvements += 1
# Scale scores
if scaleScores is not None:
norm_scores[:] = ant_scores[:] # Copy scores
norm_scores = scaleScores(norm_scores, best_score)
else:
norm_scores = ant_scores
# Update pheromones according to the scores
updatePheromones(
paths=np.array([ant.adj_matrix for ant in ants], dtype=np.int8), P=P,
ant_scores=norm_scores if scores_decay is None else norm_scores * scores_decay(current_iteration),
rho=rho if evaporation_decay is None else rho * evaporation_decay(current_iteration),
**pheromone_update_kw)
if not fixed and accessory_node is None: # Restart ants initial position
deleteInitialPosition(ants)
# Compute convergence statistics
mean_scores = np.mean(ant_scores)
min_score = np.min(ant_scores)
max_score = np.max(ant_scores)
report.save(current_iteration, mean_cost=mean_scores, max_cost=max_score)
if save_pheromones: # Save pheromone values
report.save(current_iteration, pheromones=deepcopy(P))
        # Compute monitoring metrics
report.computeMetrics(current_iteration, P, graph)
# After several generations without improvements, do an early stopping of the algorithm.
if it_without_improvements > tol: break
if verbose:
sys.stdout.write('\rCost: Mean: %.4f (Min: %.4f Max: %.4f) (iteration %d)' %
(float(mean_scores), min_score, max_score, current_iteration))
current_iteration += 1
    return report
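
A self-contained illustration of how the two optional decay schedules modulate the per-iteration pheromone update in the loop above (the schedules and values are made up; real schedules would be DecaySchedule subclasses):

import numpy as np

rho = 0.2                                                  # base evaporation rate
norm_scores = np.array([0.4, 0.9, 0.7])                    # scaled ant scores for this iteration
scores_decay = lambda it: 1.0 / (1.0 + 0.01 * it)          # illustrative schedule stand-ins
evaporation_decay = lambda it: 0.995 ** it
iteration = 120
effective_scores = norm_scores * scores_decay(iteration)   # pheromone deposits shrink over time
effective_rho = rho * evaporation_decay(iteration)         # evaporation also cools down
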
Python | def greedySearch(nodes: list or np.ndarray, depth: int, adj_matrix: np.ndarray,
objective: callable, objective_args: dict, best_score: float):
"""
Greedy search strategy used in GreedySearchOP.
Parameters
----------
nodes: list or np.ndarray (solution_nodes)
Nodes which make up the solution to be improved.
depth: int
Maximum depth of the solution tree explored.
adj_matrix: np.ndarray (nodes, nodes)
Adjacency matrix defining the structure of the network to be explored.
objective: callable
Objective function to be maximised used for the evaluation of solutions.
objective_args: dict
Additional parameters that will be passed to the objective function.
best_score: float
Initial score associated to the initial solution.
Returns
-------
:tuple
Tuple where the first element corresponds to the improved solution (or the solution received
if it could not be improved) and the second element corresponds to the score associated with
that solution.
"""
graph = nx.from_numpy_array(adj_matrix)
best_solution = None
current_depth = 0
while current_depth < depth:
if current_depth == 0:
nodes_to_explore = [n for n in nodes]
else:
if best_solution is None: break
nodes_to_explore = best_solution
# Get those nodes that when removed do not generate two disconnected sub-networks
repl_nodes = getReplaceable(nodes_to_explore, graph)
# Replace each possible node
for n1 in repl_nodes:
# Solution in which one of the nodes has been removed
pruned_solution = [node for node in nodes_to_explore]
pruned_solution.pop(pruned_solution.index(n1))
# Get a list of all nodes that can be added to the sub-network according to the network
# structure
substitutes = []
for n2 in pruned_solution:
substitutes.append(
getValidPaths(n2, np.array(pruned_solution), adj_matrix))
# Getting the unique node
substitutes = list(set([e for lt in substitutes for e in lt if not e == n1]))
for n2 in substitutes:
new_score = objective(np.array(pruned_solution + [n2]), **objective_args)
if new_score > best_score:
best_solution = pruned_solution + [n2]
best_score = new_score
current_depth += 1
if best_solution is None:
return nodes, best_score
return best_solution, best_score
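# A minimal usage sketch of greedySearch on a toy 5-node path graph, assuming the helper
# functions getReplaceable and getValidPaths used above are importable from the same module.
# The objective simply sums per-node weights, so the greedy swap should drift towards the
# heavier nodes.
import numpy as np

def node_weight_objective(nodes: np.ndarray, weights: np.ndarray) -> float:
    # Toy objective: total weight of the selected nodes.
    return float(weights[nodes].sum())

adj = np.zeros((5, 5), dtype=np.int8)
for a, b in [(0, 1), (1, 2), (2, 3), (3, 4)]:
    adj[a, b] = adj[b, a] = 1
weights = np.array([0.1, 0.2, 0.9, 0.8, 0.7])
initial = [0, 1, 2]
initial_score = node_weight_objective(np.array(initial), weights)
solution, score = greedySearch(
    nodes=initial, depth=2, adj_matrix=adj,
    objective=node_weight_objective, objective_args={'weights': weights},
    best_score=initial_score)
print(solution, score)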
Python | def create_permutation_ind(cls, toolbox: base.Toolbox, fitness_function: callable,
initial_values: list = None, individual_size: int = None):
"""
Method that allows to create and register (following the guidelines defined in DEAP) the
genotype of the individuals (registered as 'Individual') and the generating function of
individuals (registered as 'individual').
Parameters
----------
toolbox: base.Toolbox
DEAP Toolbox instance.
fitness_function: callable
DEAP fitness function.
initial_values: list, default=None
List of lists of initial genotypes used for the creation of the initial population. This
allows incorporating a priori knowledge about better solutions and usually gives better
results than random initialisation of the genotypes.
If this parameter is not provided, it will be necessary to provide the argument
individual_size.
individual_size: int, default=None
Size of the individual genotype.
If this parameter is not provided, it will be necessary to provide the argument
initial_values.
Notes
-----
Parameters initial_values and individual_size cannot be provided at the same time.
"""
assert (initial_values is None or individual_size is None) and \
not (initial_values is None and individual_size is None), \
'Exactly one of initial_values or individual_size must be provided.'
# Create from initial values
if initial_values is not None:
ind_generator = lambda initial_values: initial_values[
random.randint(0, len(initial_values) - 1)]
toolbox.register('attr_permutation', ind_generator, initial_values)
# Create randomly
else:
toolbox.register('attr_permutation', random.sample, range(individual_size), individual_size)
creator.create('Individual', list, fitness=fitness_function)
toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.attr_permutation)
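# A standalone sketch of the DEAP wiring performed by create_permutation_ind, shown outside the
# owning class for clarity. The fitness class name 'FitnessMax' is an assumption for the example.
import random
from deap import base, creator, tools

creator.create('FitnessMax', base.Fitness, weights=(1.0,))
creator.create('Individual', list, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
# Random permutation genotype of length 8 (equivalent to the individual_size branch above).
toolbox.register('attr_permutation', random.sample, range(8), 8)
toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.attr_permutation)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
print(toolbox.individual())   # e.g. [3, 0, 7, 1, 5, 2, 6, 4]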
Python | def create_set_ind(cls, toolbox: base.Toolbox, fitness_function: callable,
possible_values: list):
"""
Method to create the genotype of the individuals represented by a Set from the list of
possible values received as an argument.
Parameters
----------
toolbox: base.Toolbox
DEAP Toolbox instance.
fitness_function: callable
DEAP fitness function.
possible_values: list
List of possible values to insert into the individual.
"""
creator.create('Individual', set, fitness=fitness_function)
toolbox.register('attr_set', initSetGenotype, possible_values=list(possible_values))
toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.attr_set)
Python | def mutSet(individual, possible_values: list):
"""
Mutation that pops or add an element.
Parameters
----------
individual: Set
Individual to be mutated.
possible_values: list
List of possible values to insert into the individual.
Returns
-------
:tuple
:[0]: Set
Mutated individual.
"""
if random.random() < 0.5:
if len(individual) > 0: # We cannot pop from an empty set
individual.remove(random.choice(sorted(tuple(individual))))
else:
individual.add(random.choice(list(possible_values)))
return individual,
Python | def initSetGenotype(possible_values: list):
"""
Function to initialise the genotype of individuals represented as a Set from a list of possible
values.
Parameters
----------
possible_values: list
List of possible values to insert into the individual.
Returns
-------
:list
Individual genotype.
Notes
-----
There must be at least two different values in possible_values.
"""
num_elements = random.randint(2, len(possible_values) - 1)
return np.random.choice(possible_values, size=num_elements, replace=False).tolist()
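# A minimal sketch combining initSetGenotype and mutSet in a DEAP toolbox, mirroring the
# registrations done by create_set_ind above. 'FitnessMax' is an assumed fitness class name.
import random
from deap import base, creator, tools

possible_values = list(range(20))
creator.create('FitnessMax', base.Fitness, weights=(1.0,))
creator.create('Individual', set, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register('attr_set', initSetGenotype, possible_values=possible_values)
toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.attr_set)
toolbox.register('mutate', mutSet, possible_values=possible_values)

ind = toolbox.individual()      # e.g. {3, 7, 12}
mutated, = toolbox.mutate(ind)  # adds or removes a single element
print(sorted(mutated))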
Python | def processPopulation(best_ants: int, ant_prototype: Ant, population: list,
hof: tools.HallOfFame or None = None) -> tuple:
""" Function in responsible for creating the ants to be returned by the metaheuristic strategies
according to the interface defined in antco.hybrid.base.Metaheuristic from the populations of
individuals of the evolutionary strategies. """
if hof is None: # Create Ants (when Hall of Fame hasn't been specified)
improved_ants = [ant_prototype.new() for _ in range(best_ants)]
improved_scores = [-float('inf') for _ in range(best_ants)]
fitness_values = [ind.fitness.values[0] for ind in population]
best_fitness = np.argsort(fitness_values)[::-1][:best_ants]
for idx, ind_idx in enumerate(best_fitness):
improved_ants[idx].visited_nodes = list(population[ind_idx])
improved_scores[idx] = fitness_values[ind_idx]  # score of the selected individual, not the loop index
else: # Create Ants (using Hall of Fame )
improved_ants = [ant_prototype.new() for _ in range(len(hof.items))]
improved_scores = [-float('inf') for _ in range(len(hof.items))]
for idx, solution in enumerate(hof.items):
improved_ants[idx].visited_nodes = list(solution)
improved_scores[idx] = solution.fitness.values[0]
return improved_ants, improved_scores
Python | def values(self) -> dict:
"""
Returns a dictionary with all the values stored during the execution of the algorithm.
Returns
-------
:dict
:key int
Iteration number.
"""
return self._values
Python | def best_solution(self) -> dict:
"""
Return a dictionary with the best solution found by the algorithm.
Returns
-------
:return dict
:key "ant": Best Ant instance.
:key "solution": Nodes visited by the ant.
:key "score": Score associated to the ant.
:key "iteration": Iteration in which the best solution was found.
"""
return self._best_solution
Python | def save(self, iteration: int, **kwargs):
"""
Stores the values received as keyword arguments, associated with the iteration passed as
an argument.
Parameters
----------
iteration: int
Algorithm iteration number.
kwargs:
Parameters to be saved.
"""
for key, value in kwargs.items():
self._values[iteration][key] = value
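# A brief usage sketch of the Report accessors above. The constructor call mirrors the one used
# in the main ACS loop; it is assumed here that Report creates its per-iteration storage lazily
# so save() can be called with any iteration number, and that best_solution is filled in by
# updateReportWithBest during a run.
report = Report({'BranchingFactor': {'lambda_values': [0.5]}})
report.save(1, mean_cost=0.42, max_cost=0.77)
report.save(2, mean_cost=0.48, max_cost=0.81)
print(report.values[2]['max_cost'])    # expected: 0.81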
Python | def computeBranchingFactor(pheromones: np.ndarray, adj_matrix: np.ndarray, lambda_values: list = None,
**kw) -> dict:
"""
Function that computes the lambda-branching factor for the specified lambda values.
Parameters
----------
pheromones: np.ndarray of shape (n_nodes, n_nodes) and dtype np.float64
Pheromone matrix to be analysed.
adj_matrix: np.ndarray of shape (n_nodes, n_nodes) and dtype np.int8
Adjacency matrix defining the graph structure.
lambda_values: list
List of lambda values to be computed.
Returns
-------
:dict
Dictionary where the keys will have the format 'lambda_<LAMBDA_VALUE>' (e.g. 'lambda_0.0500')
and the values will be the branching factor obtained for the given lambda.
"""
# Default lambda parameters
lambda_values = lambda_values if lambda_values is not None else [0.05, 0.1, 0.2, 0.4]
return {'lambda_%.4f' % val: getBranchingFactor(pheromones, adj_matrix, val) for val in lambda_values}
Python | def objective(nodes: list, adj_matrix: np.ndarray, diff: np.ndarray) -> float:
""" Compute the difference of the subnetwork associated to the nodes received as argument
between the group Gmax and the group Gmin (the difference has been precomputed in the variable
diff) returning the average. """
subnetwork = np.zeros(adj_matrix.shape, dtype=np.int8)
subnetwork[tuple(np.meshgrid(nodes, nodes))] = 1
subnetwork = np.multiply(subnetwork, adj_matrix)
coords = np.where(subnetwork == 1)
return float(np.mean(diff[coords]))
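# A worked example of the subnetwork objective above on a tiny 4-node graph. The precomputed
# matrix `diff` would normally hold Gmax - Gmin; here it is filled by hand so the expected
# average difference over the selected edges can be checked.
import numpy as np

adj_matrix = np.array([[0, 1, 1, 0],
                       [1, 0, 1, 0],
                       [1, 1, 0, 1],
                       [0, 0, 1, 0]], dtype=np.int8)
diff = np.array([[0.0, 0.9, 0.8, 0.0],
                 [0.9, 0.0, 0.7, 0.0],
                 [0.8, 0.7, 0.0, 0.1],
                 [0.0, 0.0, 0.1, 0.0]])
# Selecting nodes {0, 1, 2} keeps the three strong edges; the score is their mean difference.
print(objective([0, 1, 2], adj_matrix, diff))   # -> 0.8 (mean of 0.9, 0.8, 0.7, each edge counted in both directions)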
Python | def parse(file: str) -> np.ndarray:
""" Reads data from a standard .tsp file. """
with open(file) as input_file:
f = input_file.read().split('\n')
coords = []
for elem in f:
if elem == 'EOF':
break
init_line = elem.split(' ')[0]
if not init_line.isnumeric():
continue
#split_elms = [val for val in elem.split(' ')[1:] if re.match(r'^-?\d+(?:\.\d+)$', val)]
x, y = elem.split(' ')[1:]
coords.append((float(x), float(y)))
distance_matrix_ = np.zeros(shape=(len(coords), len(coords)))
for i in range(distance_matrix_.shape[0]):
for j in range(distance_matrix_.shape[1]):
if not i == j:
distance_matrix_[i, j] = euclidean(coords[i], coords[j])
else:
distance_matrix_[i, j] = 0.0
return distance_matrix_
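# A usage sketch of parse() on a minimal TSPLIB-style file written to a temporary location.
# Only the "<index> <x> <y>" node lines and the EOF terminator matter to the parser above.
import os
import tempfile

content = """NAME: toy
TYPE: TSP
DIMENSION: 3
NODE_COORD_SECTION
1 0.0 0.0
2 3.0 0.0
3 0.0 4.0
EOF
"""
with tempfile.NamedTemporaryFile('w', suffix='.tsp', delete=False) as tmp:
    tmp.write(content)
    path = tmp.name
D = parse(path)
print(D.shape)                     # (3, 3)
print(D[0, 1], D[0, 2], D[1, 2])   # 3.0 4.0 5.0
os.remove(path)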
Python | def is_connected(nodes: list or np.ndarray, adj_matrix: np.ndarray) -> bool:
"""
Method that checks whether the nodes received as a list form a sub-network without any isolated
nodes within the network defined by the adjacency matrix.
Parameters
----------
nodes: np.ndarray or list
Sub-network nodes.
adj_matrix: np.ndarray
Adjacency matrix defining the graph structure.
Returns
-------
:bool
True if the sub-network is connected, False otherwise.
"""
graph = nx.from_numpy_array(adj_matrix)
subgraph = graph.subgraph(nodes)
return nx.is_connected(subgraph)
Python | def generate_problem(nodes: int, edges: int, optimal_path_length: int, noise: int,
min_noise_length: int, max_noise_length: int, seed: int):
"""
Function to generate an optimal sub-graph detection problem.
Parameters
----------
nodes: int
Number of nodes of the network to be explored.
edges: int
Number of edges in the network to be explored.
optimal_path_length: int
Number of nodes in the optimal sub-network.
noise: int
Noise added to the problem. This number determines the number of random subnetworks (with
less differences than the principal one) added to the problem.
min_noise_length: int
Minimum number of nodes in the noise subnetworks.
max_noise_length: int
Maximum number of nodes in the noise subnetworks.
seed: int
Random seed.
Returns
-------
:dict
:key "graph": nx.Graph
Graph to be explored.
:key "adj_matrix": np.ndarray (nodes, nodes), dtype=np.int8
Graph to be explored (as adjacency matrix).
:key "optimal_subgraph": np.ndarray (nodes, nodes), dtype=np.int8
Optimal subgraph (as adjacency matrix).
:key "selected_nodes": np.ndarray(optimal_path_length), dtype=int
Optimal subgraph nodes.
:key "Gmax": np.ndarray (nodes, nodes), dtype=np.float64
Group 1 values.
:key "Gmin": np.ndarray (nodes, nodes), dtype=np.float64
Group 2 values.
"""
problem = dict()
random.seed(seed)
np.random.seed(seed)
# Generate a random graph
graph = nx.dense_gnm_random_graph(n=nodes, m=edges, seed=seed)
adj_matrix = np.array(nx.adjacency_matrix(graph).todense(), dtype=np.int8)
# Select a random optimal sub-graph
selected_nodes = np.random.choice(
[n for n in range(nodes)], size=optimal_path_length, replace=False)
valid_subgraph = False
saveguard = 9999
count = 0
while not valid_subgraph:
if is_connected(selected_nodes, adj_matrix):
valid_subgraph = True
else:
selected_nodes = np.random.choice(
[n for n in range(nodes)], size=optimal_path_length, replace=False)
count += 1
if count == saveguard:
assert False, 'impossible to obtain a connected subgraph from the network, consider ' \
'increasing the number of connections.'
subgraph = np.zeros(shape=adj_matrix.shape, dtype=np.int8)
subgraph[tuple(np.meshgrid(selected_nodes, selected_nodes))] = 1
subgraph = np.multiply(subgraph, adj_matrix).astype(np.int8)
# Generate group values
Gmax = np.multiply(np.random.uniform(low=0.2, high=0.4, size=adj_matrix.shape), adj_matrix)
Gmin = np.multiply(np.random.uniform(low=-0.2, high=0.2, size=adj_matrix.shape), adj_matrix)
# max(Gmax) = 1.2; min(Gmax) = 0.8
# max(Gmin) = -0.4; min(Gmin) = -1.0
# Max difference: 2.2
# Min difference: 1.2
variation = np.multiply(np.random.uniform(low=0.6, high=0.8, size=adj_matrix.shape), subgraph)
Gmax += variation
Gmin -= variation
# Add noise
possible_nodes = list(set([n for n in range(nodes)]) - set(selected_nodes))
for i in range(noise):
noise_nodes = np.random.choice(
possible_nodes, size=random.randint(min_noise_length, max_noise_length), replace=False)
possible_nodes = list(set(possible_nodes) - set(noise_nodes))
if len(possible_nodes) < max_noise_length:
print('Too much noise has been introduced, the added noise may generate a better '
'sub-network than optimal sub-network. The noise factor has been reduced to %d' % i)
break
random_subnetwork = np.zeros(shape=adj_matrix.shape, dtype=np.int8)
random_subnetwork[tuple(np.meshgrid(noise_nodes, noise_nodes))] = 1
random_subnetwork = np.multiply(random_subnetwork, adj_matrix).astype(np.int8)
# max(Gmax) = 0.6; min(Gmax) = 0.2
# max(Gmin) = 0.2; min(Gmin) = -0.4
# Max difference: 1.0
# Min difference: 0.0
variation = np.multiply(np.random.uniform(low=0.0, high=0.2, size=adj_matrix.shape),
random_subnetwork)
Gmax += variation
Gmin -= variation
# Generate symmetric matrices
Gmax = (Gmax + Gmax.T) / 2
Gmin = (Gmin + Gmin.T) / 2
problem['graph'] = graph
problem['adj_matrix'] = adj_matrix
problem['optimal_subgraph'] = subgraph
problem['selected_nodes'] = selected_nodes
problem['Gmax'] = Gmax
problem['Gmin'] = Gmin
return problem
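# A quick usage sketch of generate_problem: a small instance with a 6-node optimal subnetwork
# and two noise subnetworks. The parameter values are illustrative assumptions; the returned
# dictionary can be fed to the subnetwork objective shown earlier via diff = Gmax - Gmin.
problem = generate_problem(nodes=50, edges=200, optimal_path_length=6, noise=2,
                           min_noise_length=3, max_noise_length=5, seed=1997)
diff = problem['Gmax'] - problem['Gmin']
print(sorted(problem['selected_nodes']))
print(objective(problem['selected_nodes'], problem['adj_matrix'], diff))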
Python | def _pherIncrease(n_ants: int, pher_init: float, evaporation: float, weight: float,
iterations: list, score_range: tuple, seed: int):
""" Simulate the pheromone update (increasing the values) according to the AS update
strategy. """
if seed is not None:
np.random.seed(seed)
pher_increase = []
pher = pher_init
for it in iterations:
score = np.sum(
np.random.uniform(low=score_range[0], high=score_range[1], size=n_ants) * weight)
pher = (1 - evaporation) * pher + score * weight
pher_increase.append(pher)
return pher_increase
Python | def _pherDecrease(pher_init: float, evaporation: float, iterations: list):
""" Simulate the pheromone update (decreasing the values) according to the AS update
strategy. """
pher_decrease = []
pher = pher_init
for it in iterations:
pher = (1 - evaporation) * pher
pher_decrease.append(pher)
return pher_decrease
Python | def _pherIncrease(n_ants: int, pher_init: float, evaporation: float, limits: tuple, weight: float,
iterations: list, score_range: tuple, seed: int):
""" Simulate the pheromone update (increasing the values) according to the MMAS update
strategy. """
if seed is not None:
np.random.seed(seed)
pher_increase = []
pher = pher_init
for it in iterations:
scores = np.random.uniform(low=score_range[0], high=score_range[1], size=n_ants)
for score in scores:
pher = (1 - evaporation) * pher + score * weight
if pher < limits[0]:
pher = limits[0]
if pher > limits[1]:
pher = limits[1]
pher_increase.append(pher)
return pher_increase
Python | def _pherDecrease(pher_init: float, evaporation: float, limits: tuple, iterations: list):
""" Simulate the pheromone update (decreasing the values) according to the MMAS update
strategy. """
pher_decrease = []
pher = pher_init
for it in iterations:
pher = (1 - evaporation) * pher
if pher < limits[0]:
pher = limits[0]
if pher > limits[1]:
pher = limits[1]
pher_decrease.append(pher)
return pher_decrease
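# A small sketch using the two MMAS helpers directly above to trace how a single pheromone value
# evolves when it is repeatedly reinforced versus only evaporated, clipped to the trail limits.
# The numeric settings are illustrative assumptions, not values used by the library: with
# weight=1.0 the reinforced trail saturates at the upper limit, while the evaporated trail
# settles at the lower limit.
iterations = list(range(200))
increase = _pherIncrease(n_ants=10, pher_init=0.5, evaporation=0.1, limits=(0.01, 5.0),
                         weight=1.0, iterations=iterations, score_range=(0.5, 1.0), seed=0)
decrease = _pherDecrease(pher_init=0.5, evaporation=0.1, limits=(0.01, 5.0),
                         iterations=iterations)
print(increase[-1], decrease[-1])   # approximately 5.0 and 0.01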
Python | def evaluate(self, ant: antco.Ant):
""" Length of the route taken by the ant. """
if not ant.is_valid:
return 0.0
return objective(ant.visited_nodes, self._cost_matrix)[0]
Python | def serialize(obj: object, path: str, extension: str = None):
"""
Method to serialize any type of object received as an argument.
Parameters
----------
obj: object
Object to be serialized.
path: str
Path where the object will be saved.
extension: str (default None)
Extension to be added.
"""
abs_path = os.path.abspath(path)
while os.path.exists(abs_path): # If exists update the name
path_to_file = '/'.join(abs_path.split('/')[:-1])
file_name = abs_path.split('/')[-1].split('.')[0]
abs_path = '%s/%s_(%s)' % \
(path_to_file, file_name, datetime.now().strftime("%m-%d-%Y-%H-%M-%S"))
print('\nFile: %s already exists, name changed to %s' % (path, abs_path), end='')
if extension is not None:
print('.%s\n' % extension)
else:
print()
# Add extension
if extension is not None:
if not abs_path.endswith('.%s' % extension):
abs_path += '.%s' % extension
with open(abs_path, 'ab') as out:
pickle.dump(obj, out)
print('%s object serialised correctly' % type(obj))
Python | def load(path: str) -> object:
"""
Method to deserialize the object stored in the file received as argument.
Parameters
----------
:param path: str
Path where the object was saved.
"""
abs_path = os.path.abspath(path)
assert os.path.exists(abs_path), 'File %s not found.' % abs_path
try:
with open(path, 'rb') as input_file:
obj = pickle.load(input_file)
return obj
except pickle.UnpicklingError as e:
print(traceback.format_exc())
raise
except (AttributeError, EOFError, ImportError, IndexError) as e:
print(traceback.format_exc())
raise
except Exception as e:
print(traceback.format_exc())
raise
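# A round-trip sketch of serialize() and load(): any picklable object can be written out and
# read back. The file name below is only an example; note that serialize() appends the given
# extension and switches to a timestamped name if the target path already exists.
payload = {'best_solution': [3, 1, 4, 1, 5], 'score': 0.92}
serialize(payload, path='best_run', extension='pkl')
restored = load('best_run.pkl')
print(restored == payload)   # True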
Python | def evaluate(self, ant: antco.Ant):
""" Length of the route taken by the ant. """
if not ant.is_valid:
return 0.0
return self._objective(self.getVisitedNodes(ant), self._cost_matrix)
Python | def load_raw(self):
"""
Read the raw data in *.out files
and output the data stored in a Python dictionary.
"""
# the dictionary to store raw data
data_raw = {}
print('Extracting raw data...')
self.timer = time.time()
# get the folders' names relating to each glider:
names = os.listdir(self.path)
vehicles = {}
for name in names:
if os.path.isdir(os.path.join(self.path, name)):
vehicles[name] = {}
for vehicle in vehicles:
if self.verbose:
print(vehicle)
cycle_profiles = {}
current_dir = os.path.join(self.path, vehicle)
dirlist = []
# get the list of all cycles
cycle_list = os.listdir(current_dir)
for cycle_sequence in cycle_list:
if os.path.isdir(os.path.join(current_dir, cycle_sequence)):
dirlist.append(cycle_sequence)
# initialise data, see header about data
data = {}
for cycle in dirlist:
# initialise list of sensors
sensors = deepcopy(self.sensors)
for sensor in sensors:
data_path = os.path.join(self.path, vehicle, cycle, sensor + '.out')
# read the records if it is not empty, otherwise this sensor/parameter is empty
size = os.stat(data_path).st_size
if size:
sensors[sensor] = np.genfromtxt(data_path)
else:
sensors[sensor] = np.array([])
cycle_profiles[cycle] = deepcopy(sensors)
data[vehicle] = deepcopy(cycle_profiles)
# create data folder if there is no
if not os.path.exists('data'):
os.makedirs('data')
# save extracted data to 'data' folder
pickle.dump(cycle_profiles, open(os.path.join('data', 'rawdata_' + vehicle + '.pkl'), 'wb'))
if self.verbose:
print(vehicle, 'raw data loaded & saved to pickle file')
self.data_raw = deepcopy(data)
if self.verbose:
print("--- %f seconds ---" % (time.time() - self.timer))
return data
Python | def filter(self):
"""
This function filters:
Objective a:
i. Manually set the sensors not of interest or automatically remove parameters with high empty rates
- parameters with high empty rates
- parameters not of interest defined by the user
ii. Ignore data with empty records for the measurements of interest
Objective b:
i. 25m filtering of depth,
TODO: this should be a generic user defined parameter, including the sensors, and their limits
ii. Minimum number of measurement points filtering
:return: filtered cycles
"""
print('Filtering in process...')
self.timer = time.time()
if self.auto_remove:
removed_sensors = self.find_empty_rate()
else:
removed_sensors = self.not_interested
# load raw data
data_raw = deepcopy(self.data_raw)
# initialise target data
data = {}
# clear memory
del self.data_raw
if self.verbose:
print('Parameters to be removed are:', removed_sensors)
# remove sensors not of interests
sensors_filtered = deepcopy(self.sensors)
for sensor in removed_sensors:
sensors_filtered.pop(sensor, None)
for vehicle, cycles in data_raw.items():
data[vehicle] = {}
for cycle, sensors in cycles.items():
data[vehicle][cycle] = {}
for sensor in sensors_filtered:
if sensor in data_raw[vehicle][cycle]:
data[vehicle][cycle][sensor] = deepcopy(data_raw[vehicle][cycle][sensor])
# remove cycles if they contain empty parameters of interest, or the record of any parameter is less than np
empty = {}
for vehicle, cycles in data.items():
empty[vehicle] = []
for cycle, sensors in cycles.items():
for sensor, records in sensors.items():
if records.shape[0] <= self.np:
empty[vehicle].append(cycle)
break
for vehicle in data:
for cycle in empty[vehicle]:
del data[vehicle][cycle]
# print(vehicle, cycle)
# delete cycles with parameters beyond defined thresholds
thres = {}
for vehicle, cycles in data.items():
thres[vehicle] = []
for cycle, sensors in cycles.items():
for r, l in self.sensor_threshold.items():
if np.max(sensors[r][:, 1]) < l:
thres[vehicle].append(cycle)
for vehicle in data:
for cycle in thres[vehicle]:
del data[vehicle][cycle]
# remove vehicle if it is empty
empty_vehicle = []
for vehicle, cycles in data.items():
if len(cycles) == 0:
empty_vehicle.append(vehicle)
for vehicle in empty_vehicle:
del data[vehicle]
if self.verbose:
print(vehicle, 'is removed due to lack of valuable data: '
'try to relax the filtering limits if this vehicle is of interest!')
"""
to get the raw timelines, including starting and ending time (in raw format),
this is to ensure cycles in sequence
"""
t_pd = {}
for vehicle, cycles in data.items():
temp = []
for cycle, sensors in cycles.items():
st_raw = np.array([])
ft_raw = np.array([])
for sensor, records in sensors.items():
st_raw = np.append(st_raw, records[0, 0])
ft_raw = np.append(ft_raw, records[-1, 0])
temp.append(
{'cycle': cycle, 's_t': np.min(st_raw), 'f_t': np.max(ft_raw)}
)
t_pd[vehicle] = pd.DataFrame(temp)
t_surface = {}
for vehicle, temp in t_pd.items():
t_surface[vehicle] = {}
temp.sort_values('s_t', inplace=True, ascending=True, ignore_index=False)
length = temp.shape[0]
for i in range(length):
if i == (length-1):
t_surface[vehicle][temp.iloc[i, 0]] = 0
else:
t_surface[vehicle][temp.iloc[i, 0]] = temp.iloc[i + 1, 1] - temp.iloc[i, 2]
self.t = t_surface
# get vertical velocity
data = self.get_velocity(data)
self.data_filter = deepcopy(data)
if self.verbose:
print("--- %f seconds ---" % (time.time() - self.timer))
return data
Python | def interpolate(self):
"""
Apply a uniform timeline to all measurements by 1D interpolation
- Start time
- End time
- Relative timelines
- Interpolations
"""
print('Interpolation in process...')
self.timer = time.time()
data = deepcopy(self.data_filter)
# set relative timelines for dive profiles with complete records
ft = {}
for vehicle, cycles in data.items():
ft[vehicle] = {}
for cycle in cycles:
start_time = np.array([])
final_time = np.array([])
# get start and final times of all measurements, find min and max of all measurements for each cycle
for sensor, measurement in data[vehicle][cycle].items():
start_time = np.append(start_time, measurement[0, 0])
final_time = np.append(final_time, measurement[-1, 0])
s_t = np.min(start_time)
# set relative start time
for sensor in cycles[cycle]:
data[vehicle][cycle][sensor][:, 0] -= s_t
ft[vehicle][cycle] = np.max(final_time) - s_t
# apply 1d interpolation to all measurements
t = np.arange(0, ft[vehicle][cycle], self.dt)
for sensor, records in cycles[cycle].items():
f = interpolate.interp1d(records[:, 0], records[:, 1], fill_value='extrapolate')
y = f(t)
# clear original data
data[vehicle][cycle][sensor] = np.zeros([len(t), 2])
# fill in with interpolated data
data[vehicle][cycle][sensor][:, 0] = deepcopy(t)
data[vehicle][cycle][sensor][:, 1] = deepcopy(y)
self.ft = ft
self.data_interp = data
if self.verbose:
print("--- %f seconds ---" % (time.time() - self.timer))
return data
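# The core of interpolate() above is resampling each (time, value) record onto the shared
# timeline np.arange(0, ft, dt). A self-contained sketch of that step for a single sensor:
import numpy as np
from scipy.interpolate import interp1d

dt = 2.0
record = np.array([[0.0, 10.0],    # column 0: relative time, column 1: measurement
                   [3.0, 13.0],
                   [9.0, 19.0]])
ft = record[-1, 0]
t = np.arange(0, ft, dt)
f = interp1d(record[:, 0], record[:, 1], fill_value='extrapolate')
resampled = np.column_stack((t, f(t)))
print(resampled)   # values linearly interpolated at t = 0, 2, 4, 6, 8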
Python | def model_based_processing(self):
"""
Processing for model-based analysis:
a. New parameters introduced and calculation required (Fourth pass, gsw_toolbox called)
b. Divide the cycles into dives and climbs and remove transients (Fifth pass)
c. Put together the dives and climbs without transients (Sixth pass or Seventh pass)
d. Compute the buoyancy value for each data point (Eighth pass)
:return: data for system identification
"""
data_interp = deepcopy(self.data_interp)
s = Slocum(self.ft, self.dt)
data_calculated = s.slocum_cal_a(data_interp)
dive, climb, data = s.slocum_cal_b(data_calculated, self.t)
data_new = s.slocum_cal_c(dive, climb, data)
# data for system identification
data_b = s.slocum_cal_d(data_new)
return data_b
Python | def data_driven_processing(self):
"""
This function processes the data for data-driven approaches
TODO: currently only normalisation is included
:return: normalised data
"""
print('Prepare data for data-driven approaches...')
self.timer = time.time()
data = deepcopy(self.data_interp)
# normalise the data
data_normalised_multi = self.normalise_multi(deepcopy(data))
self.data_multi_train(data_normalised_multi)
if self.verbose:
print("--- %f seconds ---" % (time.time() - self.timer)) | def data_driven_processing(self):
"""
This function process the data for data driven approaches
TODO: currently only normalisation is included
:return: normalised data
"""
print('Prepare data for data-driven approaches...')
self.timer = time.time()
data = deepcopy(self.data_interp)
# normalise the data
data_normalised_multi = self.normalise_multi(deepcopy(data))
self.data_multi_train(data_normalised_multi)
if self.verbose:
print("--- %f seconds ---" % (time.time() - self.timer)) |
Python | def find_empty_rate(self):
"""
This function finds the empty rates of sensors/parameters
"""
empty_rate = deepcopy(self.sensors)
for sensor in empty_rate:
empty_rate[sensor] = 0.
total_cycle = 0
data = self.data_raw
for vehicle, cycles in data.items():
for cycle, sensors in cycles.items():
total_cycle += 1
for sensor in sensors:
size = len(sensors[sensor])
if size == 0:
empty_rate[sensor] += 1
for sensor in empty_rate:
empty_rate[sensor] /= total_cycle
empty_rate_sorted = sorted(empty_rate.items(), key=lambda x: x[1], reverse=True)
empty_high = []
if self.verbose and empty_rate_sorted[0][1] > self.e_r_threshold:
print('Be careful: the following sensors/parameters have empty rates higher than the threshold of',
self.e_r_threshold, '!!!')
for i in empty_rate_sorted:
if i[1] > self.e_r_threshold:
empty_high.append(i[0])
if self.verbose:
print(i[0], ':', i[1])
print(empty_rate_sorted)
print('Total cycle number is', total_cycle)
return empty_high
Python | def normalise(self, data):
"""
Normalise all data to the range of [0, 1]
"""
minmax = {}
# initialise the normalised data
for vehicle, cycles in data.items():
minmax[vehicle] = {}
for cycle, sensors in cycles.items():
for sensor in sensors:
minmax[vehicle][sensor] = {}
minmax[vehicle][sensor]['min'] = None
minmax[vehicle][sensor]['max'] = None
# find min and max for sensors
for vehicle, cycles in data.items():
for cycle, sensors in cycles.items():
for sensor, records in sensors.items():
r = records[:, 1]
min_r = r.min()
max_r = r.max()
if minmax[vehicle][sensor]['min'] is None:
minmax[vehicle][sensor]['min'] = min_r
else:
minmax[vehicle][sensor]['min'] = min(min_r, minmax[vehicle][sensor]['min'])
if minmax[vehicle][sensor]['max'] is None:
minmax[vehicle][sensor]['max'] = max_r
else:
minmax[vehicle][sensor]['max'] = max(max_r, minmax[vehicle][sensor]['max'])
# normalise sensor measurements to [0, 1]
for vehicle, cycles in data.items():
for cycle, sensors in cycles.items():
for sensor, records in sensors.items():
ptp = minmax[vehicle][sensor]['max'] - minmax[vehicle][sensor]['min']
data[vehicle][cycle][sensor][:, 1] = (records[:, 1] - minmax[vehicle][sensor]['min']) / ptp
# data[vehicle][cycle][sensor][:, 1] = 2 * (records[:, 1] - minmax[vehicle][sensor]['min'])/ptp - 1
self.minmax = minmax
# save the minimum and maximum values to file
if not os.path.exists('training_data'):
os.makedirs('training_data')
pickle.dump(minmax, open(os.path.join('training_data', 'de_normal.pkl'), 'wb'))
return data
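# The per-vehicle minmax dictionary saved above is what allows normalised values to be mapped
# back to physical units later. A minimal sketch of the inverse transform, assuming the
# structure written to training_data/de_normal.pkl; the vehicle and sensor names below are
# placeholders.
import numpy as np

def denormalise(values: np.ndarray, minmax: dict, vehicle: str, sensor: str) -> np.ndarray:
    """ Map values from [0, 1] back to the original sensor range. """
    lo = minmax[vehicle][sensor]['min']
    hi = minmax[vehicle][sensor]['max']
    return values * (hi - lo) + lo

minmax = {'unit_345': {'m_depth': {'min': 0.0, 'max': 95.0}}}
print(denormalise(np.array([0.0, 0.5, 1.0]), minmax, 'unit_345', 'm_depth'))   # [ 0.  47.5 95. ]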
Python | def normalise_multi(self, data):
"""
This normalises the data across all vehicles for multi-vehicle training; all data are scaled to the range [0, 1]
"""
minmax = {}
# collect sensor names from the first cycle of the first vehicle (assumed identical across vehicles)
sensor_list = []
for vehicle, cycles in data.items():
for cycle, sensors in cycles.items():
for sensor in sensors:
sensor_list.append(sensor)
break
break
for sensor in sensor_list:
minmax[sensor] = {}
minmax[sensor]['min'] = None
minmax[sensor]['max'] = None
# find min and max for the sensors across the vehicles
for vehicle, cycles in data.items():
for cycle, sensors in cycles.items():
for sensor, records in sensors.items():
r = records[:, 1]
min_r = r.min()
max_r = r.max()
if minmax[sensor]['min'] is None:
minmax[sensor]['min'] = min_r
else:
minmax[sensor]['min'] = min(min_r, minmax[sensor]['min'])
if minmax[sensor]['max'] is None:
minmax[sensor]['max'] = max_r
else:
minmax[sensor]['max'] = max(max_r, minmax[sensor]['max'])
# normalise sensor measurements to [0, 1]
for vehicle, cycles in data.items():
for cycle, sensors in cycles.items():
for sensor, records in sensors.items():
ptp = minmax[sensor]['max'] - minmax[sensor]['min']
data[vehicle][cycle][sensor][:, 1] = (records[:, 1] - minmax[sensor]['min']) / ptp
# data[cycle][sensor][:, 1] = 2 * (records[:, 1] - minmax[sensor]['min'])/ptp - 1
self.minmax_multi = minmax
# save the minimum and maximum values to file
if not os.path.exists('training_data'):
os.makedirs('training_data')
pickle.dump(minmax, open(os.path.join('training_data', 'de_normal_multi.pkl'), 'wb'))
return data |
Python | def rcnn_im_detect_with_gtbox(net, im, boxes, feat_list = ()):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals or None (for RPN)
feat_list: a list that contains feature names you need. (SUPPORT: conv1-conv5, fc, and logit)
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
attr_scores (ndarray): R x M array of attribute class scores
"""
feat_dict = {
"conv1": "conv1",
"conv2": "res2c",
"conv3": "res3b3",
"conv4": "res4b22",
"conv5": "res5c",
"fc":"pool5_flat",
"logit":"cls_score"
}
blobs, im_scales = _get_blobs(im, boxes)
# Purpose: save computation resource for duplicated ROIs.
if cfg.DEDUP_BOXES > 0:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True,
return_inverse=True)
blobs['rois'] = blobs['rois'][index, :]
boxes = boxes[index, :]
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
net.blobs['rois'].reshape(*(blobs['rois'].shape))
if 'im_info' in net.blobs:
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
if 'im_info' in net.blobs:
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
blobs_out = net.forward(**forward_kwargs)
feats = []
if len(feat_list) > 0:
for f in feat_list:
feats.append(net.blobs[feat_dict[f]])
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
if cfg.TEST.COMMON.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
if 'attr_prob' in net.blobs:
attr_scores = blobs_out['attr_prob']
else:
attr_scores = None
if 'rel_prob' in net.blobs:
rel_scores = blobs_out['rel_prob']
else:
rel_scores = None
return scores, pred_boxes, attr_scores, rel_scores, feats |
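The ROI de-duplication above hashes each scaled ROI row into a single scalar before calling np.unique. A standalone toy sketch of that trick (the ROI values and the scale constant are invented for illustration; the constant plays the role of cfg.DEDUP_BOXES):

import numpy as np

rois = np.array([[0., 10., 20., 30., 40.],
                 [0., 10., 20., 30., 40.],    # exact duplicate of the first ROI
                 [0., 15., 25., 35., 45.]])
dedup_scale = 0.0625
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(rois * dedup_scale).dot(v)  # one scalar hash per ROI
_, index, inv_index = np.unique(hashes, return_index=True, return_inverse=True)
unique_rois = rois[index, :]                  # the forward pass only runs on these rows
# scores[inv_index, :] then restores one score row per original ROI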
Python | def _load_and_fix_object_detector(self, object_model):
"""
To use this function, you need to make sure that all keys in object_model match the ones in the target model.
"""
self._fixed_keys = set([key.split('.')[0] for key in object_model.keys()])
self.load_state_dict(object_model, strict=False)
for name, module in self.named_children():
if name in self._fixed_keys:
for p in module.parameters(): p.requires_grad = False |
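A small standalone illustration of how the frozen sub-modules are derived from the state-dict keys in the method above (the example keys are invented for illustration):

object_model = {
    'FeatExt.conv1.weight': None,       # hypothetical backbone parameters
    'FeatExt.bn1.weight': None,
    'RCNN_cls_score.weight': None,      # hypothetical head parameters
}
fixed_keys = set(key.split('.')[0] for key in object_model.keys())
# fixed_keys == {'FeatExt', 'RCNN_cls_score'}: these named children have requires_grad set to False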
Python | def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
if index.startswith("coco"):
image_dir = self._ind_to_dir[index]
image_id = "_".join(index.split("_")[1:])
file_name = str(image_id).zfill(12) + '.jpg'
image_path = os.path.join(image_dir, file_name)
elif index.startswith("vg"):
image_dir = self._ind_to_dir[index]
image_id = "_".join(index.split("_")[1:])
file_name = str(image_id) + '.jpg'
image_path = os.path.join(image_dir, file_name)
else:
file_name = str(index) + '.jpg'
image_path = os.path.join(self._data_path, 'JPEGImages', file_name)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path |
Python | def _get_default_path(self):
"""
Return the default path where the Visual Manipulation Relationship Dataset (VMRD) is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'VMRD') |
Python | def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
if self._image_set == "trainval" and self._use_coco_vg_aug:
gt_roidb = [self._load_vmrd_annotation(index)
for index in self.image_index[:self._original_num_img]]
gt_roidb = self._append_rotated_images(gt_roidb)
# append coco data
gt_roidb.extend([self._load_coco_vg_annotation(index)
for index in self.image_index if index.startswith("coco") or index.startswith("vg")])
else:
# merge object and grasp annotations into one dict (list() keeps this valid on both Python 2 and 3)
gt_roidb = [dict(list(self._load_vmrd_annotation(index).items()) +
list(self._load_grasp_annotation(index).items()))
for index in self.image_index[:self._original_num_img]]
if self._image_set == "trainval":
gt_roidb = self._append_rotated_images(gt_roidb)
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb |
Python | def _load_vmrd_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
# if not self.config['use_diff']:
# # Exclude the samples labeled as difficult
# non_diff_objs = [
# obj for obj in objs if int(obj.find('difficult').text) == 0]
# # if len(non_diff_objs) != len(objs):
# # print 'Removed {} difficult objects'.format(
# # len(objs) - len(non_diff_objs))
# objs = non_diff_objs
num_objs = len(objs)
nodeinds = np.zeros(num_objs, dtype=np.uint16)
parent_list = []
child_list = []
boxes = np.zeros((num_objs, 4), dtype=np.int32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
ishards = np.zeros((num_objs), dtype=np.int32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
nodeind = int(obj.find('index').text)
parentnodes = obj.find('father').findall('num')
parents = [int(f.text) for f in parentnodes]
childnodes = obj.find('children').findall('num')
children = [int(f.text) for f in childnodes]
diffc = obj.find('difficult')
difficult = 0 if diffc is None else int(diffc.text)
ishards[ix] = difficult
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
if x1 >= x2 or y1 >= y2:
print('Warning: degenerate box in {}'.format(filename))
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
nodeinds[ix] = nodeind
parent_list.append(np.array(parents, dtype=np.uint16))
child_list.append(np.array(children, dtype=np.uint16))
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_ishard': ishards,
'gt_overlaps': overlaps,
'seg_areas': seg_areas,
'node_inds': nodeinds,
'parent_lists': parent_list,
'child_lists': child_list,
'rotated': 0} |
Python | def _load_coco_vg_annotation(self, index):
"""
Loads COCO bounding-box instance annotations. Crowd instances are
handled by marking their overlaps (with all categories) to -1. This
overlap value means that crowd "instances" are excluded from training.
"""
width = self._widths[self._index_to_i[index]]
height = self._heights[self._index_to_i[index]]
index = index.split("_")
prefix = index[0]
ind = int(index[1])
valid_objs = []
if prefix == "coco":
objs = self._cocoidToAnn[ind]
for obj in objs:
x1 = np.max((0, obj['bbox'][0]))
y1 = np.max((0, obj['bbox'][1]))
x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))
y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))
if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
else:
ann = self._vgidToAnn[ind]
objs = ann["objects"]
for obj in objs:
x1 = np.max((0, obj['x']))
y1 = np.max((0, obj['y']))
x2 = np.min((width - 1, x1 + np.max((0, obj['w'] - 1))))
y2 = np.min((height - 1, y1 + np.max((0, obj['h'] - 1))))
if x2 >= x1 and y2 >= y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
objs = valid_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.int32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
seg_areas = np.zeros((num_objs), dtype=np.float32)
for ix, obj in enumerate(objs):
if prefix == "coco":
cls = obj['category_id']
seg_areas[ix] = obj['area']
if obj['iscrowd']:
# Set overlap to -1 for all classes for crowd objects
# so they will be excluded during training
overlaps[ix, :] = -1.0
else:
overlaps[ix, cls] = 1.0
else:
vg_cls = obj["names"][0]
cls = self._vg_vmrd_synset[vg_cls] if vg_cls in self._vg_vmrd_synset.keys() else vg_cls
cls = self._class_to_ind[cls]
seg_areas[ix] = obj['w'] * obj['h']
overlaps[ix, cls] = 1.0
boxes[ix, :] = obj['clean_bbox']
gt_classes[ix] = cls
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'width': width,
'height': height,
'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'seg_areas': seg_areas,
'rotated': 0} |
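A toy version of the overlaps encoding used by the annotation loaders above (one row per box, one column per class; crowd instances are marked -1 everywhere so they can be filtered out downstream):

import numpy as np
import scipy.sparse

num_classes = 4
overlaps = np.zeros((3, num_classes), dtype=np.float32)
overlaps[0, 2] = 1.0      # ordinary object of class 2
overlaps[1, 1] = 1.0      # ordinary object of class 1
overlaps[2, :] = -1.0     # crowd instance, excluded from training
overlaps = scipy.sparse.csr_matrix(overlaps)
print(overlaps.toarray().max(axis=1))   # [ 1.  1. -1.] -> rows with max -1 are dropped later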
Python | def evaluate_recall(self, candidate_boxes=None, thresholds=None,
area='all', limit=None):
"""Evaluate detection proposal recall metrics.
Returns:
results: dictionary of results with keys
'ar': average recall
'recalls': vector recalls at each IoU overlap threshold
'thresholds': vector of IoU overlap thresholds
'gt_overlaps': vector of all ground-truth overlaps
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {'all': 0, 'small': 1, 'medium': 2, 'large': 3,
'96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}
area_ranges = [[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
[512 ** 2, 1e5 ** 2], # 512-inf
]
assert area in areas, 'unknown area range: {}'.format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = np.zeros(0)
num_pos = 0
for i in range(self.num_images):
# Checking for max_overlaps == 1 avoids including crowd annotations
# (...pretty hacking :/)
max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)
gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &
(max_gt_overlaps == 1))[0]
gt_boxes = self.roidb[i]['boxes'][gt_inds, :]
gt_areas = self.roidb[i]['seg_areas'][gt_inds]
valid_gt_inds = np.where((gt_areas >= area_range[0]) &
(gt_areas <= area_range[1]))[0]
gt_boxes = gt_boxes[valid_gt_inds, :]
num_pos += len(valid_gt_inds)
if candidate_boxes is None:
# If candidate_boxes is not supplied, the default is to use the
# non-ground-truth boxes from this roidb
non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]
boxes = self.roidb[i]['boxes'][non_gt_inds, :]
else:
boxes = candidate_boxes[i]
if boxes.shape[0] == 0:
continue
if limit is not None and boxes.shape[0] > limit:
boxes = boxes[:limit, :]
overlaps = bbox_overlaps(boxes.astype(np.float),
gt_boxes.astype(np.float))
_gt_overlaps = np.zeros((gt_boxes.shape[0]))
for j in range(gt_boxes.shape[0]):
# find which proposal box maximally covers each gt box
argmax_overlaps = overlaps.argmax(axis=0)
# and get the iou amount of coverage for each gt box
max_overlaps = overlaps.max(axis=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ind = max_overlaps.argmax()
gt_ovr = max_overlaps.max()
assert (gt_ovr >= 0)
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert (_gt_overlaps[j] == gt_ovr)
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
gt_overlaps = np.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = np.arange(0.5, 0.95 + 1e-5, step)
recalls = np.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,
'gt_overlaps': gt_overlaps} |
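The recall curve returned above depends only on the per-ground-truth best overlaps gathered in the loop. A toy recomputation with made-up overlap values:

import numpy as np

gt_overlaps = np.array([0.42, 0.55, 0.61, 0.73, 0.88, 0.95])
thresholds = np.arange(0.5, 0.95 + 1e-5, 0.05)
recalls = np.array([(gt_overlaps >= t).sum() / float(gt_overlaps.size) for t in thresholds])
ar = recalls.mean()   # same averaging as the 'ar' value returned by evaluate_recall()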
Python | def clip_boxes_batch(boxes, im_shape, batch_size = 1):
"""
Clip boxes to image boundaries.
"""
num_rois = boxes.size(1)
boxes[boxes < 0] = 0
# batch_x = (im_shape[:,0]-1).view(batch_size, 1).expand(batch_size, num_rois)
# batch_y = (im_shape[:,1]-1).view(batch_size, 1).expand(batch_size, num_rois)
batch_x = im_shape[:, 1] - 1
batch_y = im_shape[:, 0] - 1
boxes[:,:,0][boxes[:,:,0] > batch_x] = batch_x
boxes[:,:,1][boxes[:,:,1] > batch_y] = batch_y
boxes[:,:,2][boxes[:,:,2] > batch_x] = batch_x
boxes[:,:,3][boxes[:,:,3] > batch_y] = batch_y
return boxes |
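A quick usage sketch of clip_boxes_batch, assuming the shapes implied by the indexing above: boxes is [batch, num_rois, 4] and im_shape is [batch, 2] holding (height, width) per image.

import torch

boxes = torch.tensor([[[-5.0, 10.0, 700.0, 400.0],
                       [20.0, 30.0, 100.0, 900.0]]])   # one image, two ROIs
im_shape = torch.tensor([[480.0, 640.0]])              # (height, width)
clipped = clip_boxes_batch(boxes, im_shape)
# x coordinates are clamped to [0, 639], y coordinates to [0, 479]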
Python | def image_path_from_index(self, index):
"""
For the Jacquard dataset, the index is the absolute path prefix of the corresponding image file; the modality suffix is appended below.
"""
if self._version == 'rgb':
image_path = index + '_RGB.png'
elif self._version == 'rgd':
image_path = index + '_RGD_.png'
elif self._version == 'depth':
image_path = index + '_Depth.png'
else:
raise NotImplementedError('unsupported image version: {}'.format(self._version))
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path |
Python | def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /Cornell/ImageSets/test.txt
if isinstance(self._image_set,list):
image_index = []
for file in self._image_set:
image_set_file = os.path.join(self._data_path, file + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index += [x.strip() for x in f.readlines()]
else:
image_set_file = os.path.join(self._data_path, self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index |
Python | def _get_default_path(self):
"""
Return the default path where the Jacquard dataset is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'Jacquard') |
Python | def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb |
Python | def _load_annotation(self, index):
"""
Load grasp annotations and derive the object bounding box from the
segmentation mask for a Jacquard sample.
"""
pos_filename = index + '_grasps.txt'
grasps = self._load_label_file(pos_filename)
grasps = self._labels_to_points(grasps)
# load segmentation
seg_name = index + '_mask.png'
seg = cv2.imread(seg_name)
seg = np.where(seg > 0)
xmin = np.min(seg[1])
xmax = np.max(seg[1])
ymin = np.min(seg[0])
ymax = np.max(seg[0])
# object bounding box derived from the segmentation mask
boxes = np.array([[xmin, ymin, xmax, ymax]], dtype = np.int32)
# self._show_label(index, grasps)
return {'grasps': grasps,
'boxes': boxes,
'rotated': 0} |
Python | def prep_im_for_blob(im, target_size, max_size, fix_size = False):
"""Mean subtract and scale an image for use in a blob."""
im = im.astype(np.float32, copy=False)
im_shape = im.shape
im_scale = {}
if not fix_size:
im_size_min = np.min(im_shape[0:2])
im_scale['x'] = float(target_size) / float(im_size_min)
im_scale['y'] = float(target_size) / float(im_size_min)
else:
im_size_y, im_size_x = im.shape[:2]
im_scale['x'] = float(target_size) / float(im_size_x)
im_scale['y'] = float(target_size) / float(im_size_y)
# Prevent the biggest axis from being more than MAX_SIZE
# if np.round(im_scale * im_size_max) > max_size:
# im_scale = float(max_size) / float(im_size_max)
# im = imresize(im, im_scale)
im = cv2.resize(im, None, None, fx=im_scale['x'], fy=im_scale['y'],
interpolation=cv2.INTER_LINEAR)
return im, im_scale |
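A short usage sketch with illustrative values: with fix_size=False the shorter image side is scaled to target_size, so a 480x640 image becomes 600x800.

import numpy as np

im = np.zeros((480, 640, 3), dtype=np.uint8)
resized, scale = prep_im_for_blob(im, target_size=600, max_size=1000, fix_size=False)
# scale['x'] == scale['y'] == 600.0 / 480.0 and resized.shape == (600, 800, 3)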
Python | def clip_gradient(model, clip_norm):
"""Computes a gradient clipping coefficient based on gradient norm."""
totalnorm = gradient_norm(model)
norm = clip_norm / max(totalnorm, clip_norm)
for p in model.parameters():
if p.requires_grad and p.grad is not None:
p.grad.mul_(norm) |
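clip_gradient() calls a gradient_norm() helper that is not shown in this excerpt. The sketch below is an assumption about its behaviour, following the usual definition (global L2 norm over the gradients of trainable parameters):

def gradient_norm(model):
    # assumed behaviour: global L2 norm over the gradients of trainable parameters
    total = 0.0
    for p in model.parameters():
        if p.requires_grad and p.grad is not None:
            total += p.grad.data.norm(2).item() ** 2
    return total ** 0.5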
Python | def gt_roidb(self, split, version):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self._cache_path, "regrad_{}_{}_gt.pkl".format(version, split))
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
all_annos = pkl.load(fid)
print('regrad gt loaded from {}'.format(cache_file))
return all_annos
gt_roidb = self._load_all_annos(split, version)
with open(cache_file, 'wb') as fid:
pkl.dump(gt_roidb, fid, pkl.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb |
Python | def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
# Example image path for index=119993:
# images/train2014/COCO_train2014_000000119993.jpg
file_name = (str(index).zfill(12) + '.jpg')
image_path = osp.join(self._data_path, self._image_name_to_dir[file_name], file_name)
assert osp.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path |
Python | def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = osp.join(self.cache_path, self.name + '_gt_roidb.pkl')
if osp.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_refcoco_annotation(index)
for index in self._image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb |
Python | def _load_refcoco_annotation(self, index):
"""
Loads COCO bounding-box instance annotations. Crowd instances are
handled by marking their overlaps (with all categories) to -1. This
overlap value means that crowd "instances" are excluded from training.
"""
im_ann = self._refCOCO.loadImgs(index)[0]
width = im_ann['width']
height = im_ann['height']
annIds = self._refCOCO.getAnnIds(image_ids=index)
objs = self._refCOCO.loadAnns(annIds)
refIds = self._refCOCO.getRefIds(image_ids=index)
refs = self._refCOCO.loadRefs(refIds)
for ref in refs:
assert ref['ann_id'] in annIds
refannIds = [ref['ann_id'] for ref in refs]
# Sanitize bboxes -- some are invalid
valid_objs = []
caps = []
for obj in objs:
x1 = np.max((0, obj['bbox'][0]))
y1 = np.max((0, obj['bbox'][1]))
x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))
y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))
if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
# if there is a corresponding caption for obj
if obj['id'] in refannIds:
ref = self._refCOCO.annToRef[obj['id']]['sentences']
randSel = np.random.randint(len(ref))
ref = ref[randSel]
caps.append(ref['tokens'])
# if there is no caption for obj, an empty token list is used.
else:
caps.append([])
objs = valid_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.int32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Lookup table to map from COCO category ids to our internal class
# indices
coco_cat_id_to_class_ind = dict([(self._class_to_coco_cat_id[cls],
self._class_to_ind[cls])
for cls in self._classes[1:]])
for ix, obj in enumerate(objs):
cls = coco_cat_id_to_class_ind[obj['category_id']]
boxes[ix, :] = obj['clean_bbox']
gt_classes[ix] = cls
seg_areas[ix] = obj['area']
if obj['iscrowd']:
# Set overlap to -1 for all classes for crowd objects
# so they will be excluded during training
overlaps[ix, :] = -1.0
else:
overlaps[ix, cls] = 1.0
ds_utils.validate_boxes(boxes, width=width, height=height)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'width': width,
'height': height,
'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'seg_areas': seg_areas,
'rotated': 0,
'captions': caps} |
Python | def forward(self, data_batch):
"""Applies network layers and ops on input image(s) x.
Args:
x: input image or batch of images. Shape: [batch,3,300,300].
Return:
Depending on phase:
test:
Variable(tensor) of output class label predictions,
confidence score, and corresponding location predictions for
each object detected. Shape: [batch,topk,7]
train:
list of concat outputs from:
1: confidence layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
"""
x = data_batch[0]
im_info = data_batch[1]
gt_boxes = data_batch[2]
num_boxes = data_batch[3]
if self.training:
self.iter_counter += 1
sources = []
s0, x = self.FeatExt(x)
s0 = self.L2Norm(s0)
sources.append(s0)
for m in self.extra_conv:
x = m(x)
sources.append(x)
loc, conf = self._get_obj_det_result(sources)
SSD_loss_cls, SSD_loss_bbox = 0, 0
if self.training:
predictions = (
loc,
conf,
self.priors.type_as(loc)
)
SSD_loss_bbox, SSD_loss_cls = self.criterion(predictions, gt_boxes, num_boxes)
conf = self.softmax(conf)
return loc, conf, SSD_loss_bbox, SSD_loss_cls |
Python | def _get_default_path(self):
"""
Return the default path where BDDS dataset is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'BDDS') |
Python | def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
# if not self.config['use_diff']:
# # Exclude the samples labeled as difficult
# non_diff_objs = [
# obj for obj in objs if int(obj.find('difficult').text) == 0]
# # if len(non_diff_objs) != len(objs):
# # print 'Removed {} difficult objects'.format(
# # len(objs) - len(non_diff_objs))
# objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.int32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
ishards = np.zeros((num_objs), dtype=np.int32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Box coordinates are kept as stored in the XML (no 0-based shift is applied here)
x1 = float(bbox.find('xmin').text)
y1 = float(bbox.find('ymin').text)
x2 = float(bbox.find('xmax').text)
y2 = float(bbox.find('ymax').text)
diffc = obj.find('difficult')
difficult = 0 if diffc is None else int(diffc.text)
ishards[ix] = difficult
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_ishard': ishards,
'gt_overlaps': overlaps,
'seg_areas': seg_areas,
'rotated': 0} |
Python | def resnet18(feat_list, pretrained_model_path):
"""Constructs a ResNet-18 model.
Args:
feat_list: names of the intermediate feature maps to expose.
pretrained_model_path: path to the pre-trained weights to load.
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], feat_list=feat_list, pretrained_model_path=pretrained_model_path)
return model |