patch (string, 17 to 31.2k chars) | y (int64, 1 to 1) | oldf (string, 0 to 2.21M chars) | idx (int64, 1 to 1) | id (int64, 4.29k to 68.4k) | msg (string, 8 to 843 chars) | proj (212 classes) | lang (9 classes)
---|---|---|---|---|---|---|---
@@ -10,6 +10,7 @@ import (
"bytes"
"context"
"encoding/hex"
+ "github.com/iotexproject/iotex-core/db"
"sync"
"time"
y: 1

// Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package rolldpos
import (
"bytes"
"context"
"encoding/hex"
"sync"
"time"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/zjshen14/go-fsm"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/crypto"
"github.com/iotexproject/iotex-core/endorsement"
"github.com/iotexproject/iotex-core/logger"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/proto"
)
/**
 * TODO: For nodes that received a correct proposal, add the proposer's proposal endorsement without a signature, which could later be replaced with the real signature
*/
var (
consensusMtc = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "iotex_consensus",
Help: "Consensus result",
},
[]string{"result"},
)
)
func init() {
prometheus.MustRegister(consensusMtc)
}
const (
// consensusEvt states
sEpochStart fsm.State = "S_EPOCH_START"
sDKGGeneration fsm.State = "S_DKG_GENERATION"
sRoundStart fsm.State = "S_ROUND_START"
sBlockPropose fsm.State = "S_BLOCK_PROPOSE"
sAcceptPropose fsm.State = "S_ACCEPT_PROPOSE"
sAcceptProposalEndorse fsm.State = "S_ACCEPT_PROPOSAL_ENDORSE"
sAcceptLockEndorse fsm.State = "S_ACCEPT_LOCK_ENDORSE"
sAcceptCommitEndorse fsm.State = "S_ACCEPT_COMMIT_ENDORSE"
// consensusEvt event types
eRollDelegates fsm.EventType = "E_ROLL_DELEGATES"
eGenerateDKG fsm.EventType = "E_GENERATE_DKG"
eStartRound fsm.EventType = "E_START_ROUND"
eInitBlockPropose fsm.EventType = "E_INIT_BLOCK_PROPOSE"
eProposeBlock fsm.EventType = "E_PROPOSE_BLOCK"
eProposeBlockTimeout fsm.EventType = "E_PROPOSE_BLOCK_TIMEOUT"
eEndorseProposal fsm.EventType = "E_ENDORSE_PROPOSAL"
eEndorseProposalTimeout fsm.EventType = "E_ENDORSE_PROPOSAL_TIMEOUT"
eEndorseLock fsm.EventType = "E_ENDORSE_LOCK"
eEndorseLockTimeout fsm.EventType = "E_ENDORSE_LOCK_TIMEOUT"
eEndorseCommit fsm.EventType = "E_ENDORSE_COMMIT"
eFinishEpoch fsm.EventType = "E_FINISH_EPOCH"
// eBackdoor indicates a backdoor event type
eBackdoor fsm.EventType = "E_BACKDOOR"
)
var (
// ErrEvtCast indicates the error of casting the event
ErrEvtCast = errors.New("error when casting the event")
// ErrEvtConvert indicates the error of converting the event from/to the proto message
ErrEvtConvert = errors.New("error when converting the event from/to the proto message")
// ErrEvtType represents an unexpected event type error
ErrEvtType = errors.New("error when checking the event type")
// consensusStates is a slice consisting of all consensusEvt states
consensusStates = []fsm.State{
sEpochStart,
sDKGGeneration,
sRoundStart,
sBlockPropose,
sAcceptPropose,
sAcceptProposalEndorse,
sAcceptLockEndorse,
sAcceptCommitEndorse,
}
)
// cFSM wraps over the general purpose FSM and implements the consensusEvt logic
type cFSM struct {
fsm fsm.FSM
evtq chan iConsensusEvt
close chan interface{}
ctx *rollDPoSCtx
wg sync.WaitGroup
}
func newConsensusFSM(ctx *rollDPoSCtx) (*cFSM, error) {
cm := &cFSM{
evtq: make(chan iConsensusEvt, ctx.cfg.EventChanSize),
close: make(chan interface{}),
ctx: ctx,
}
b := fsm.NewBuilder().
AddInitialState(sEpochStart).
AddStates(
sDKGGeneration,
sRoundStart,
sBlockPropose,
sAcceptPropose,
sAcceptProposalEndorse,
sAcceptLockEndorse,
sAcceptCommitEndorse,
).
AddTransition(sEpochStart, eRollDelegates, cm.handleRollDelegatesEvt, []fsm.State{sEpochStart, sDKGGeneration}).
AddTransition(sDKGGeneration, eGenerateDKG, cm.handleGenerateDKGEvt, []fsm.State{sRoundStart}).
AddTransition(sRoundStart, eStartRound, cm.handleStartRoundEvt, []fsm.State{sBlockPropose}).
AddTransition(sRoundStart, eFinishEpoch, cm.handleFinishEpochEvt, []fsm.State{sEpochStart, sRoundStart}).
AddTransition(sBlockPropose, eInitBlockPropose, cm.handleInitBlockProposeEvt, []fsm.State{sAcceptPropose}).
AddTransition(sBlockPropose, eProposeBlockTimeout, cm.handleProposeBlockTimeout, []fsm.State{sAcceptProposalEndorse}).
AddTransition(
sAcceptPropose,
eProposeBlock,
cm.handleProposeBlockEvt,
[]fsm.State{
sAcceptPropose, // proposed block invalid
sAcceptProposalEndorse, // receive valid block, jump to next step
}).
AddTransition(
sAcceptPropose,
eProposeBlockTimeout,
cm.handleProposeBlockTimeout,
[]fsm.State{
sAcceptProposalEndorse, // no valid block, jump to next step
}).
AddTransition(
sAcceptProposalEndorse,
eEndorseProposal,
cm.handleEndorseProposalEvt,
[]fsm.State{
sAcceptProposalEndorse, // haven't reached agreement yet
sAcceptLockEndorse, // reach agreement
}).
AddTransition(
sAcceptProposalEndorse,
eEndorseProposalTimeout,
cm.handleEndorseProposalTimeout,
[]fsm.State{
sAcceptLockEndorse, // timeout, jump to next step
}).
AddTransition(
sAcceptLockEndorse,
eEndorseLock,
cm.handleEndorseLockEvt,
[]fsm.State{
sAcceptLockEndorse, // haven't reached agreement yet
sAcceptCommitEndorse, // reach commit agreement, jump to next step
}).
AddTransition(
sAcceptLockEndorse,
eEndorseLockTimeout,
cm.handleEndorseLockTimeout,
[]fsm.State{
sAcceptCommitEndorse, // reach commit agreement, jump to next step
sRoundStart, // timeout, jump to next round
}).
AddTransition(
sAcceptCommitEndorse,
eEndorseCommit,
cm.handleEndorseCommitEvt,
[]fsm.State{
sRoundStart,
})
// Add the backdoor transition so that we could unit test the transition from any given state
for _, state := range consensusStates {
b = b.AddTransition(state, eBackdoor, cm.handleBackdoorEvt, consensusStates)
}
m, err := b.Build()
if err != nil {
return nil, errors.Wrap(err, "error when building the FSM")
}
cm.fsm = m
return cm, nil
}
func (m *cFSM) Start(c context.Context) error {
m.wg.Add(1)
go func() {
running := true
for running {
select {
case <-m.close:
running = false
case evt := <-m.evtq:
timeoutEvt, ok := evt.(*timeoutEvt)
if ok && timeoutEvt.timestamp().Before(m.ctx.round.timestamp) {
logger.Debug().Msg("timeoutEvt is stale")
continue
}
chainHeight := m.ctx.chain.TipHeight()
eventHeight := evt.height()
if _, ok := evt.(*proposeBlkEvt); ok && eventHeight <= chainHeight {
logger.Debug().Uint64("event height", eventHeight).Uint64("chain height", chainHeight).Msg("skip old proposal")
continue
}
if eEvt, ok := evt.(*endorseEvt); ok && eventHeight <= chainHeight && eEvt.endorse.Endorser() != m.ctx.addr.RawAddress {
logger.Debug().Uint64("event height", eventHeight).Uint64("chain height", chainHeight).Msg("skip old endorsement")
continue
}
src := m.fsm.CurrentState()
if err := m.fsm.Handle(evt); err != nil {
if errors.Cause(err) == fsm.ErrTransitionNotFound {
if m.ctx.clock.Now().Sub(evt.timestamp()) <= m.ctx.cfg.UnmatchedEventTTL {
m.produce(evt, m.ctx.cfg.UnmatchedEventInterval)
logger.Debug().
Str("src", string(src)).
Str("evt", string(evt.Type())).
Err(err).
Msg("consensusEvt state transition could not find a match")
}
} else {
logger.Error().
Str("src", string(src)).
Str("evt", string(evt.Type())).
Err(err).
Msg("consensusEvt state transition fails")
}
} else {
dst := m.fsm.CurrentState()
logger.Debug().
Str("src", string(src)).
Str("dst", string(dst)).
Str("evt", string(evt.Type())).
Msg("consensusEvt state transition happens")
}
}
}
m.wg.Done()
}()
return nil
}
func (m *cFSM) Stop(_ context.Context) error {
close(m.close)
m.wg.Wait()
return nil
}
func (m *cFSM) currentState() fsm.State {
return m.fsm.CurrentState()
}
// produce adds an event into the queue for the consensus FSM to process
func (m *cFSM) produce(evt iConsensusEvt, delay time.Duration) {
if delay > 0 {
m.wg.Add(1)
go func() {
select {
case <-m.close:
case <-m.ctx.clock.After(delay):
m.evtq <- evt
}
m.wg.Done()
}()
} else {
m.evtq <- evt
}
}
func (m *cFSM) handleRollDelegatesEvt(_ fsm.Event) (fsm.State, error) {
epochNum, epochHeight, err := m.ctx.calcEpochNumAndHeight()
if err != nil {
// Even if an error happens, we still need to schedule the next delegate check to tolerate transient errors
m.produce(m.newCEvt(eRollDelegates), m.ctx.cfg.DelegateInterval)
return sEpochStart, errors.Wrap(
err,
"error when determining the epoch ordinal number and start height offset",
)
}
// Update CryptoSort seed
// TODO: Consider persisting the most recent seed
if !m.ctx.cfg.EnableDKG {
m.ctx.epoch.seed = crypto.CryptoSeed
} else if m.ctx.epoch.seed, err = m.ctx.updateSeed(); err != nil {
logger.Error().Err(err).Msg("Failed to generate new seed from last epoch")
}
delegates, err := m.ctx.rollingDelegates(epochNum)
if err != nil {
// Even if an error happens, we still need to schedule the next delegate check to tolerate transient errors
m.produce(m.newCEvt(eRollDelegates), m.ctx.cfg.DelegateInterval)
return sEpochStart, errors.Wrap(
err,
"error when determining if the node will participate in the next epoch",
)
}
// If the current node is the delegate, move to the next state
if m.isDelegate(delegates) {
// The epoch start height is going to be the next block to generate
m.ctx.epoch.num = epochNum
m.ctx.epoch.height = epochHeight
m.ctx.epoch.delegates = delegates
m.ctx.epoch.numSubEpochs = m.ctx.getNumSubEpochs()
m.ctx.epoch.subEpochNum = uint64(0)
m.ctx.epoch.committedSecrets = make(map[string][]uint32)
// Trigger the event to generate DKG
m.produce(m.newCEvt(eGenerateDKG), 0)
logger.Info().
Uint64("epoch", epochNum).
Msg("current node is the delegate")
return sDKGGeneration, nil
}
// Else, stay at the current state and check again later
m.produce(m.newCEvt(eRollDelegates), m.ctx.cfg.DelegateInterval)
logger.Info().
Uint64("epoch", epochNum).
Msg("current node is not the delegate")
return sEpochStart, nil
}
func (m *cFSM) handleGenerateDKGEvt(_ fsm.Event) (fsm.State, error) {
if m.ctx.shouldHandleDKG() {
// TODO: numDelegates will be configurable later on
if len(m.ctx.epoch.delegates) != 21 {
logger.Panic().Msg("Number of delegates is incorrect for DKG generation")
}
secrets, witness, err := m.ctx.generateDKGSecrets()
if err != nil {
return sEpochStart, err
}
m.ctx.epoch.secrets = secrets
m.ctx.epoch.witness = witness
}
if err := m.produceStartRoundEvt(); err != nil {
return sEpochStart, errors.Wrapf(err, "error when producing %s", eStartRound)
}
return sRoundStart, nil
}
func (m *cFSM) handleStartRoundEvt(_ fsm.Event) (fsm.State, error) {
subEpochNum, err := m.ctx.calcSubEpochNum()
if err != nil {
return sEpochStart, errors.Wrap(
err,
"error when determining the sub-epoch ordinal number",
)
}
m.ctx.epoch.subEpochNum = subEpochNum
proposer, height, round, err := m.ctx.rotatedProposer()
if err != nil {
logger.Error().
Err(err).
Msg("error when getting the proposer")
return sEpochStart, err
}
if m.ctx.round.height != height {
m.ctx.round = roundCtx{
height: height,
endorsementSets: make(map[hash.Hash32B]*endorsement.Set),
}
}
m.ctx.round.number = round
m.ctx.round.proposer = proposer
m.ctx.round.timestamp = m.ctx.clock.Now()
m.produce(m.newCEvt(eInitBlockPropose), 0)
// Setup timeout for waiting for proposed block
ttl := m.ctx.cfg.AcceptProposeTTL
m.produce(m.newTimeoutEvt(eProposeBlockTimeout), ttl)
ttl += m.ctx.cfg.AcceptProposalEndorseTTL
m.produce(m.newTimeoutEvt(eEndorseProposalTimeout), ttl)
ttl += m.ctx.cfg.AcceptCommitEndorseTTL
m.produce(m.newTimeoutEvt(eEndorseLockTimeout), ttl)
return sBlockPropose, nil
}
func (m *cFSM) handleInitBlockProposeEvt(evt fsm.Event) (fsm.State, error) {
log := logger.Info().
Str("proposer", m.ctx.round.proposer).
Uint64("height", m.ctx.round.height).
Uint32("round", m.ctx.round.number)
if m.ctx.round.proposer != m.ctx.addr.RawAddress {
log.Msg("current node is not the proposer")
return sAcceptPropose, nil
}
log.Msg("current node is the proposer")
blk := m.ctx.round.block
if blk == nil {
var err error
blk, err = m.ctx.mintBlock()
if err != nil {
return sEpochStart, errors.Wrap(err, "error when minting a block")
}
}
proposeBlkEvt := m.newProposeBlkEvt(blk)
proposeBlkEvtProto := proposeBlkEvt.toProtoMsg()
// Notify itself
h := blk.HashBlock()
logger.Info().Str("block", hex.EncodeToString(h[:])).Msgf("Broadcast init proposal %+v", blk)
m.produce(proposeBlkEvt, 0)
// Notify other delegates
if err := m.ctx.p2p.Broadcast(m.ctx.chain.ChainID(), proposeBlkEvtProto); err != nil {
logger.Error().
Err(err).
Msg("error when broadcasting proposeBlkEvt")
}
return sAcceptPropose, nil
}
func (m *cFSM) validateProposeBlock(blk *blockchain.Block, expectedProposer string) bool {
blkHash := blk.HashBlock()
errorLog := logger.Error().
Uint64("expectedHeight", m.ctx.round.height).
Str("expectedProposer", expectedProposer).
Str("hash", hex.EncodeToString(blkHash[:]))
if blk.Height() != m.ctx.round.height {
errorLog.Uint64("blockHeight", blk.Height()).
Msg("error when validating the block height")
return false
}
producer := blk.ProducerAddress()
if producer == "" || producer != expectedProposer {
errorLog.Str("proposer", producer).
Msg("error when validating the block proposer")
return false
}
if !blk.VerifySignature() {
errorLog.Msg("error when validating the block signature")
return false
}
if producer == m.ctx.addr.RawAddress {
// If the block is self proposed, skip validation
return true
}
containCoinbase := true
if m.ctx.cfg.EnableDKG {
if m.ctx.shouldHandleDKG() {
containCoinbase = false
} else if err := verifyDKGSignature(blk, m.ctx.epoch.seed); err != nil {
// Verify dkg signature failed
errorLog.Err(err).Msg("Failed to verify the DKG signature")
return false
}
}
if err := m.ctx.chain.ValidateBlock(blk, containCoinbase); err != nil {
errorLog.Err(err).Msg("error when validating the proposed block")
return false
}
return true
}
func (m *cFSM) handleProposeBlockEvt(evt fsm.Event) (fsm.State, error) {
if evt.Type() != eProposeBlock {
return sEpochStart, errors.Errorf("invalid event type %s", evt.Type())
}
proposeBlkEvt, ok := evt.(*proposeBlkEvt)
if !ok {
return sEpochStart, errors.Wrap(ErrEvtCast, "the event is not a proposeBlkEvt")
}
if !m.validateProposeBlock(proposeBlkEvt.block, m.ctx.round.proposer) {
return sAcceptPropose, nil
}
m.ctx.round.block = proposeBlkEvt.block
m.broadcastConsensusVote(m.ctx.round.block.HashBlock(), endorsement.PROPOSAL)
return sAcceptProposalEndorse, nil
}
func (m *cFSM) handleProposeBlockTimeout(evt fsm.Event) (fsm.State, error) {
if evt.Type() != eProposeBlockTimeout {
return sEpochStart, errors.Errorf("invalid event type %s", evt.Type())
}
logger.Warn().
Str("proposer", m.ctx.round.proposer).
Uint64("height", m.ctx.round.height).
Uint32("round", m.ctx.round.number).
Msg("didn't receive the proposed block before timeout")
return sAcceptProposalEndorse, nil
}
func (m *cFSM) isDelegateEndorsement(endorser string) bool {
for _, delegate := range m.ctx.epoch.delegates {
if delegate == endorser {
return true
}
}
return false
}
func (m *cFSM) validateEndorse(
en *endorsement.Endorsement,
expectedConsensusTopics map[endorsement.ConsensusVoteTopic]bool,
) bool {
if !m.isDelegateEndorsement(en.Endorser()) {
logger.Error().
Str("endorser", en.Endorser()).
Msg("error when validating the endorser's delegation")
return false
}
vote := en.ConsensusVote()
if _, ok := expectedConsensusTopics[vote.Topic]; !ok {
logger.Error().
Interface("expectedConsensusTopics", expectedConsensusTopics).
Uint8("consensusTopic", uint8(vote.Topic)).
Msg("error when validating the endorse topic")
return false
}
if vote.Height != m.ctx.round.height {
logger.Error().
Uint64("height", vote.Height).
Uint64("expectedHeight", m.ctx.round.height).
Msg("error when validating the endorse height")
return false
}
return true
}
func (m *cFSM) isProposedBlock(hash []byte) bool {
if m.ctx.round.block == nil {
return false
}
blkHash := m.ctx.round.block.HashBlock()
return bytes.Equal(hash[:], blkHash[:])
}
func (m *cFSM) processEndorseEvent(
evt fsm.Event,
expectedEventType fsm.EventType,
expectedConsensusTopics map[endorsement.ConsensusVoteTopic]bool,
) (*endorsement.Set, error) {
if evt.Type() != expectedEventType {
return nil, errors.Wrapf(ErrEvtType, "invalid endorsement event type %s", evt.Type())
}
endorseEvt, ok := evt.(*endorseEvt)
if !ok {
return nil, errors.Wrap(ErrEvtCast, "the event is not an endorseEvt")
}
endorse := endorseEvt.endorse
vote := endorse.ConsensusVote()
if !m.isProposedBlock(vote.BlkHash[:]) {
return nil, errors.New("the endorsed block was not the proposed block")
}
if !m.validateEndorse(endorse, expectedConsensusTopics) {
return nil, errors.New("invalid endorsement")
}
return m.addEndorsement(endorse, expectedConsensusTopics)
}
func (m *cFSM) addEndorsement(
en *endorsement.Endorsement,
expectedConsensusTopics map[endorsement.ConsensusVoteTopic]bool,
) (*endorsement.Set, error) {
blkHash := en.ConsensusVote().BlkHash
endorsementSet, ok := m.ctx.round.endorsementSets[blkHash]
if !ok {
endorsementSet = endorsement.NewSet(blkHash)
m.ctx.round.endorsementSets[blkHash] = endorsementSet
}
if err := endorsementSet.AddEndorsement(en); err != nil {
return nil, err
}
validNum := endorsementSet.NumOfValidEndorsements(
expectedConsensusTopics,
m.ctx.epoch.delegates,
)
numDelegates := len(m.ctx.epoch.delegates)
if numDelegates >= 4 && validNum > numDelegates*2/3 || numDelegates < 4 && validNum >= numDelegates {
return endorsementSet, nil
}
return nil, nil
}
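// Illustration (not part of the original file): with the default 21 delegates,
// numDelegates*2/3 evaluates to 14 in integer arithmetic, so validNum > 14
// means at least 15 endorsements are required before the set is returned;
// with fewer than 4 delegates, the second clause requires an endorsement from
// every delegate.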
func (m *cFSM) broadcastConsensusVote(
blkHash hash.Hash32B,
topic endorsement.ConsensusVoteTopic,
) {
cEvt := m.newEndorseEvt(blkHash, topic)
cEvtProto := cEvt.toProtoMsg()
// Notify itself
m.produce(cEvt, 0)
// Notify other delegates
if err := m.ctx.p2p.Broadcast(m.ctx.chain.ChainID(), cEvtProto); err != nil {
logger.Error().
Err(err).
Msg("error when broadcasting commitEvtProto")
}
}
func (m *cFSM) handleEndorseProposalEvt(evt fsm.Event) (fsm.State, error) {
endorsementSet, err := m.processEndorseEvent(
evt,
eEndorseProposal,
map[endorsement.ConsensusVoteTopic]bool{
endorsement.PROPOSAL: true,
endorsement.COMMIT: true, // commit endorse is counted as one proposal endorse
},
)
if errors.Cause(err) == ErrEvtCast || errors.Cause(err) == ErrEvtType {
return sEpochStart, err
}
if err != nil || endorsementSet == nil {
return sAcceptProposalEndorse, err
}
// Gather enough proposal endorsements
m.ctx.round.proofOfLock = endorsementSet
m.broadcastConsensusVote(endorsementSet.BlockHash(), endorsement.LOCK)
return sAcceptLockEndorse, nil
}
func (m *cFSM) handleEndorseProposalTimeout(evt fsm.Event) (fsm.State, error) {
if evt.Type() != eEndorseProposalTimeout {
return sEpochStart, errors.Errorf("invalid event type %s", evt.Type())
}
logger.Warn().
Uint64("height", m.ctx.round.height).
Msg("didn't collect enough proposal endorses before timeout")
return sAcceptLockEndorse, nil
}
func (m *cFSM) handleEndorseLockEvt(evt fsm.Event) (fsm.State, error) {
endorsementSet, err := m.processEndorseEvent(
evt,
eEndorseLock,
map[endorsement.ConsensusVoteTopic]bool{
endorsement.LOCK: true,
endorsement.COMMIT: true, // commit endorse is counted as one lock endorse
},
)
if errors.Cause(err) == ErrEvtCast || errors.Cause(err) == ErrEvtType {
return sEpochStart, err
}
if err != nil || endorsementSet == nil {
// Wait for more lock votes to come
return sAcceptLockEndorse, err
}
m.broadcastConsensusVote(endorsementSet.BlockHash(), endorsement.COMMIT)
return sAcceptCommitEndorse, nil
}
func (m *cFSM) handleEndorseLockTimeout(evt fsm.Event) (fsm.State, error) {
if evt.Type() != eEndorseLockTimeout {
return sEpochStart, errors.Errorf("invalid event type %s", evt.Type())
}
logger.Warn().
Uint64("height", m.ctx.round.height).
Msg("didn't collect enough commit endorse before timeout")
m.produce(m.newCEvt(eFinishEpoch), 0)
return sRoundStart, nil
}
func (m *cFSM) handleEndorseCommitEvt(evt fsm.Event) (fsm.State, error) {
pendingBlock := m.ctx.round.block
logger.Info().
Uint64("block", m.ctx.round.height).
Msg("consensus reached")
consensusMtc.WithLabelValues("true").Inc()
// If the pending block is a secret block, record the secret share generated by producer
if m.ctx.shouldHandleDKG() {
for _, secretProposal := range pendingBlock.SecretProposals {
if secretProposal.DstAddr() == m.ctx.addr.RawAddress {
m.ctx.epoch.committedSecrets[secretProposal.SrcAddr()] = secretProposal.Secret()
break
}
}
}
// Commit and broadcast the pending block
if err := m.ctx.chain.CommitBlock(pendingBlock); err != nil {
logger.Error().
Err(err).
Uint64("block", pendingBlock.Height()).
Msg("error when committing a block")
}
// Remove transfers in this block from ActPool and reset ActPool state
m.ctx.actPool.Reset()
// Broadcast the committed block to the network
if blkProto := pendingBlock.ConvertToBlockPb(); blkProto != nil {
if err := m.ctx.p2p.Broadcast(m.ctx.chain.ChainID(), blkProto); err != nil {
logger.Error().
Err(err).
Uint64("block", pendingBlock.Height()).
Msg("error when broadcasting blkProto")
}
// put the block to the parent chain if the current node is the proposer and the current chain is a sub-chain
if m.ctx.round.proposer == m.ctx.addr.RawAddress && m.ctx.chain.ChainAddress() != "" {
putBlockToParentChain(m.ctx.rootChainAPI, m.ctx.chain.ChainAddress(), m.ctx.addr, pendingBlock)
}
} else {
logger.Error().
Uint64("block", pendingBlock.Height()).
Msg("error when converting a block into a proto msg")
}
m.produce(m.newCEvt(eFinishEpoch), 0)
return sRoundStart, nil
}
func (m *cFSM) handleFinishEpochEvt(evt fsm.Event) (fsm.State, error) {
if m.ctx.shouldHandleDKG() && m.ctx.isDKGFinished() {
dkgPubKey, dkgPriKey, err := m.ctx.generateDKGKeyPair()
if err != nil {
return sEpochStart, errors.Wrap(err, "error when generating DKG key pair")
}
m.ctx.epoch.dkgAddress.PublicKey = dkgPubKey
m.ctx.epoch.dkgAddress.PrivateKey = dkgPriKey
}
epochFinished, err := m.ctx.isEpochFinished()
if err != nil {
return sEpochStart, errors.Wrap(err, "error when checking if the epoch is finished")
}
if epochFinished {
m.produce(m.newCEvt(eRollDelegates), 0)
return sEpochStart, nil
}
if err := m.produceStartRoundEvt(); err != nil {
return sEpochStart, errors.Wrapf(err, "error when producing %s", eStartRound)
}
return sRoundStart, nil
}
func (m *cFSM) isDelegate(delegates []string) bool {
for _, d := range delegates {
if m.ctx.addr.RawAddress == d {
return true
}
}
return false
}
func (m *cFSM) produceStartRoundEvt() error {
var (
duration time.Duration
err error
)
// If we have the cached last block, we get the timestamp from it
if duration, err = m.ctx.calcDurationSinceLastBlock(); err != nil {
// Otherwise, we read it from blockchain
return errors.Wrap(err, "error when computing the duration since last block time")
}
// If the proposer interval is set (not zero), the next round will only be started after the configured duration
// since the last block's creation time, so that we can keep a constant block interval
waitDuration := time.Duration(0)
if m.ctx.cfg.ProposerInterval > 0 {
waitDuration = (m.ctx.cfg.ProposerInterval - (duration % m.ctx.cfg.ProposerInterval)) % m.ctx.cfg.ProposerInterval
}
m.produce(m.newCEvt(eStartRound), waitDuration)
return nil
}
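// Illustration (not part of the original file): with ProposerInterval = 10s and
// 3s elapsed since the last block, waitDuration = (10s - 3s%10s) % 10s = 7s;
// if a whole multiple of the interval has already elapsed, the outer modulo
// collapses the wait to 0 so the next round starts immediately.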
// handleBackdoorEvt takes the dst state from the event and move the FSM into it
func (m *cFSM) handleBackdoorEvt(evt fsm.Event) (fsm.State, error) {
bEvt, ok := evt.(*backdoorEvt)
if !ok {
return sEpochStart, errors.Wrap(ErrEvtCast, "the event is not a backdoorEvt")
}
return bEvt.dst, nil
}
func (m *cFSM) newCEvt(t fsm.EventType) *consensusEvt {
return newCEvt(t, m.ctx.round.height, m.ctx.round.number, m.ctx.clock)
}
func (m *cFSM) newProposeBlkEvt(blk *blockchain.Block) *proposeBlkEvt {
return newProposeBlkEvt(blk, m.ctx.round.proofOfLock, m.ctx.round.number, m.ctx.clock)
}
func (m *cFSM) newProposeBlkEvtFromProposePb(pb *iproto.ProposePb) (*proposeBlkEvt, error) {
evt := newProposeBlkEvtFromProtoMsg(pb, m.ctx.clock)
if evt == nil {
return nil, errors.New("error when casting a proto msg to proposeBlkEvt")
}
return evt, nil
}
func (m *cFSM) newEndorseEvtWithEndorsePb(ePb *iproto.EndorsePb) (*endorseEvt, error) {
en, err := endorsement.FromProtoMsg(ePb)
if err != nil {
return nil, errors.Wrap(err, "error when casting a proto msg to endorse")
}
return newEndorseEvtWithEndorse(en, m.ctx.clock), nil
}
func (m *cFSM) newEndorseEvt(blkHash hash.Hash32B, topic endorsement.ConsensusVoteTopic) *endorseEvt {
return newEndorseEvt(topic, blkHash, m.ctx.round.height, m.ctx.round.number, m.ctx.addr, m.ctx.clock)
}
func (m *cFSM) newTimeoutEvt(t fsm.EventType) *timeoutEvt {
return newTimeoutEvt(t, m.ctx.round.height, m.ctx.round.number, m.ctx.clock)
}
func (m *cFSM) newBackdoorEvt(dst fsm.State) *backdoorEvt {
return newBackdoorEvt(dst, m.ctx.round.height, m.ctx.round.number, m.ctx.clock)
}
func verifyDKGSignature(blk *blockchain.Block, seedByte []byte) error {
return crypto.BLS.Verify(blk.Header.DKGPubkey, seedByte, blk.Header.DKGBlockSig)
}
idx: 1 | id: 13,519 | msg: File is not `goimports`-ed (from `goimports`) | proj: iotexproject-iotex-core | lang: go
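For reference, one arrangement of this record's import block that would satisfy the `goimports` check: standard-library packages in their own group, third-party packages next, and the new db package grouped with the other iotex-core imports. The exact placement of the iotex-core group depends on the project's local-prefix configuration, which is assumed here rather than taken from the record.
import (
	"bytes"
	"context"
	"encoding/hex"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/zjshen14/go-fsm"

	"github.com/iotexproject/iotex-core/blockchain"
	"github.com/iotexproject/iotex-core/crypto"
	"github.com/iotexproject/iotex-core/db"
	"github.com/iotexproject/iotex-core/endorsement"
	"github.com/iotexproject/iotex-core/logger"
	"github.com/iotexproject/iotex-core/pkg/hash"
	"github.com/iotexproject/iotex-core/proto"
)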
@@ -375,6 +375,8 @@ const char *get_parameter(char *cmd, SERVER_CONN *server_conn) {
const char *set_parameter(char *cmd, SERVER_CONN *server_conn, CLIENT_CONN *client_conn) {
const char *param_name = strstr(cmd, SET_PARAM_CMD) + SET_PARAM_CMD_LEN;
const char *param_value;
+ if (!param_name)
+ return SET_PARAM_CMD_FAIL_RSP;
if (strstr(param_name, SERVER_LOOPBACK_MODE_PARAM) == param_name) {
param_value = param_name + SERVER_LOOPBACK_MODE_PARAM_LEN;
if (strnlen(param_value, 1) == 1) {

y: 1

// Copyright(c) 2020, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <sys/types.h>
#include <limits.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h> // offsetof
#include "server.h"
#include "packet.h"
#include "constants.h"
const SERVER_BUFFERS SERVER_BUFFERS_default = {
.ctrl_rx_buff = NULL,
.ctrl_rx_buff_sz = 0,
.ctrl_tx_buff = NULL,
.ctrl_tx_buff_sz = 0,
.h2t_header_buff = { 0 },
.t2h_header_buff = { 0 },
.mgmt_header_buff = { 0 },
.mgmt_rsp_header_buff = { 0 },
.use_wrapping_data_buffers = 0,
.h2t_rx_buff = NULL,
.h2t_rx_buff_sz = 0,
.mgmt_rx_buff = NULL,
.mgmt_rx_buff_sz = 0,
.t2h_tx_buff = NULL,
.t2h_tx_buff_sz = 0,
.mgmt_rsp_tx_buff = NULL,
.mgmt_rsp_tx_buff_sz = 0
};
const SERVER_CONN SERVER_CONN_default = {
.buff = NULL,
.h2t_waiting = 0,
.mgmt_waiting = 0,
.hw_callbacks = {
.init_driver = NULL,
.get_h2t_buffer = NULL,
.h2t_data_received = NULL,
.get_mgmt_buffer = NULL,
.mgmt_data_received = NULL,
.acquire_t2h_data = NULL,
.t2h_data_complete = NULL,
.acquire_mgmt_rsp_data = NULL,
.mgmt_rsp_data_complete = NULL,
.has_mgmt_support = NULL,
.set_param = NULL,
.get_param = NULL,
.server_printf = printf
},
.loopback_mode = 0,
.server_fd = INVALID_SOCKET,
.t2h_nagle = 0,
.mgmt_rsp_nagle = 0,
.pkt_stats = { 0, 0, 0, 0 }
};
const SERVER_HW_CALLBACKS SERVER_HW_CALLBACKS_default = {
.init_driver = NULL,
.get_h2t_buffer = NULL,
.h2t_data_received = NULL,
.get_mgmt_buffer = NULL,
.mgmt_data_received = NULL,
.acquire_t2h_data = NULL,
.t2h_data_complete = NULL,
.acquire_mgmt_rsp_data = NULL,
.mgmt_rsp_data_complete = NULL,
.has_mgmt_support = NULL,
.set_param = NULL,
.get_param = NULL,
.server_printf = printf
};
const SERVER_PKT_STATS SERVER_PKT_STATS_default = { 0, 0, 0, 0 };
const CLIENT_CONN CLIENT_CONN_default = { INVALID_SOCKET, INVALID_SOCKET, INVALID_SOCKET, INVALID_SOCKET, INVALID_SOCKET };
// Global variables
#if STI_NOSYS_PROT_PLATFORM==STI_PLATFORM_WINDOWS || STI_NOSYS_PROT_PLATFORM==STI_PLATFORM_NIOS_INICHE
int sizeof_addr = -1;
#else
uint32_t sizeof_addr = 0;
#endif
void reset_buffers(SERVER_CONN *conn) {
zero_mem(conn->buff->ctrl_rx_buff, conn->buff->ctrl_rx_buff_sz);
zero_mem(conn->buff->ctrl_tx_buff, conn->buff->ctrl_tx_buff_sz);
}
void print_last_socket_error(const char *context_msg, int(*printf_fp)(printf_format_arg, ...)) {
enum { ERR_MSG_BUFF_SZ = 256 };
char err_buff[ERR_MSG_BUFF_SZ] = { 0 };
const char *system_msg = get_last_socket_error_msg(err_buff, ERR_MSG_BUFF_SZ);
if (system_msg != NULL) {
printf_fp("%s: %s\n", context_msg, system_msg);
} else {
printf_fp("%s\n", context_msg);
}
}
void print_last_socket_error_b(const char *context_msg, ssize_t bytes_transferred, int(*printf_fp)(printf_format_arg, ...)) {
if (bytes_transferred < 0) {
print_last_socket_error(context_msg, printf_fp);
} else {
printf_fp("%s\n", context_msg);
}
}
void generate_server_welcome_message(char *buff, size_t buff_size, int mgmt_support, SERVER_BUFFERS *serv_buff, int handle) {
snprintf(buff, buff_size, "Welcome to INTEL_ST_HOST_EP_SERVER: %s=%d %s=%ld %s=%ld %s=%ld HANDLE=%d",
MGMT_SUPPORT_PARAM,
mgmt_support,
H2T_RX_BUFFER_SIZE_PARAM,
serv_buff->h2t_rx_buff_sz,
MGMT_RX_BUFFER_SIZE_PARAM,
serv_buff->mgmt_rx_buff_sz,
CTRL_RX_BUFFER_SIZE_PARAM,
serv_buff->ctrl_rx_buff_sz,
handle
);
}
RETURN_CODE bind_server_socket(SERVER_CONN *server_conn) {
const int MAX_LISTEN = 8;
unsigned char errors = 0;
// Create the socket
if ((server_conn->server_fd = socket(AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET) {
print_last_socket_error("Failed to create socket", server_conn->hw_callbacks.server_printf);
++errors;
}
// Set some options
if ((errors == 0) && (set_boolean_socket_option(server_conn->server_fd, SO_REUSEADDR, 1) < 0)) {
print_last_socket_error("Failed to set socket options", server_conn->hw_callbacks.server_printf);
++errors;
}
// Bind it to PORT + Protocol
#if STI_NOSYS_PROT_PLATFORM==STI_PLATFORM_NIOS_INICHE
if ((errors == 0) && (bind(server_conn->server_fd, (struct sockaddr *)(&(server_conn->server_addr)), sizeof_addr) < 0)) {
print_last_socket_error("Failed to bind socket", server_conn->hw_callbacks.server_printf);
++errors;
}
#else
if ((errors == 0) && (bind(server_conn->server_fd, (const struct sockaddr *)(&(server_conn->server_addr)), sizeof_addr) < 0)) {
print_last_socket_error("Failed to bind socket", server_conn->hw_callbacks.server_printf);
++errors;
}
#endif
// Tell the socket to listen for incoming connections
if ((errors == 0) && (listen(server_conn->server_fd, MAX_LISTEN) < 0)) {
print_last_socket_error("Failed to listen on server socket", server_conn->hw_callbacks.server_printf);
++errors;
}
if (errors > 0) {
if (server_conn->server_fd != INVALID_SOCKET) {
close_socket_fd(server_conn->server_fd);
server_conn->server_fd = INVALID_SOCKET;
}
return FAILURE;
} else {
return OK;
}
}
RETURN_CODE connect_client_socket(SERVER_CONN *server_conn, int handle_id, SOCKET *client_fd, const char *sock_name, char use_nagle) {
enum { FAIL_MSG_SIZE = 80, MAX_HANDLE_RSP = 64 };
char socket_fail_msg[FAIL_MSG_SIZE];
if((*client_fd = accept(server_conn->server_fd, (struct sockaddr *)(&(server_conn->server_addr)), &sizeof_addr)) == INVALID_SOCKET) {
snprintf(socket_fail_msg, FAIL_MSG_SIZE, "Failed to accept %s socket", sock_name);
print_last_socket_error(socket_fail_msg, server_conn->hw_callbacks.server_printf);
} else {
if (set_tcp_no_delay(*client_fd, (use_nagle == 0) ? 1 : 0) == 0) {
ssize_t bytes_transferred;
if (socket_recv_until_null_reached(*client_fd, server_conn->buff->ctrl_rx_buff, MAX_HANDLE_RSP, 0, &bytes_transferred) == OK) {
// Client needs to send a null terminated handle ack message
generate_expected_handle_message(server_conn->buff->ctrl_tx_buff, server_conn->buff->ctrl_tx_buff_sz, sock_name, handle_id);
if (strncmp(server_conn->buff->ctrl_rx_buff, server_conn->buff->ctrl_tx_buff, MAX_HANDLE_RSP) == 0) {
RETURN_CODE result;
if ((result = socket_send_all(*client_fd, READY_MSG, READY_MSG_LEN, 0, &bytes_transferred)) != OK) {
snprintf(socket_fail_msg, FAIL_MSG_SIZE, "Failed to send handle ready message for %s socket", sock_name);
print_last_socket_error_b(socket_fail_msg, bytes_transferred, server_conn->hw_callbacks.server_printf);
}
return result;
} else {
socket_send_all(*client_fd, NOT_READY_MSG, NOT_READY_MSG_LEN, 0, NULL);
server_conn->hw_callbacks.server_printf("Got unexpected handle ack message: %s\n\tExpected: %s\n", server_conn->buff->ctrl_rx_buff, server_conn->buff->ctrl_tx_buff);
}
} else {
snprintf(socket_fail_msg, FAIL_MSG_SIZE, "Failed to recv handle ack message for %s socket", sock_name);
print_last_socket_error_b(socket_fail_msg, bytes_transferred, server_conn->hw_callbacks.server_printf);
}
}
}
return FAILURE;
}
RETURN_CODE connect_client(SERVER_CONN *server_conn, CLIENT_CONN *client_conn) {
enum { MAX_HANDLE_RSP = 64 };
RETURN_CODE result = OK;
int handle = get_random_id();
ssize_t bytes_transferred;
server_conn->pkt_stats = SERVER_PKT_STATS_default;
// Initialize the driver if required. Initialization occurs here since it is the first thing run per spec,
// and the welcome message requires querying the driver for MGMT support.
if (server_conn->hw_callbacks.init_driver != NULL) {
int init_driver_rc;
if ((init_driver_rc = server_conn->hw_callbacks.init_driver(server_conn->buff->h2t_rx_buff, server_conn->buff->h2t_rx_buff_sz, server_conn->buff->mgmt_rx_buff, server_conn->buff->mgmt_rx_buff_sz)) != 0) {
server_conn->hw_callbacks.server_printf("Failed to initialize driver: %d\n", init_driver_rc);
return FAILURE; // Early return if driver fails to initialize, client is rejected.
}
}
// Connect CTRL socket
if((client_conn->ctrl_fd = accept(server_conn->server_fd, (struct sockaddr *)(&(server_conn->server_addr)), &sizeof_addr)) == INVALID_SOCKET) {
print_last_socket_error("Failed to accept CTRL socket", server_conn->hw_callbacks.server_printf);
result = FAILURE;
} else {
// Send out the welcome message
int mgmt_support = server_conn->hw_callbacks.has_mgmt_support != NULL ? server_conn->hw_callbacks.has_mgmt_support() : 0;
generate_server_welcome_message(server_conn->buff->ctrl_tx_buff, server_conn->buff->ctrl_tx_buff_sz, mgmt_support, server_conn->buff, handle);
if (socket_send_all(client_conn->ctrl_fd, server_conn->buff->ctrl_tx_buff, strnlen(server_conn->buff->ctrl_tx_buff, server_conn->buff->ctrl_tx_buff_sz) + 1, 0, &bytes_transferred) == FAILURE) {
print_last_socket_error_b("Failed to send welcome message to CTRL socket", bytes_transferred, server_conn->hw_callbacks.server_printf);
result = FAILURE;
}
}
// Verify CTRL handle response
if (result == OK) {
if ((result = socket_recv_until_null_reached(client_conn->ctrl_fd, server_conn->buff->ctrl_rx_buff, MAX_HANDLE_RSP, 0, &bytes_transferred)) == OK) {
generate_expected_handle_message(server_conn->buff->ctrl_tx_buff, server_conn->buff->ctrl_tx_buff_sz, CONTROL_SOCK_NAME, handle);
if (strncmp(server_conn->buff->ctrl_rx_buff, server_conn->buff->ctrl_tx_buff, MAX_HANDLE_RSP) != 0) {
socket_send_all(client_conn->ctrl_fd, NOT_READY_MSG, NOT_READY_MSG_LEN, 0, NULL);
server_conn->hw_callbacks.server_printf("Got unexpected handle ack message: %s\n\tExpected: %s\n", server_conn->buff->ctrl_rx_buff, server_conn->buff->ctrl_tx_buff);
result = FAILURE;
} else {
if ((result = socket_send_all(client_conn->ctrl_fd, READY_MSG, READY_MSG_LEN, 0, &bytes_transferred)) != OK) {
print_last_socket_error_b("Failed to send handle ready message for CTRL socket", bytes_transferred, server_conn->hw_callbacks.server_printf);
}
}
} else {
print_last_socket_error_b("Failed to recv handle ack message for CTRL socket", bytes_transferred, server_conn->hw_callbacks.server_printf);
}
}
if (result == OK) {
result = connect_client_socket(server_conn, handle, &(client_conn->mgmt_fd), MANAGEMENT_SOCK_NAME, 0);
}
if (result == OK) {
result = connect_client_socket(server_conn, handle, &(client_conn->mgmt_rsp_fd), MANAGEMENT_RSP_SOCK_NAME, server_conn->mgmt_rsp_nagle);
}
if (result == OK) {
result = connect_client_socket(server_conn, handle, &(client_conn->h2t_data_fd), H2T_SOCK_NAME, 0);
}
if (result == OK) {
result = connect_client_socket(server_conn, handle, &(client_conn->t2h_data_fd), T2H_SOCK_NAME, server_conn->t2h_nagle);
}
if (result != OK) {
send(client_conn->ctrl_fd, NOT_READY_MSG, NOT_READY_MSG_LEN, 0);
return FAILURE;
} else {
if (socket_send_all(client_conn->ctrl_fd, READY_MSG, READY_MSG_LEN, 0, &bytes_transferred) == OK) {
return OK;
} else {
print_last_socket_error_b("Failed to send ready message to CTRL socket", bytes_transferred, server_conn->hw_callbacks.server_printf);
return FAILURE;
}
}
}
RETURN_CODE close_client_conn(CLIENT_CONN *client_conn, SERVER_CONN *server_conn) {
unsigned char errors = 0;
if (client_conn->ctrl_fd != INVALID_SOCKET) {
set_linger_socket_option(client_conn->ctrl_fd, 1, 0);
if (close_socket_fd(client_conn->ctrl_fd) != 0) {
print_last_socket_error("Failed to close CONTROL socket", server_conn->hw_callbacks.server_printf);
++errors;
}
}
if (client_conn->mgmt_fd != INVALID_SOCKET) {
set_linger_socket_option(client_conn->mgmt_fd, 1, 0);
if (close_socket_fd(client_conn->mgmt_fd) != 0) {
print_last_socket_error("Failed to close MGMT socket", server_conn->hw_callbacks.server_printf);
++errors;
}
}
if (client_conn->mgmt_rsp_fd != INVALID_SOCKET) {
set_linger_socket_option(client_conn->mgmt_rsp_fd, 1, 0);
if (close_socket_fd(client_conn->mgmt_rsp_fd) != 0) {
print_last_socket_error("Failed to close MGMT RSP socket", server_conn->hw_callbacks.server_printf);
++errors;
}
}
if (client_conn->h2t_data_fd != INVALID_SOCKET) {
set_linger_socket_option(client_conn->h2t_data_fd, 1, 0);
if (close_socket_fd(client_conn->h2t_data_fd) != 0) {
print_last_socket_error("Failed to close H2T_DATA socket", server_conn->hw_callbacks.server_printf);
++errors;
}
}
if (client_conn->t2h_data_fd != INVALID_SOCKET) {
set_linger_socket_option(client_conn->t2h_data_fd, 1, 0);
if (close_socket_fd(client_conn->t2h_data_fd) != 0) {
print_last_socket_error("Failed to close T2H_DATA socket", server_conn->hw_callbacks.server_printf);
++errors;
}
}
if (errors > 0) {
return FAILURE;
} else {
return OK;
}
}
const char *get_parameter(char *cmd, SERVER_CONN *server_conn) {
const char *param_name = strstr(cmd, GET_PARAM_CMD) + GET_PARAM_CMD_LEN;
if (strncmp(param_name, SERVER_LOOPBACK_MODE_PARAM, SERVER_LOOPBACK_MODE_PARAM_LEN) == 0) {
return server_conn->loopback_mode == 1 ? "1" : "0";
} else if (strncmp(param_name, H2T_RX_BUFFER_SIZE_PARAM, H2T_RX_BUFFER_SIZE_PARAM_LEN) == 0) {
snprintf(server_conn->buff->ctrl_tx_buff, server_conn->buff->ctrl_tx_buff_sz, "%ld", server_conn->buff->h2t_rx_buff_sz);
return server_conn->buff->ctrl_tx_buff;
} else if (strncmp(param_name, MGMT_RX_BUFFER_SIZE_PARAM, MGMT_RX_BUFFER_SIZE_PARAM_LEN) == 0) {
snprintf(server_conn->buff->ctrl_tx_buff, server_conn->buff->ctrl_tx_buff_sz, "%ld", server_conn->buff->mgmt_rx_buff_sz);
return server_conn->buff->ctrl_tx_buff;
} else if (strncmp(param_name, CTRL_RX_BUFFER_SIZE_PARAM, CTRL_RX_BUFFER_SIZE_PARAM_LEN) == 0) {
snprintf(server_conn->buff->ctrl_tx_buff, server_conn->buff->ctrl_tx_buff_sz, "%ld", server_conn->buff->ctrl_rx_buff_sz);
return server_conn->buff->ctrl_tx_buff;
} else if (strncmp(param_name, T2H_NAGLE_PARAM, T2H_NAGLE_PARAM_LEN) == 0) {
snprintf(server_conn->buff->ctrl_tx_buff, server_conn->buff->ctrl_tx_buff_sz, "%d", (int)(server_conn->t2h_nagle));
return server_conn->buff->ctrl_tx_buff;
} else if (strncmp(param_name, MGMT_RSP_NAGLE_PARAM, MGMT_RSP_NAGLE_PARAM_LEN) == 0) {
snprintf(server_conn->buff->ctrl_tx_buff, server_conn->buff->ctrl_tx_buff_sz, "%d", (int)(server_conn->mgmt_rsp_nagle));
return server_conn->buff->ctrl_tx_buff;
} else {
return GET_PARAM_CMD_FAIL_RSP;
}
}
const char *set_parameter(char *cmd, SERVER_CONN *server_conn, CLIENT_CONN *client_conn) {
const char *param_name = strstr(cmd, SET_PARAM_CMD) + SET_PARAM_CMD_LEN;
const char *param_value;
if (strstr(param_name, SERVER_LOOPBACK_MODE_PARAM) == param_name) {
param_value = param_name + SERVER_LOOPBACK_MODE_PARAM_LEN;
if (strnlen(param_value, 1) == 1) {
server_conn->loopback_mode = (*param_value == '1' ? 1 : 0);
return SET_PARAM_CMD_RSP;
}
} else if (strstr(param_name, T2H_NAGLE_PARAM) == param_name) {
param_value = param_name + T2H_NAGLE_PARAM_LEN;
if (strnlen(param_value, 1) == 1) {
const char t2h_nagle = (*param_value == '1' ? 1 : 0);
if (set_tcp_no_delay(client_conn->t2h_data_fd, t2h_nagle ? 0 : 1) == 0) {
server_conn->t2h_nagle = t2h_nagle;
return SET_PARAM_CMD_RSP;
}
}
} else if (strstr(param_name, MGMT_RSP_NAGLE_PARAM) == param_name) {
param_value = param_name + MGMT_RSP_NAGLE_PARAM_LEN;
if (strnlen(param_value, 1) == 1) {
const char mgmt_rsp_nagle = (*param_value == '1' ? 1 : 0);
if (set_tcp_no_delay(client_conn->mgmt_rsp_fd, mgmt_rsp_nagle ? 0 : 1) == 0) {
server_conn->mgmt_rsp_nagle = mgmt_rsp_nagle;
return SET_PARAM_CMD_RSP;
}
}
}
return SET_PARAM_CMD_FAIL_RSP;
}
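/*
 * Illustration (not part of the original file): the hunk at the top of this
 * record applies SET_PARAM_CMD_LEN to the strstr() result before testing it,
 * so a missing command prefix can no longer be observed as NULL at the check.
 * Below is a minimal sketch of a guard that tests the strstr() result first;
 * the constant names are the ones already used in this file and are assumed
 * to come from the project's headers.
 */
static const char *locate_set_param_name(char *cmd) {
    const char *cmd_start = strstr(cmd, SET_PARAM_CMD);
    if (cmd_start == NULL) {
        return NULL; /* caller would map NULL to SET_PARAM_CMD_FAIL_RSP */
    }
    return cmd_start + SET_PARAM_CMD_LEN;
}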
const char *get_driver_parameter(char *cmd, SERVER_CONN *server_conn) {
if (server_conn->hw_callbacks.get_param != NULL) {
const char *param_name = strstr(cmd, GET_DRIVER_PARAM_CMD) + GET_DRIVER_PARAM_CMD_LEN;
const char *result = server_conn->hw_callbacks.get_param(param_name);
if (result != NULL) {
return result;
}
}
return GET_PARAM_CMD_FAIL_RSP;
}
const char *set_driver_parameter(char *cmd, SERVER_CONN *server_conn) {
enum { MAX_DRIVER_PARAM_NAME_LEN = 32 };
char param_name_buff[MAX_DRIVER_PARAM_NAME_LEN + 1] = { 0 };
if (server_conn->hw_callbacks.set_param != NULL) {
const char *param_name = strstr(cmd, SET_DRIVER_PARAM_CMD) + SET_DRIVER_PARAM_CMD_LEN;
const char *param_value = strstr(param_name, " ") + 1;
size_t param_name_len = (size_t)(param_value - param_name) - 1;
for (size_t i = 0; i < MIN_MACRO(param_name_len, MAX_DRIVER_PARAM_NAME_LEN); ++i) {
param_name_buff[i] = param_name[i];
}
if (server_conn->hw_callbacks.set_param(param_name_buff, param_value) >= 0) {
return SET_PARAM_CMD_RSP;
}
}
return SET_PARAM_CMD_FAIL_RSP;
}
RETURN_CODE process_control_message(CLIENT_CONN *client_conn, SERVER_CONN *server_conn, char *disconnect_client) {
ssize_t bytes_transferred;
RETURN_CODE result;
if (socket_recv_until_null_reached(client_conn->ctrl_fd, server_conn->buff->ctrl_rx_buff, server_conn->buff->ctrl_rx_buff_sz, 0, &bytes_transferred) == OK) {
if (strstr(server_conn->buff->ctrl_rx_buff, GET_PARAM_CMD) == server_conn->buff->ctrl_rx_buff) {
const char *resp = get_parameter(server_conn->buff->ctrl_rx_buff, server_conn);
result = socket_send_all(client_conn->ctrl_fd, resp, strnlen(resp, MAX_SERVER_PARAM_VALUE_LEN) + 1, 0, &bytes_transferred);
} else if (strncmp(server_conn->buff->ctrl_rx_buff, PING_CMD, PING_CMD_LEN) == 0) {
result = socket_send_all(client_conn->ctrl_fd, PING_CMD_RSP, PING_CMD_RSP_LEN, 0, &bytes_transferred);
} else if (strncmp(server_conn->buff->ctrl_rx_buff, DISCONNECT_CMD, DISCONNECT_CMD_LEN) == 0) {
socket_send_all(client_conn->ctrl_fd, DISCONNECT_CMD_RSP, DISCONNECT_CMD_RSP_LEN, 0, NULL);
wait_for_read_event(client_conn->ctrl_fd, 10, 0); // Wait 10 seconds at most for the client to close first
*disconnect_client = 1;
result = OK;
} else if (strstr(server_conn->buff->ctrl_rx_buff, SET_PARAM_CMD) == server_conn->buff->ctrl_rx_buff) {
const char *resp = set_parameter(server_conn->buff->ctrl_rx_buff, server_conn, client_conn);
result = socket_send_all(client_conn->ctrl_fd, resp, strnlen(resp, 128) + 1, 0, &bytes_transferred);
} else if (strstr(server_conn->buff->ctrl_rx_buff, SET_DRIVER_PARAM_CMD) == server_conn->buff->ctrl_rx_buff) {
const char *resp = set_driver_parameter(server_conn->buff->ctrl_rx_buff, server_conn);
result = socket_send_all(client_conn->ctrl_fd, resp, strnlen(resp, 256) + 1, 0, &bytes_transferred);
} else if (strstr(server_conn->buff->ctrl_rx_buff, GET_DRIVER_PARAM_CMD) == server_conn->buff->ctrl_rx_buff) {
const char *resp = get_driver_parameter(server_conn->buff->ctrl_rx_buff, server_conn);
result = socket_send_all(client_conn->ctrl_fd, resp, strnlen(resp, MAX_SERVER_PARAM_VALUE_LEN) + 1, 0, &bytes_transferred);
} else {
result = socket_send_all(client_conn->ctrl_fd, UNRECOGNIZED_CMD_RSP, UNRECOGNIZED_CMD_RSP_LEN, 0, &bytes_transferred);
}
if (result != OK) {
print_last_socket_error_b("Failed to send CTRL message response", bytes_transferred, server_conn->hw_callbacks.server_printf);
}
return result;
} else {
print_last_socket_error_b("Failed to recv CTRL message", bytes_transferred, server_conn->hw_callbacks.server_printf);
return FAILURE;
}
}
unsigned long buff_len_to_wrap_boundary(char *buff_sa, size_t buff_sz, char *buff, size_t payload_sz) {
const unsigned long mem_base = (unsigned long)((uintptr_t)buff_sa);
const unsigned long end_addr = mem_base + buff_sz;
if (((unsigned long)((uintptr_t)buff) + payload_sz) > end_addr) {
return (end_addr - (unsigned long)((uintptr_t)buff));
} else {
return 0;
}
}
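/*
 * Illustration (not part of the original file): with a 1024-byte ring buffer
 * starting at buff_sa, a 100-byte payload whose start lies at offset 980
 * crosses the end of the buffer, so the function returns 1024 - 980 = 44;
 * the callers then split the transfer into a 44-byte chunk at the tail and a
 * 56-byte chunk that wraps back to the start of the buffer.
 */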
RETURN_CODE update_curr_h2t_header(CLIENT_CONN *client_conn, SERVER_CONN *server_conn) {
if (server_conn->h2t_waiting == 0) {
ssize_t bytes_recvd;
RETURN_CODE result = socket_recv_accumulate(client_conn->h2t_data_fd, server_conn->buff->h2t_header_buff, SIZEOF_PACKET_GUARDBAND + SIZEOF_H2T_PACKET_HEADER, 0, &bytes_recvd);
if (result != OK) {
print_last_socket_error_b("Failed to recv H2T header", bytes_recvd, server_conn->hw_callbacks.server_printf);
}
return result;
}
return OK;
}
RETURN_CODE process_h2t_data(CLIENT_CONN *client_conn, SERVER_CONN *server_conn) {
ssize_t bytes_recvd;
RETURN_CODE has_error = OK;
if ((has_error = update_curr_h2t_header(client_conn, server_conn)) == OK) {
H2T_PACKET_HEADER *header = (H2T_PACKET_HEADER *)(server_conn->buff->h2t_header_buff + SIZEOF_PACKET_GUARDBAND);
size_t bytes_to_transfer = header->DATA_LEN_BYTES;
// Polls to see if there is room for the packet
char *h2t_buff = ((server_conn->hw_callbacks.get_h2t_buffer != NULL) && (server_conn->loopback_mode == 0)) ? server_conn->hw_callbacks.get_h2t_buffer(bytes_to_transfer) : server_conn->buff->h2t_rx_buff;
// Recv H2T payload
if (h2t_buff != NULL) {
server_conn->pkt_stats.h2t_cnt++;
server_conn->h2t_waiting = 0;
size_t first_len;
if (server_conn->buff->use_wrapping_data_buffers && ((first_len = buff_len_to_wrap_boundary(server_conn->buff->h2t_rx_buff, server_conn->buff->h2t_rx_buff_sz, h2t_buff, header->DATA_LEN_BYTES)) != 0)) {
// Wrap, 2 recv necessary
size_t second_len = header->DATA_LEN_BYTES - first_len;
has_error = socket_recv_accumulate_h2t_data(client_conn->h2t_data_fd, h2t_buff, first_len, 0, &bytes_recvd);
if (has_error == OK) {
has_error = socket_recv_accumulate_h2t_data(client_conn->h2t_data_fd, server_conn->buff->h2t_rx_buff, second_len, 0, &bytes_recvd);
}
} else {
// No wrap
has_error = socket_recv_accumulate_h2t_data(client_conn->h2t_data_fd, h2t_buff, bytes_to_transfer, 0, &bytes_recvd);
}
// Push to driver or loopback
if (has_error == OK) {
if (server_conn->loopback_mode == 0) {
// Normal operation, push the transaction to HW
has_error = (server_conn->hw_callbacks.h2t_data_received != NULL) ? server_conn->hw_callbacks.h2t_data_received(header, (unsigned char *)h2t_buff) : OK;
} else {
// Send the header
if ((has_error = socket_send_all(client_conn->t2h_data_fd, server_conn->buff->h2t_header_buff, SIZEOF_PACKET_GUARDBAND + SIZEOF_H2T_PACKET_HEADER, 0, &bytes_recvd)) == OK) {
// Send the payload
if ((has_error = socket_send_all_t2h_data(client_conn->t2h_data_fd, h2t_buff, bytes_to_transfer, 0, &bytes_recvd)) != OK) {
print_last_socket_error_b("Failed to send loopback T2H data", bytes_recvd, server_conn->hw_callbacks.server_printf);
}
} else {
print_last_socket_error_b("Failed to send loopback T2H header", bytes_recvd, server_conn->hw_callbacks.server_printf);
}
}
} else {
print_last_socket_error_b("Failed to recv H2T data", bytes_recvd, server_conn->hw_callbacks.server_printf);
}
} else {
// Wait for buffer to be available!
server_conn->h2t_waiting = 1;
}
}
return has_error;
}
RETURN_CODE update_curr_mgmt_header(CLIENT_CONN *client_conn, SERVER_CONN *server_conn) {
if (server_conn->mgmt_waiting == 0) {
ssize_t bytes_recvd;
RETURN_CODE result = socket_recv_accumulate(client_conn->mgmt_fd, server_conn->buff->mgmt_header_buff, SIZEOF_PACKET_GUARDBAND + SIZEOF_MGMT_PACKET_HEADER, 0, &bytes_recvd);
if (result != OK) {
print_last_socket_error_b("Failed to recv MGMT header", bytes_recvd, server_conn->hw_callbacks.server_printf);
}
return result;
}
return OK;
}
RETURN_CODE process_mgmt_data(CLIENT_CONN *client_conn, SERVER_CONN *server_conn) {
ssize_t bytes_recvd;
RETURN_CODE has_error = OK;
if ((has_error = update_curr_mgmt_header(client_conn, server_conn)) == OK) {
MGMT_PACKET_HEADER *header = (MGMT_PACKET_HEADER *)(server_conn->buff->mgmt_header_buff + SIZEOF_PACKET_GUARDBAND);
size_t bytes_to_transfer = header->DATA_LEN_BYTES;
// Polls to see if there is room for the packet
char *mgmt_buff = ((server_conn->hw_callbacks.get_mgmt_buffer != NULL) && (server_conn->loopback_mode == 0)) ? server_conn->hw_callbacks.get_mgmt_buffer(bytes_to_transfer) : server_conn->buff->mgmt_rx_buff;
// Recv MGMT payload
if (mgmt_buff != NULL) {
server_conn->pkt_stats.mgmt_cnt++;
server_conn->mgmt_waiting = 0;
size_t first_len;
if (server_conn->buff->use_wrapping_data_buffers && ((first_len = buff_len_to_wrap_boundary(server_conn->buff->mgmt_rx_buff, server_conn->buff->mgmt_rx_buff_sz, mgmt_buff, header->DATA_LEN_BYTES)) != 0)) {
// Wrap, 2 recv necessary
size_t second_len = header->DATA_LEN_BYTES - first_len;
has_error = socket_recv_accumulate(client_conn->mgmt_fd, mgmt_buff, first_len, 0, &bytes_recvd);
if (has_error == OK) {
has_error = socket_recv_accumulate(client_conn->mgmt_fd, server_conn->buff->mgmt_rx_buff, second_len, 0, &bytes_recvd);
}
} else {
// No wrap
has_error = socket_recv_accumulate(client_conn->mgmt_fd, mgmt_buff, bytes_to_transfer, 0, &bytes_recvd);
}
// Push to driver or loopback
if (has_error == OK) {
if (server_conn->loopback_mode == 0) {
// Normal operation, push the transaction to HW
has_error = (server_conn->hw_callbacks.mgmt_data_received != NULL) ? server_conn->hw_callbacks.mgmt_data_received(header, (unsigned char *)mgmt_buff) : OK;
} else {
// Send the header
if ((has_error = socket_send_all(client_conn->mgmt_rsp_fd, server_conn->buff->mgmt_header_buff, SIZEOF_PACKET_GUARDBAND + SIZEOF_MGMT_PACKET_HEADER, 0, &bytes_recvd)) == OK) {
// Send the payload
if ((has_error = socket_send_all(client_conn->mgmt_rsp_fd, mgmt_buff, bytes_to_transfer, 0, &bytes_recvd)) != OK) {
print_last_socket_error_b("Failed to send loopback MGMT RSP data", bytes_recvd, server_conn->hw_callbacks.server_printf);
}
} else {
print_last_socket_error_b("Failed to send loopback MGMT RSP header", bytes_recvd, server_conn->hw_callbacks.server_printf);
}
}
} else {
print_last_socket_error_b("Failed to recv MGMT data", bytes_recvd, server_conn->hw_callbacks.server_printf);
}
} else {
// Wait for buffer to be available!
server_conn->mgmt_waiting = 1;
}
}
return has_error;
}
RETURN_CODE process_t2h_data(CLIENT_CONN *client_conn, SERVER_CONN *server_conn) {
ssize_t bytes_sent;
RETURN_CODE has_error = OK;
H2T_PACKET_HEADER *header = (H2T_PACKET_HEADER *)(server_conn->buff->t2h_header_buff + SIZEOF_PACKET_GUARDBAND);
unsigned char *t2h_buff;
if ((has_error = (server_conn->hw_callbacks.acquire_t2h_data(header, &t2h_buff) == 0) ? OK : FAILURE) == OK) {
unsigned short curr_payload_bytes;
if ((curr_payload_bytes = header->DATA_LEN_BYTES) == 0) {
return has_error;
}
server_conn->pkt_stats.t2h_cnt++;
if ((has_error = socket_send_all(client_conn->t2h_data_fd, (const char *)server_conn->buff->t2h_header_buff, SIZEOF_PACKET_GUARDBAND + SIZEOF_H2T_PACKET_HEADER, 0, &bytes_sent)) == OK) {
size_t first_len;
if (server_conn->buff->use_wrapping_data_buffers && ((first_len = buff_len_to_wrap_boundary(server_conn->buff->t2h_tx_buff, server_conn->buff->t2h_tx_buff_sz, (char *)t2h_buff, header->DATA_LEN_BYTES)) != 0)) {
// Wrap, 2 sends necessary
size_t second_len = header->DATA_LEN_BYTES - first_len;
if ((has_error = socket_send_all_t2h_data(client_conn->t2h_data_fd, (const char *)t2h_buff, first_len, 0, &bytes_sent)) == OK) {
has_error = socket_send_all_t2h_data(client_conn->t2h_data_fd, (const char *)server_conn->buff->t2h_tx_buff, second_len, 0, &bytes_sent);
}
} else {
// No wrap
has_error = socket_send_all_t2h_data(client_conn->t2h_data_fd, (const char *)t2h_buff, curr_payload_bytes, 0, &bytes_sent);
}
if (has_error == OK) {
if (server_conn->hw_callbacks.t2h_data_complete != NULL) {
server_conn->hw_callbacks.t2h_data_complete();
}
}
}
if (has_error != OK) {
print_last_socket_error_b("An error occurred sending T2H data", bytes_sent, server_conn->hw_callbacks.server_printf);
}
}
return has_error;
}
RETURN_CODE process_mgmt_rsp_data(CLIENT_CONN *client_conn, SERVER_CONN *server_conn) {
ssize_t bytes_sent;
RETURN_CODE has_error = OK;
MGMT_PACKET_HEADER *header = (MGMT_PACKET_HEADER *)(server_conn->buff->mgmt_rsp_header_buff + SIZEOF_PACKET_GUARDBAND);
unsigned char *mgmt_rsp_buff;
if ((has_error = (server_conn->hw_callbacks.acquire_mgmt_rsp_data(header, &mgmt_rsp_buff) == 0) ? OK : FAILURE) == OK) {
unsigned short curr_payload_bytes;
if ((curr_payload_bytes = header->DATA_LEN_BYTES) == 0) {
return has_error;
}
server_conn->pkt_stats.mgmt_rsp_cnt++;
if ((has_error = socket_send_all(client_conn->mgmt_rsp_fd, (const char *)server_conn->buff->mgmt_rsp_header_buff, SIZEOF_PACKET_GUARDBAND + SIZEOF_MGMT_PACKET_HEADER, 0, &bytes_sent)) == OK) {
size_t first_len;
if (server_conn->buff->use_wrapping_data_buffers && ((first_len = buff_len_to_wrap_boundary(server_conn->buff->mgmt_rsp_tx_buff, server_conn->buff->mgmt_rsp_tx_buff_sz, (char *)mgmt_rsp_buff, header->DATA_LEN_BYTES)) != 0)) {
// Wrap, 2 sends necessary
size_t second_len = header->DATA_LEN_BYTES - first_len;
if ((has_error = socket_send_all(client_conn->mgmt_rsp_fd, (const char *)mgmt_rsp_buff, first_len, 0, &bytes_sent)) == OK) {
has_error = socket_send_all(client_conn->mgmt_rsp_fd, (const char *)server_conn->buff->mgmt_rsp_tx_buff, second_len, 0, &bytes_sent);
}
} else {
// No wrap
has_error = socket_send_all(client_conn->mgmt_rsp_fd, (const char *)mgmt_rsp_buff, curr_payload_bytes, 0, &bytes_sent);
}
if (has_error == OK) {
if (server_conn->hw_callbacks.mgmt_rsp_data_complete != NULL) {
server_conn->hw_callbacks.mgmt_rsp_data_complete();
}
}
}
if (has_error != OK) {
print_last_socket_error_b("An error occurred sending MGMT RSP data", bytes_sent, server_conn->hw_callbacks.server_printf);
}
}
return has_error;
}
void reject_client(SERVER_CONN *server_conn) {
SOCKET sock_fd = INVALID_SOCKET;
if((sock_fd = accept(server_conn->server_fd, (struct sockaddr *)(&(server_conn->server_addr)), &sizeof_addr)) == INVALID_SOCKET) {
print_last_socket_error("Failed to accept additional client", server_conn->hw_callbacks.server_printf);
} else {
#if STI_NOSYS_PROT_PLATFORM==STI_PLATFORM_WINDOWS
int flags = 0;
#else
int flags = MSG_DONTWAIT;
#endif
size_t reject_msg_len = 12;
if (send(sock_fd, REJECT_MSG, reject_msg_len, flags) < 0) {
print_last_socket_error("Failed to send rejection message to additional client", server_conn->hw_callbacks.server_printf);
}
// Prevent TIME_WAIT
set_linger_socket_option(sock_fd, 1, 0);
close_socket_fd(sock_fd);
}
}
void handle_client(SERVER_CONN *server_conn, CLIENT_CONN *client_conn) {
fd_set read_fds;
fd_set write_fds;
fd_set except_fds;
enum { NUM_FDS = 6 };
SOCKET all_fds[NUM_FDS];
all_fds[0] = server_conn->server_fd;
all_fds[1] = client_conn->ctrl_fd;
all_fds[2] = client_conn->mgmt_fd;
all_fds[3] = client_conn->mgmt_rsp_fd;
all_fds[4] = client_conn->h2t_data_fd;
all_fds[5] = client_conn->t2h_data_fd;
const char *all_fd_names[NUM_FDS];
all_fd_names[0] = SERVER_SOCK_NAME;
all_fd_names[1] = CONTROL_SOCK_NAME;
all_fd_names[2] = MANAGEMENT_SOCK_NAME;
all_fd_names[3] = MANAGEMENT_RSP_SOCK_NAME;
all_fd_names[4] = H2T_SOCK_NAME;
all_fd_names[5] = T2H_SOCK_NAME;
SOCKET max_fd = max_of(all_fds, NUM_FDS) + 1;
while (1) {
FD_ZERO(&read_fds);
FD_ZERO(&write_fds);
FD_ZERO(&except_fds);
// H2T, MGMT, and server listening socket are read-only
FD_SET(client_conn->h2t_data_fd, &read_fds);
FD_SET(client_conn->mgmt_fd, &read_fds);
FD_SET(server_conn->server_fd, &read_fds);
// T2H & MGMT_RSP are write-only
FD_SET(client_conn->t2h_data_fd, &write_fds);
FD_SET(client_conn->mgmt_rsp_fd, &write_fds);
// Ctrl is R/W
FD_SET(client_conn->ctrl_fd, &read_fds);
FD_SET(client_conn->ctrl_fd, &write_fds);
// Any socket can have an exception
FD_SET(client_conn->ctrl_fd, &except_fds);
FD_SET(client_conn->mgmt_fd, &except_fds);
FD_SET(client_conn->mgmt_rsp_fd, &except_fds);
FD_SET(client_conn->h2t_data_fd, &except_fds);
FD_SET(client_conn->t2h_data_fd, &except_fds);
struct timeval to;
to.tv_sec = 1;
to.tv_usec = 0;
if (select((int)max_fd, &read_fds, &write_fds, &except_fds, &to) < 0) {
print_last_socket_error("Select failure", server_conn->hw_callbacks.server_printf);
break;
}
// First handle exceptional conditions
char disconnect_client = 0;
for (int i = 0; i < NUM_FDS; ++i) {
if (FD_ISSET(all_fds[i], &except_fds)) {
server_conn->hw_callbacks.server_printf("Exception found on socket: %s\n", all_fd_names[i]);
disconnect_client = 1;
break;
}
}
if (disconnect_client) {
break;
}
// Check for additional clients attempting to connect,
// if so, politely tell them to get lost.
if (FD_ISSET(server_conn->server_fd, &read_fds)) {
reject_client(server_conn);
}
// See if any incoming control messages are present
if (FD_ISSET(client_conn->ctrl_fd, &read_fds)) {
if (process_control_message(client_conn, server_conn, &disconnect_client) == FAILURE) {
break;
}
if (disconnect_client) {
break;
}
}
// See if any incoming management commands are present
if (FD_ISSET(client_conn->mgmt_fd, &read_fds)) {
if (process_mgmt_data(client_conn, server_conn) == FAILURE) {
break;
}
}
// Lastly handle incoming H2T data
if (FD_ISSET(client_conn->h2t_data_fd, &read_fds)) {
if (process_h2t_data(client_conn, server_conn) == FAILURE) {
break;
}
}
// See if any outbound management data is present, if so send it out
if (server_conn->loopback_mode == 0) {
if (FD_ISSET(client_conn->mgmt_rsp_fd, &write_fds)) {
if (server_conn->hw_callbacks.acquire_mgmt_rsp_data != NULL) {
if (process_mgmt_rsp_data(client_conn, server_conn) == FAILURE) {
break;
}
}
}
// See if any outbound t2h data is present, if so send it out
if (FD_ISSET(client_conn->t2h_data_fd, &write_fds)) {
if (server_conn->hw_callbacks.acquire_t2h_data != NULL) {
if (process_t2h_data(client_conn, server_conn) == FAILURE) {
break;
}
}
}
}
}
}
RETURN_CODE initialize_server(unsigned short port, SERVER_CONN *server_conn, const char *port_filename) {
if (initialize_sockets_library() == FAILURE) {
return FAILURE;
}
// Fill the guardband preamble just once
populate_guardband((unsigned char *)server_conn->buff->mgmt_rsp_header_buff);
populate_guardband((unsigned char *)server_conn->buff->t2h_header_buff);
server_conn->server_addr.sin_family = AF_INET;
server_conn->server_addr.sin_addr.s_addr = INADDR_ANY;
server_conn->server_addr.sin_port = htons(port);
sizeof_addr = sizeof(server_conn->server_addr);
if (bind_server_socket(server_conn) != OK) {
server_conn->hw_callbacks.server_printf("Failed to bind server socket!\n");
return FAILURE;
}
    // If the request was to bind to any available port, take note of which port was assigned by the kernel
if (port == 0) {
if (getsockname(server_conn->server_fd, (struct sockaddr *)(&(server_conn->server_addr)), &sizeof_addr) < 0) {
print_last_socket_error("getsockname failed", server_conn->hw_callbacks.server_printf);
}
}
unsigned short port_used = ntohs(server_conn->server_addr.sin_port);
server_conn->hw_callbacks.server_printf("Server socket is listening on port: %d\n", port_used);
    // Write out the port used. This is especially useful when an ephemeral port is used.
#if STI_NOSYS_PROT_PLATFORM==STI_PLATFORM_WINDOWS || STI_NOSYS_PROT_PLATFORM==STI_PLATFORM_LINUX
if (port_filename != NULL) {
FILE *port_fp = fopen(port_filename, "w");
if (port_fp != NULL) {
fprintf(port_fp, "%d", port_used);
fclose(port_fp);
} else {
server_conn->hw_callbacks.server_printf("Failed to save out port file: %s\n", port_filename);
}
}
#endif
fflush(stdout);
return OK;
}
void server_main(SERVER_LIFESPAN lifespan, SERVER_CONN *server_conn) {
// Main loop of server app
do {
reset_buffers(server_conn);
CLIENT_CONN client_conn = CLIENT_CONN_default;
if (connect_client(server_conn, &client_conn) == OK) {
handle_client(server_conn, &client_conn);
} else {
server_conn->hw_callbacks.server_printf("Rejected remote client.\n");
}
close_client_conn(&client_conn, server_conn);
} while (lifespan == MULTIPLE_CLIENTS);
// Close the listening socket
set_linger_socket_option(server_conn->server_fd, 1, 0);
close_socket_fd(server_conn->server_fd);
}
| 1 | 19,965 | If strstr returns NULL, the check on 378 won't fire, because param_name will be at least SET_PARAM_CMD_LEN. | OPAE-opae-sdk | c |
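The point in the review note above is that a later length check cannot stand in for an explicit NULL check on strstr()'s result: if the needle is absent, strstr() returns NULL and any offset or length derived from it is meaningless. A minimal, hypothetical C++ sketch of the safe pattern (the command string and helper name below are illustrative, not the project's actual code):

// Hypothetical sketch: guard strstr() before using its result.
#include <cstring>
#include <cstdio>

// Returns the text following the needle, or nullptr if the needle is absent.
static const char *param_after(const char *cmd, const char *needle) {
    const char *hit = std::strstr(cmd, needle);
    if (hit == nullptr) {
        // Without this guard, pointer arithmetic on `hit` would be invalid
        // and any later length check could pass for the wrong reason.
        return nullptr;
    }
    return hit + std::strlen(needle);
}

int main() {
    const char *value = param_after("GET_PARAM foo", "SET_PARAM ");
    std::printf("%s\n", value != nullptr ? value : "(needle not found)");
    return 0;
}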
@@ -47,13 +47,12 @@ services::Status buildProgram(ClKernelFactoryIface & kernelFactory, const TypeId
return status;
}
-services::Status sum_singlepass(ExecutionContextIface & context, ClKernelFactoryIface & kernelFactory, Layout vectorsLayout,
- const UniversalBuffer & vectors, uint32_t nVectors, uint32_t vectorSize, uint32_t workItemsPerGroup,
- SumReducer::Result & result)
+services::Status sum_singlepass(ExecutionContextIface & context, ClKernelFactoryIface & kernelFactory, const UniversalBuffer & vectors,
+ uint32_t nVectors, uint32_t vectorSize, SumReducer::Result & result)
{
services::Status status;
- auto sum_kernel = kernelFactory.getKernel("sum_singlepass", status);
+ auto sum_kernel = kernelFactory.getKernel("sum_singlesubgroup", status);
DAAL_CHECK_STATUS_VAR(status);
     // no need to check overflow for nVectors * vectorSize because we already have buffer vectors of such size | 1 | /* file: sum_reducer.cpp */
/*******************************************************************************
* Copyright 2014-2021 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include "src/sycl/reducer.h"
#include "services/internal/execution_context.h"
#include "src/externals/service_ittnotify.h"
#include "services/daal_defines.h"
namespace daal
{
namespace services
{
namespace internal
{
namespace sycl
{
namespace math
{
DAAL_ITTNOTIFY_DOMAIN(daal.oneapi.internal.math.SumReducer);
services::Status buildProgram(ClKernelFactoryIface & kernelFactory, const TypeId & vectorTypeId)
{
services::Status status;
services::String fptype_name = getKeyFPType(vectorTypeId);
auto build_options = fptype_name;
build_options.add("-cl-std=CL1.2 -D LOCAL_BUFFER_SIZE=256");
services::String cachekey("__daal_oneapi_internal_math_sum_reducer_");
cachekey.add(build_options);
kernelFactory.build(ExecutionTargetIds::device, cachekey.c_str(), sum_reducer, build_options.c_str(), status);
return status;
}
services::Status sum_singlepass(ExecutionContextIface & context, ClKernelFactoryIface & kernelFactory, Layout vectorsLayout,
const UniversalBuffer & vectors, uint32_t nVectors, uint32_t vectorSize, uint32_t workItemsPerGroup,
SumReducer::Result & result)
{
services::Status status;
auto sum_kernel = kernelFactory.getKernel("sum_singlepass", status);
DAAL_CHECK_STATUS_VAR(status);
    // no need to check overflow for nVectors * vectorSize because we already have buffer vectors of such size
if (vectors.type() == TypeIds::id<float>())
{
DAAL_ASSERT_UNIVERSAL_BUFFER(vectors, float, nVectors * vectorSize);
DAAL_ASSERT_UNIVERSAL_BUFFER(result.sum, float, nVectors);
DAAL_ASSERT_UNIVERSAL_BUFFER(result.sumOfSquares, float, nVectors);
}
else
{
DAAL_ASSERT_UNIVERSAL_BUFFER(vectors, double, nVectors * vectorSize);
DAAL_ASSERT_UNIVERSAL_BUFFER(result.sum, double, nVectors);
DAAL_ASSERT_UNIVERSAL_BUFFER(result.sumOfSquares, double, nVectors);
}
KernelRange localRange(workItemsPerGroup, 1);
KernelRange globalRange(workItemsPerGroup, nVectors);
KernelNDRange range(2);
range.global(globalRange, status);
DAAL_CHECK_STATUS_VAR(status);
range.local(localRange, status);
DAAL_CHECK_STATUS_VAR(status);
KernelArguments args(6 /*8*/, status);
DAAL_CHECK_STATUS_VAR(status);
uint32_t vectorsAreRows = vectorsLayout == Layout::RowMajor ? 1 : 0;
args.set(0, vectorsAreRows);
args.set(1, vectors, AccessModeIds::read);
args.set(2, nVectors);
args.set(3, vectorSize);
args.set(4, result.sum, AccessModeIds::write);
args.set(5, result.sumOfSquares, AccessModeIds::write);
//args.set(6, LocalBuffer(vectors.type(), workItemsPerGroup));
//args.set(7, LocalBuffer(vectors.type(), workItemsPerGroup));
context.run(range, sum_kernel, args, status);
return status;
}
services::Status runStepColmajor(ExecutionContextIface & context, ClKernelFactoryIface & kernelFactory, const UniversalBuffer & vectors,
uint32_t nVectors, uint32_t vectorSize, uint32_t numWorkItems, uint32_t numWorkGroups, uint32_t numDivisionsByRow,
SumReducer::Result & stepResult)
{
services::Status status;
auto sum_kernel = kernelFactory.getKernel("sum_step_colmajor", status);
DAAL_CHECK_STATUS_VAR(status);
    // no need to check overflow for nVectors * vectorSize because we already have buffer vectors of such size
if (vectors.type() == TypeIds::id<float>())
{
DAAL_ASSERT_UNIVERSAL_BUFFER(vectors, float, nVectors * vectorSize);
DAAL_ASSERT_UNIVERSAL_BUFFER(stepResult.sum, float, nVectors * numDivisionsByRow);
DAAL_ASSERT_UNIVERSAL_BUFFER(stepResult.sumOfSquares, float, nVectors * numDivisionsByRow);
}
else
{
DAAL_ASSERT_UNIVERSAL_BUFFER(vectors, double, nVectors * vectorSize);
DAAL_ASSERT_UNIVERSAL_BUFFER(stepResult.sum, double, nVectors * numDivisionsByRow);
DAAL_ASSERT_UNIVERSAL_BUFFER(stepResult.sumOfSquares, double, nVectors * numDivisionsByRow);
}
DAAL_OVERFLOW_CHECK_BY_MULTIPLICATION(size_t, numWorkGroups, numWorkItems);
KernelRange localRange(numWorkItems);
KernelRange globalRange(numWorkGroups * numWorkItems);
KernelNDRange range(1);
range.global(globalRange, status);
DAAL_CHECK_STATUS_VAR(status);
range.local(localRange, status);
DAAL_CHECK_STATUS_VAR(status);
KernelArguments args(5, status);
DAAL_CHECK_STATUS_VAR(status);
args.set(0, vectors, AccessModeIds::read);
args.set(1, nVectors);
args.set(2, vectorSize);
args.set(3, stepResult.sum, AccessModeIds::write);
args.set(4, stepResult.sumOfSquares, AccessModeIds::write);
context.run(range, sum_kernel, args, status);
return status;
}
services::Status runFinalStepRowmajor(ExecutionContextIface & context, ClKernelFactoryIface & kernelFactory, SumReducer::Result & stepResult,
uint32_t nVectors, uint32_t vectorSize, uint32_t workItemsPerGroup, SumReducer::Result & result)
{
services::Status status;
auto sum_kernel = kernelFactory.getKernel("sum_final_step_rowmajor", status);
DAAL_CHECK_STATUS_VAR(status);
if (result.sum.type() == TypeIds::id<float>())
{
DAAL_ASSERT_UNIVERSAL_BUFFER(stepResult.sum, float, nVectors * vectorSize);
DAAL_ASSERT_UNIVERSAL_BUFFER(stepResult.sumOfSquares, float, nVectors * vectorSize);
DAAL_ASSERT_UNIVERSAL_BUFFER(result.sum, float, nVectors);
DAAL_ASSERT_UNIVERSAL_BUFFER(result.sumOfSquares, float, nVectors);
}
else
{
DAAL_ASSERT_UNIVERSAL_BUFFER(stepResult.sum, double, nVectors * vectorSize);
DAAL_ASSERT_UNIVERSAL_BUFFER(stepResult.sumOfSquares, double, nVectors * vectorSize);
DAAL_ASSERT_UNIVERSAL_BUFFER(result.sum, double, nVectors);
DAAL_ASSERT_UNIVERSAL_BUFFER(result.sumOfSquares, double, nVectors);
}
DAAL_OVERFLOW_CHECK_BY_MULTIPLICATION(size_t, workItemsPerGroup, nVectors);
KernelRange localRange(workItemsPerGroup);
KernelRange globalRange(workItemsPerGroup * nVectors);
KernelNDRange range(1);
range.global(globalRange, status);
DAAL_CHECK_STATUS_VAR(status);
range.local(localRange, status);
DAAL_CHECK_STATUS_VAR(status);
KernelArguments args(6, status);
DAAL_CHECK_STATUS_VAR(status);
args.set(0, stepResult.sum, AccessModeIds::read);
args.set(1, stepResult.sumOfSquares, AccessModeIds::read);
args.set(2, nVectors);
args.set(3, vectorSize);
args.set(4, result.sum, AccessModeIds::write);
args.set(5, result.sumOfSquares, AccessModeIds::write);
context.run(range, sum_kernel, args, status);
return status;
}
SumReducer::Result SumReducer::sum(Layout vectorsLayout, const UniversalBuffer & vectors, uint32_t nVectors, uint32_t vectorSize,
services::Status & status)
{
DAAL_ITTNOTIFY_SCOPED_TASK(SumReducer.sum);
auto & context = services::internal::getDefaultContext();
auto & kernelFactory = context.getClKernelFactory();
status |= buildProgram(kernelFactory, vectors.type());
DAAL_CHECK_STATUS_RETURN_IF_FAIL(status, SumReducer::Result());
DAAL_ASSERT(vectors.type() == TypeIds::id<float>() || vectors.type() == TypeIds::id<double>());
const uint32_t maxWorkItemsPerGroup = 256;
const uint32_t maxNumSubSlices = 9;
Result result(context, nVectors, vectors.type(), status);
DAAL_CHECK_STATUS_RETURN_IF_FAIL(status, result);
if (vectorsLayout == Layout::RowMajor)
{
status |= sum_singlepass(context, kernelFactory, vectorsLayout, vectors, nVectors, vectorSize, maxWorkItemsPerGroup, result);
}
else
{
const uint32_t numDivisionsByCol = (nVectors + maxWorkItemsPerGroup - 1) / maxWorkItemsPerGroup;
uint32_t numDivisionsByRow = 9;
if (vectorSize < 5000)
numDivisionsByRow = 1;
else if (vectorSize < 10000)
numDivisionsByRow = 3;
else if (vectorSize < 20000)
numDivisionsByRow = 6;
const uint32_t workItemsPerGroup = (maxWorkItemsPerGroup < nVectors) ? maxWorkItemsPerGroup : nVectors;
if (numDivisionsByRow > 1)
{
// no need to check overflow for numDivisionsByRow * nVectors due to numDivisionsByRow less than vectorSize,
// and input vectors buffer has size of vectorSize * numDivisionsByRow
Result stepResult(context, numDivisionsByRow * nVectors, vectors.type(), status);
DAAL_CHECK_STATUS_RETURN_IF_FAIL(status, result);
status |= runStepColmajor(context, kernelFactory, vectors, nVectors, vectorSize, workItemsPerGroup, numDivisionsByCol * numDivisionsByRow,
numDivisionsByRow, stepResult);
DAAL_CHECK_STATUS_RETURN_IF_FAIL(status, result);
const uint32_t stepWorkItems = maxNumSubSlices / 2; //need to be power of two
status |= runFinalStepRowmajor(context, kernelFactory, stepResult, nVectors, numDivisionsByRow, stepWorkItems, result);
}
else
{
status |= runStepColmajor(context, kernelFactory, vectors, nVectors, vectorSize, workItemsPerGroup, numDivisionsByCol, numDivisionsByRow,
result);
}
}
return result;
}
} // namespace math
} // namespace sycl
} // namespace internal
} // namespace services
} // namespace daal
| 1 | 27,689 | Does this change affect the performance of algorithms other than KMeans? | oneapi-src-oneDAL | cpp |
@@ -142,13 +142,13 @@ func TestListenerAddrEqual(t *testing.T) {
addr string
expect bool
}{
- {ln1, ":1234", false},
- {ln1, "0.0.0.0:1234", false},
+ {ln1, ":" + ln2port, false},
+ {ln1, "0.0.0.0:" + ln2port, false},
{ln1, "0.0.0.0", false},
{ln1, ":" + ln1port, true},
{ln1, "0.0.0.0:" + ln1port, true},
{ln2, ":" + ln2port, false},
- {ln2, "127.0.0.1:1234", false},
+ {ln2, "127.0.0.1:" + ln1port, false},
{ln2, "127.0.0.1", false},
{ln2, "127.0.0.1:" + ln2port, true},
} { | 1 | package caddy
import (
"net"
"strconv"
"testing"
)
/*
// TODO
func TestCaddyStartStop(t *testing.T) {
caddyfile := "localhost:1984"
for i := 0; i < 2; i++ {
_, err := Start(CaddyfileInput{Contents: []byte(caddyfile)})
if err != nil {
t.Fatalf("Error starting, iteration %d: %v", i, err)
}
client := http.Client{
Timeout: time.Duration(2 * time.Second),
}
resp, err := client.Get("http://localhost:1984")
if err != nil {
t.Fatalf("Expected GET request to succeed (iteration %d), but it failed: %v", i, err)
}
resp.Body.Close()
err = Stop()
if err != nil {
t.Fatalf("Error stopping, iteration %d: %v", i, err)
}
}
}
*/
func TestIsLoopback(t *testing.T) {
for i, test := range []struct {
input string
expect bool
}{
{"example.com", false},
{"localhost", true},
{"localhost:1234", true},
{"localhost:", true},
{"127.0.0.1", true},
{"127.0.0.1:443", true},
{"127.0.1.5", true},
{"10.0.0.5", false},
{"12.7.0.1", false},
{"[::1]", true},
{"[::1]:1234", true},
{"::1", true},
{"::", false},
{"[::]", false},
{"local", false},
} {
if got, want := IsLoopback(test.input), test.expect; got != want {
t.Errorf("Test %d (%s): expected %v but was %v", i, test.input, want, got)
}
}
}
func TestIsInternal(t *testing.T) {
for i, test := range []struct {
input string
expect bool
}{
{"9.255.255.255", false},
{"10.0.0.0", true},
{"10.0.0.1", true},
{"10.255.255.254", true},
{"10.255.255.255", true},
{"11.0.0.0", false},
{"10.0.0.5:1234", true},
{"11.0.0.5:1234", false},
{"172.15.255.255", false},
{"172.16.0.0", true},
{"172.16.0.1", true},
{"172.31.255.254", true},
{"172.31.255.255", true},
{"172.32.0.0", false},
{"172.16.0.1:1234", true},
{"192.167.255.255", false},
{"192.168.0.0", true},
{"192.168.0.1", true},
{"192.168.255.254", true},
{"192.168.255.255", true},
{"192.169.0.0", false},
{"192.168.0.1:1234", true},
{"fbff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false},
{"fc00::", true},
{"fc00::1", true},
{"[fc00::1]", true},
{"[fc00::1]:8888", true},
{"fdff:ffff:ffff:ffff:ffff:ffff:ffff:fffe", true},
{"fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", true},
{"fe00::", false},
{"fd12:3456:789a:1::1:1234", true},
{"example.com", false},
{"localhost", false},
{"localhost:1234", false},
{"localhost:", false},
{"127.0.0.1", false},
{"127.0.0.1:443", false},
{"127.0.1.5", false},
{"12.7.0.1", false},
{"[::1]", false},
{"[::1]:1234", false},
{"::1", false},
{"::", false},
{"[::]", false},
{"local", false},
} {
if got, want := IsInternal(test.input), test.expect; got != want {
t.Errorf("Test %d (%s): expected %v but was %v", i, test.input, want, got)
}
}
}
func TestListenerAddrEqual(t *testing.T) {
ln1, err := net.Listen("tcp", "[::]:0")
if err != nil {
t.Fatal(err)
}
defer ln1.Close()
ln1port := strconv.Itoa(ln1.Addr().(*net.TCPAddr).Port)
ln2, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
defer ln2.Close()
ln2port := strconv.Itoa(ln2.Addr().(*net.TCPAddr).Port)
for i, test := range []struct {
ln net.Listener
addr string
expect bool
}{
{ln1, ":1234", false},
{ln1, "0.0.0.0:1234", false},
{ln1, "0.0.0.0", false},
{ln1, ":" + ln1port, true},
{ln1, "0.0.0.0:" + ln1port, true},
{ln2, ":" + ln2port, false},
{ln2, "127.0.0.1:1234", false},
{ln2, "127.0.0.1", false},
{ln2, "127.0.0.1:" + ln2port, true},
} {
if got, want := listenerAddrEqual(test.ln, test.addr), test.expect; got != want {
t.Errorf("Test %d (%s == %s): expected %v but was %v", i, test.addr, test.ln.Addr().String(), want, got)
}
}
}
| 1 | 11,002 | What do these changes have to do with the request ID? | caddyserver-caddy | go |
@@ -48,11 +48,11 @@
#include <cstdio>
#include <cstdlib>
-typedef Kokkos::DefaultExecutionSpace Device;
-typedef Kokkos::HostSpace::execution_space Host;
+using Device = Kokkos::DefaultExecutionSpace;
+using Host = Kokkos::HostSpace::execution_space;
-typedef Kokkos::TeamPolicy<Device> team_policy;
-typedef team_policy::member_type team_member;
+using team_policy = Kokkos::TeamPolicy<Device>;
+using team_member = team_policy::member_type;
static const int TEAM_SIZE = 16;
| 1 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott ([email protected])
//
// ************************************************************************
//@HEADER
*/
#include <Kokkos_Core.hpp>
#include <Kokkos_DualView.hpp>
#include <impl/Kokkos_Timer.hpp>
#include <cstdio>
#include <cstdlib>
typedef Kokkos::DefaultExecutionSpace Device;
typedef Kokkos::HostSpace::execution_space Host;
typedef Kokkos::TeamPolicy<Device> team_policy;
typedef team_policy::member_type team_member;
static const int TEAM_SIZE = 16;
struct find_2_tuples {
int chunk_size;
Kokkos::View<const int*> data;
Kokkos::View<int**> histogram;
find_2_tuples(int chunk_size_, Kokkos::DualView<int*> data_,
Kokkos::DualView<int**> histogram_)
: chunk_size(chunk_size_),
data(data_.d_view),
histogram(histogram_.d_view) {
data_.sync<Device>();
histogram_.sync<Device>();
histogram_.modify<Device>();
}
KOKKOS_INLINE_FUNCTION
void operator()(const team_member& dev) const {
Kokkos::View<int**, Kokkos::MemoryUnmanaged> l_histogram(
dev.team_shmem(), TEAM_SIZE, TEAM_SIZE);
Kokkos::View<int*, Kokkos::MemoryUnmanaged> l_data(dev.team_shmem(),
chunk_size + 1);
const int i = dev.league_rank() * chunk_size;
for (int j = dev.team_rank(); j < chunk_size + 1; j += dev.team_size())
l_data(j) = data(i + j);
for (int k = dev.team_rank(); k < TEAM_SIZE; k += dev.team_size())
for (int l = 0; l < TEAM_SIZE; l++) l_histogram(k, l) = 0;
dev.team_barrier();
for (int j = 0; j < chunk_size; j++) {
for (int k = dev.team_rank(); k < TEAM_SIZE; k += dev.team_size())
for (int l = 0; l < TEAM_SIZE; l++) {
if ((l_data(j) == k) && (l_data(j + 1) == l)) l_histogram(k, l)++;
}
}
for (int k = dev.team_rank(); k < TEAM_SIZE; k += dev.team_size())
for (int l = 0; l < TEAM_SIZE; l++) {
Kokkos::atomic_fetch_add(&histogram(k, l), l_histogram(k, l));
}
dev.team_barrier();
}
size_t team_shmem_size(int team_size) const {
return Kokkos::View<int**, Kokkos::MemoryUnmanaged>::shmem_size(TEAM_SIZE,
TEAM_SIZE) +
Kokkos::View<int*, Kokkos::MemoryUnmanaged>::shmem_size(chunk_size +
1);
}
};
int main(int narg, char* args[]) {
Kokkos::initialize(narg, args);
{
int chunk_size = 1024;
int nchunks = 100000; // 1024*1024;
Kokkos::DualView<int*> data("data", nchunks * chunk_size + 1);
srand(1231093);
for (int i = 0; i < (int)data.extent(0); i++) {
data.h_view(i) = rand() % TEAM_SIZE;
}
data.modify<Host>();
data.sync<Device>();
Kokkos::DualView<int**> histogram("histogram", TEAM_SIZE, TEAM_SIZE);
Kokkos::Timer timer;
// threads/team is automatically limited to maximum supported by the device.
int team_size = TEAM_SIZE;
if (team_size > Device::execution_space::concurrency())
team_size = Device::execution_space::concurrency();
Kokkos::parallel_for(team_policy(nchunks, team_size),
find_2_tuples(chunk_size, data, histogram));
Kokkos::fence();
double time = timer.seconds();
histogram.sync<Host>();
printf("Time: %f \n\n", time);
int sum = 0;
for (int k = 0; k < TEAM_SIZE; k++) {
for (int l = 0; l < TEAM_SIZE; l++) {
printf("%i ", histogram.h_view(k, l));
sum += histogram.h_view(k, l);
}
printf("\n");
}
printf("Result: %i %i\n", sum, chunk_size * nchunks);
}
Kokkos::finalize();
}
| 1 | 24,406 | I'm kind of surprised this doesn't require `typename`? | kokkos-kokkos | cpp |
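On the `typename` question above: the keyword is only required when a nested name depends on a template parameter. At namespace scope, `Kokkos::TeamPolicy<Device>` is already a concrete type, so `team_policy::member_type` is not a dependent name. A minimal sketch with made-up types showing the two cases:

// Concrete type: no disambiguation needed.
struct ConcretePolicy {
    using member_type = int;
};

// Non-dependent context: the compiler already knows member_type is a type.
using member = ConcretePolicy::member_type;

// Dependent context: Policy::member_type depends on the template parameter,
// so the `typename` keyword is required here.
template <typename Policy>
struct Wrapper {
    using member = typename Policy::member_type;
};

int main() {
    member a = 1;
    Wrapper<ConcretePolicy>::member b = 2;
    return a + b;
}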
@@ -17,6 +17,7 @@
package agreement
import (
+ "github.com/algorand/go-algorand/logging"
"time"
"github.com/algorand/go-algorand/config" | 1 | // Copyright (C) 2019-2021 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package agreement
import (
"time"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/protocol"
)
// The player implements the top-level state machine functionality of the
// agreement protocol.
type player struct {
// Round, Period, and Step hold the current round, period, and step of
// the player state machine.
Round round
Period period
Step step
// LastConcluding holds the largest step reached in the last period. As
// described in the spec, it affects the propagation of next-vote
// messages.
LastConcluding step
// Deadline contains the time of the next timeout expected by the player
// state machine (relevant to the start of the current period).
Deadline time.Duration
// Napping is set when the player is expecting a random timeout (i.e.,
// to determine when the player chooses to send a next-vote).
Napping bool
// FastRecoveryDeadline contains the next timeout expected for fast
// partition recovery.
FastRecoveryDeadline time.Duration
// Pending holds the player's proposalTable, which stores proposals that
// must be verified after some vote has been verified.
Pending proposalTable
}
func (p *player) T() stateMachineTag {
return playerMachine
}
func (p *player) underlying() actor {
return p
}
// Precondition: passed-in player is equal to player
// Postcondition: each messageEvent is processed exactly once
func (p *player) handle(r routerHandle, e event) []action {
var actions []action
if e.t() == none {
return nil
}
switch e := e.(type) {
case messageEvent:
return p.handleMessageEvent(r, e)
case thresholdEvent:
return p.handleThresholdEvent(r, e)
case timeoutEvent:
if e.T == fastTimeout {
return p.handleFastTimeout(r, e)
}
if !p.Napping {
r.t.logTimeout(*p)
}
switch p.Step {
case soft:
// precondition: nap = false
actions = p.issueSoftVote(r)
p.Step = cert
// update tracer state to match player
r.t.setMetadata(tracerMetadata{p.Round, p.Period, p.Step})
return actions
case cert:
// precondition: nap = false
p.Step = next
// update tracer state to match player
r.t.setMetadata(tracerMetadata{p.Round, p.Period, p.Step})
return p.issueNextVote(r)
default:
if p.Napping {
return p.issueNextVote(r) // sets p.Napping to false
}
// not napping, so we should enter a new step
p.Step++ // note: this must happen before next timeout setting.
// TODO add unit test to ensure that deadlines increase monotonically here
lower, upper := p.Step.nextVoteRanges()
delta := time.Duration(e.RandomEntropy % uint64(upper-lower))
p.Napping = true
p.Deadline = lower + delta
return actions
}
case roundInterruptionEvent:
return p.enterRound(r, e, e.Round)
case checkpointEvent:
return p.handleCheckpointEvent(r, e)
default:
panic("bad event")
}
}
func (p *player) handleFastTimeout(r routerHandle, e timeoutEvent) []action {
if e.Proto.Err != nil {
r.t.log.Errorf("failed to read protocol version for fastTimeout event (proto %v): %v", e.Proto.Version, e.Proto.Err)
return nil
}
lambda := config.Consensus[e.Proto.Version].FastRecoveryLambda
k := (p.FastRecoveryDeadline + lambda - 1) / lambda // round up
lower, upper := k*lambda, (k+1)*lambda
delta := time.Duration(e.RandomEntropy % uint64(upper-lower))
if p.FastRecoveryDeadline == 0 {
// don't vote the first time
p.FastRecoveryDeadline = lower + delta + lambda // add lambda for extra delay the first time
return nil
}
p.FastRecoveryDeadline = lower + delta
r.t.logFastTimeout(*p)
return p.issueFastVote(r)
}
func (p *player) issueSoftVote(r routerHandle) (actions []action) {
defer func() {
p.Deadline = deadlineTimeout
}()
e := r.dispatch(*p, proposalFrozenEvent{}, proposalMachinePeriod, p.Round, p.Period, 0)
a := pseudonodeAction{T: attest, Round: p.Round, Period: p.Period, Step: soft, Proposal: e.(proposalFrozenEvent).Proposal}
r.t.logProposalFrozen(a.Proposal, a.Round, a.Period)
r.t.timeR().RecStep(p.Period, soft, a.Proposal)
res := r.dispatch(*p, nextThresholdStatusRequestEvent{}, voteMachinePeriod, p.Round, p.Period-1, 0)
nextStatus := res.(nextThresholdStatusEvent) // panic if violate postcondition
if p.Period > 0 && !nextStatus.Bottom && nextStatus.Proposal != bottom {
// did not see bottom: vote for our starting value
// we check if answer.Proposal != bottom because we may have arrived here due to a fast-forward/soft threshold
// If we arrive due to fast-forward/soft threshold; then answer.Bottom = false and answer.Proposal = bottom
// and we should soft-vote normally (not based on the starting value)
a.Proposal = nextStatus.Proposal
return append(actions, a)
}
if a.Proposal == bottom {
// did not see anything: do not vote
return nil
}
if p.Period > a.Proposal.OriginalPeriod {
// leader sent reproposal: vote if we saw a quorum for that hash, even if we saw nextStatus.Bottom
if nextStatus.Proposal != bottom && nextStatus.Proposal == a.Proposal {
return append(actions, a)
}
return nil
}
// original proposal: vote for it
return append(actions, a)
}
// A committableEvent is the trigger for issuing a cert vote.
func (p *player) issueCertVote(r routerHandle, e committableEvent) action {
r.t.timeR().RecStep(p.Period, cert, e.Proposal)
return pseudonodeAction{T: attest, Round: p.Round, Period: p.Period, Step: cert, Proposal: e.Proposal}
}
func (p *player) issueNextVote(r routerHandle) []action {
actions := p.partitionPolicy(r)
a := pseudonodeAction{T: attest, Round: p.Round, Period: p.Period, Step: p.Step, Proposal: bottom}
answer := stagedValue(*p, r, p.Round, p.Period)
if answer.Committable {
a.Proposal = answer.Proposal
} else {
res := r.dispatch(*p, nextThresholdStatusRequestEvent{}, voteMachinePeriod, p.Round, p.Period-1, 0)
nextStatus := res.(nextThresholdStatusEvent) // panic if violate postcondition
if !nextStatus.Bottom {
// if we fast-forwarded to this period or entered via a soft/cert threshold,
// nextStatus.Bottom will be false and we will next vote bottom.
// As long as a majority of honest users (in the cert threshold case) do not vote bottom (as assumed), we are safe.
// Note that cert threshold fast-forwarding will never change a next value vote to a next bottom vote -
// if a player has voted for a value, they have the block, and should have ended the round.
a.Proposal = nextStatus.Proposal
}
}
actions = append(actions, a)
r.t.timeR().RecStep(p.Period, p.Step, a.Proposal)
_, upper := p.Step.nextVoteRanges()
p.Napping = false
p.Deadline = upper
return actions
}
func (p *player) issueFastVote(r routerHandle) (actions []action) {
actions = p.partitionPolicy(r)
elate := r.dispatch(*p, dumpVotesRequestEvent{}, voteMachineStep, p.Round, p.Period, late).(dumpVotesEvent).Votes
eredo := r.dispatch(*p, dumpVotesRequestEvent{}, voteMachineStep, p.Round, p.Period, redo).(dumpVotesEvent).Votes
edown := r.dispatch(*p, dumpVotesRequestEvent{}, voteMachineStep, p.Round, p.Period, down).(dumpVotesEvent).Votes
votes := append(eredo, edown...)
votes = append(elate, votes...)
actions = append(actions, networkAction{T: broadcastVotes, UnauthenticatedVotes: votes})
a := pseudonodeAction{T: attest, Round: p.Round, Period: p.Period, Step: down, Proposal: bottom}
answer := stagedValue(*p, r, p.Round, p.Period)
if answer.Committable {
a.Step = late
a.Proposal = answer.Proposal
} else {
res := r.dispatch(*p, nextThresholdStatusRequestEvent{}, voteMachinePeriod, p.Round, p.Period-1, 0)
nextStatus := res.(nextThresholdStatusEvent) // panic if violate postcondition
if !nextStatus.Bottom {
a.Step = redo
// note that this is bottom if we fast-forwarded to this period or entered via a soft/cert threshold.
a.Proposal = nextStatus.Proposal
}
}
if a.Proposal == bottom {
// required if we entered the period via a soft threshold
a.Step = down
}
return append(actions, a)
}
func (p *player) handleCheckpointEvent(r routerHandle, e checkpointEvent) []action {
return []action{
checkpointAction{
Round: e.Round,
Period: e.Period,
Step: e.Step,
Err: e.Err,
done: e.done,
}}
}
func (p *player) handleThresholdEvent(r routerHandle, e thresholdEvent) []action {
r.t.timeR().RecThreshold(e)
var actions []action
switch e.t() {
case certThreshold:
// for future periods, fast-forwarding below will ensure correct staging
// for past periods, having a freshest certThreshold will prevent losing the block
r.dispatch(*p, e, proposalMachine, 0, 0, 0)
// Now, also check if we have the block.
res := stagedValue(*p, r, e.Round, e.Period)
if res.Committable {
cert := Certificate(e.Bundle)
a0 := ensureAction{Payload: res.Payload, Certificate: cert}
actions = append(actions, a0)
as := p.enterRound(r, e, p.Round+1)
return append(actions, as...)
}
// we don't have the block! We need to ensure we will be able to receive the block.
// In addition, hint to the ledger to fetch by digest.
actions = append(actions, stageDigestAction{Certificate: Certificate(e.Bundle)})
if p.Period < e.Period {
actions = append(actions, p.enterPeriod(r, e, e.Period)...)
}
return actions
case softThreshold:
// note that it is ok not to stage softThresholds from previous periods; relaying the pinned block
// handles any edge case (w.r.t. resynchronization, at least)
if p.Period > e.Period {
return nil
}
if p.Period < e.Period {
return p.enterPeriod(r, e, e.Period)
}
ec := r.dispatch(*p, e, proposalMachine, p.Round, p.Period, 0)
if ec.t() == proposalCommittable && p.Step <= cert {
actions = append(actions, p.issueCertVote(r, ec.(committableEvent)))
}
return actions
case nextThreshold:
// We might receive a next threshold event for the previous period due to fast-forwarding or a soft threshold.
// If we do, this is okay, but the proposalMachine contract-checker will complain.
// TODO test this case and update the contract-checker so it does not complain when this is benign
if p.Period > e.Period {
return nil
}
return p.enterPeriod(r, e, e.Period+1)
default:
panic("bad event")
}
}
func (p *player) enterPeriod(r routerHandle, source thresholdEvent, target period) []action {
actions := p.partitionPolicy(r)
// this needs to happen before changing player state so the correct old blockAssemblers can be promoted
// TODO might be better passing through the old period explicitly in the {soft,next}Threshold event
e := r.dispatch(*p, source, proposalMachine, p.Round, p.Period, 0)
r.t.logPeriodConcluded(*p, target, source.Proposal)
p.LastConcluding = p.Step
p.Period = target
p.Step = soft
p.Napping = false
p.FastRecoveryDeadline = 0 // set immediately
p.Deadline = FilterTimeout(target, source.Proto)
// update tracer state to match player
r.t.setMetadata(tracerMetadata{p.Round, p.Period, p.Step})
actions = append(actions, rezeroAction{Round: p.Round})
if e.t() == proposalCommittable { // implies source.t() == softThreshold
return append(actions, p.issueCertVote(r, e.(committableEvent)))
}
if source.t() == nextThreshold {
proposal := source.Proposal
if proposal == bottom {
a := pseudonodeAction{T: assemble, Round: p.Round, Period: p.Period}
return append(actions, a)
}
a := pseudonodeAction{T: repropose, Round: p.Round, Period: p.Period, Proposal: proposal}
return append(actions, a)
}
return actions
}
func (p *player) enterRound(r routerHandle, source event, target round) []action {
var actions []action
newRoundEvent := source
// passing in a cert threshold to the proposalMachine is now ambiguous,
// so replace with an explicit new round event.
// In addition, handle a new source: payloadVerified (which can trigger new round if
// received after cert threshold)
if source.t() == certThreshold || source.t() == payloadVerified { // i.e., source.t() != roundInterruption
r.t.logRoundStart(*p, target)
newRoundEvent = roundInterruptionEvent{Round: target}
}
// this happens here so that the proposalMachine contract does not complain
e := r.dispatch(*p, newRoundEvent, proposalMachine, target, 0, 0)
p.LastConcluding = p.Step
p.Round = target
p.Period = 0
p.Step = soft
p.Napping = false
p.FastRecoveryDeadline = 0 // set immediately
switch source := source.(type) {
case roundInterruptionEvent:
p.Deadline = FilterTimeout(0, source.Proto.Version)
case thresholdEvent:
p.Deadline = FilterTimeout(0, source.Proto)
case filterableMessageEvent:
p.Deadline = FilterTimeout(0, source.Proto.Version)
}
// update tracer state to match player
r.t.setMetadata(tracerMetadata{p.Round, p.Period, p.Step})
r.t.resetTimingWithPipeline(target)
// do proposal-related actions
as := pseudonodeAction{T: assemble, Round: p.Round, Period: 0}
actions = append(actions, rezeroAction{Round: target}, as)
if e.t() == payloadPipelined {
e := e.(payloadProcessedEvent)
msg := message{MessageHandle: 0, Tag: protocol.ProposalPayloadTag, UnauthenticatedProposal: e.UnauthenticatedPayload} // TODO do we want to keep around the original handle?
a := verifyPayloadAction(messageEvent{T: payloadPresent, Input: msg}, p.Round, e.Period, e.Pinned)
actions = append(actions, a)
}
// we might need to handle a pipelined threshold event
res := r.dispatch(*p, freshestBundleRequestEvent{}, voteMachineRound, p.Round, 0, 0)
freshestRes := res.(freshestBundleEvent) // panic if violate postcondition
if freshestRes.Ok {
a4 := p.handle(r, freshestRes.Event)
actions = append(actions, a4...)
}
return actions
}
// partitionPolicy checks if the player is in a partition, and if it is,
// it returns the list of actions necessary to recover.
//
// partitionPolicy represents an attempt to resynchronize.
//
// These actions include the repropagation of the freshest bundle, if one was seen,
// (necessarily true for p.Period > 0 or the presence of a soft threshold)
// and the repropagation of the block payload this bundle votes for, if one was seen.
func (p *player) partitionPolicy(r routerHandle) (actions []action) {
if !p.partitioned() {
return
}
res := r.dispatch(*p, freshestBundleRequestEvent{}, voteMachineRound, p.Round, 0, 0)
bundleResponse := res.(freshestBundleEvent) // panic if violate postcondition
if bundleResponse.Ok {
// TODO do we want to authenticate our own bundles?
b := bundleResponse.Event.Bundle
r.t.logBundleBroadcast(*p, b)
a0 := broadcastAction(protocol.VoteBundleTag, b)
actions = append(actions, a0)
}
// On resynchronization, first try relaying the staged proposal from the same period as
// the freshest bundle. If that does not exist, for instance if we saw two next quorums in a row,
// then we fall back to relaying the pinned value, for liveness.
// One specific scenario where this is essential, assuming we handle ensure digest asynchronously:
// - Let the majority of honest nodes cert vote, and then see a next value quorum, and enter p + 1.
// - They see another next value quorum, and enter p + 2.
// - The minority of honest nodes see a certThreshold (but without a block), in period p. Assume that
// they are partitioned from the majority of honest nodes, until the majority reach p + 2.
// - The minority already has the freshest bundle, so will not advance to period p + 2. However, the
// majority will also filter out the cert threshold (due to a stale period).
// - Now we relay the pinned value, and then can wait for catchup.
// - Another optimization is that we could allow cert bundles from stale periods to bypass the filter.
// This may be worth implementing in the future.
bundleRound := p.Round
bundlePeriod := p.Period
switch {
case bundleResponse.Ok && bundleResponse.Event.Bundle.Proposal != bottom:
b := bundleResponse.Event.Bundle
bundleRound = b.Round
bundlePeriod = b.Period
fallthrough
case p.Period == 0:
resStaged := stagedValue(*p, r, bundleRound, bundlePeriod)
if resStaged.Committable {
transmit := compoundMessage{Proposal: resStaged.Payload.u()}
r.t.logProposalRepropagate(resStaged.Proposal, bundleRound, bundlePeriod)
a1 := broadcastAction(protocol.ProposalPayloadTag, transmit)
actions = append(actions, a1)
} else {
// even if there is no staged value, there may be a pinned value
resPinned := pinnedValue(*p, r, bundleRound)
if resPinned.PayloadOK {
transmit := compoundMessage{Proposal: resPinned.Payload.u()}
r.t.logProposalRepropagate(resPinned.Proposal, bundleRound, bundlePeriod)
a1 := broadcastAction(protocol.ProposalPayloadTag, transmit)
actions = append(actions, a1)
}
}
}
return
}
func (p *player) partitioned() bool {
return p.Step >= partitionStep || p.Period >= 3
}
func (p *player) handleMessageEvent(r routerHandle, e messageEvent) (actions []action) {
// is it a proposal-vote? (i.e., vote where step = 0)
proposalVote := false
switch e.t() {
case votePresent, voteVerified:
uv := e.Input.UnauthenticatedVote
proposalVote = (uv.R.Step == propose)
}
// wrap message event with current player round, etc. for freshness computation
delegatedE := filterableMessageEvent{
messageEvent: e,
FreshnessData: freshnessData{
PlayerRound: p.Round,
PlayerPeriod: p.Period,
PlayerStep: p.Step,
PlayerLastConcluding: p.LastConcluding,
},
}
// if so, process it separately
if proposalVote {
doneProcessing := true // TODO check that this is still required
defer func() {
tail := e.Tail
if e.t() == voteVerified {
tail = p.Pending.pop(e.TaskIndex)
}
if tail == nil || !doneProcessing {
return
}
ev := *tail // make sure the event we handle is messageEvent, not *messageEvent
suffix := p.handle(r, ev)
actions = append(actions, suffix...)
}()
ef := r.dispatch(*p, delegatedE, proposalMachine, 0, 0, 0)
switch ef.t() {
case voteMalformed:
err := ef.(filteredEvent).Err
return append(actions, disconnectAction(e, err))
case voteFiltered:
err := ef.(filteredEvent).Err
return append(actions, ignoreAction(e, err))
}
if e.t() == votePresent {
doneProcessing = false
seq := p.Pending.push(e.Tail)
uv := e.Input.UnauthenticatedVote
return append(actions, verifyVoteAction(e, uv.R.Round, uv.R.Period, seq))
}
v := e.Input.Vote
a := relayAction(e, protocol.AgreementVoteTag, v.u())
ep := ef.(proposalAcceptedEvent)
if ep.PayloadOk {
transmit := compoundMessage{
Proposal: ep.Payload.u(),
Vote: v.u(),
}
a = broadcastAction(protocol.ProposalPayloadTag, transmit)
}
return append(actions, a)
}
switch e.t() {
case payloadPresent, payloadVerified:
ef := r.dispatch(*p, delegatedE, proposalMachine, 0, 0, 0)
switch ef.t() {
case payloadMalformed:
err := makeSerErrf("rejected message since it was invalid: %v", ef.(filteredEvent).Err)
return append(actions, ignoreAction(e, err))
case payloadRejected:
return append(actions, ignoreAction(e, ef.(payloadProcessedEvent).Err))
case payloadPipelined:
ep := ef.(payloadProcessedEvent)
if ep.Round == p.Round {
up := e.Input.UnauthenticatedProposal
uv := ef.(payloadProcessedEvent).Vote.u()
a := relayAction(e, protocol.ProposalPayloadTag, compoundMessage{Proposal: up, Vote: uv})
actions = append(actions, a)
return append(actions, verifyPayloadAction(e, ep.Round, ep.Period, ep.Pinned))
}
}
// relay as the proposer
if e.Input.MessageHandle == nil {
var uv unauthenticatedVote
switch ef.t() {
case payloadPipelined, payloadAccepted:
uv = ef.(payloadProcessedEvent).Vote.u()
case proposalCommittable:
uv = ef.(committableEvent).Vote.u()
}
up := e.Input.UnauthenticatedProposal
a := relayAction(e, protocol.ProposalPayloadTag, compoundMessage{Proposal: up, Vote: uv})
actions = append(actions, a)
}
// If the payload is valid, check it against any received cert threshold.
// Of course, this should only trigger for payloadVerified case.
// This allows us to handle late payloads (relative to cert-bundles, i.e., certificates) without resorting to catchup.
if ef.t() == proposalCommittable || ef.t() == payloadAccepted {
freshestRes := r.dispatch(*p, freshestBundleRequestEvent{}, voteMachineRound, p.Round, 0, 0).(freshestBundleEvent)
if freshestRes.Ok && freshestRes.Event.t() == certThreshold && freshestRes.Event.Proposal == e.Input.Proposal.value() {
cert := Certificate(freshestRes.Event.Bundle)
a0 := ensureAction{Payload: e.Input.Proposal, Certificate: cert}
actions = append(actions, a0)
as := p.enterRound(r, delegatedE, cert.Round+1)
return append(actions, as...)
}
}
if ef.t() == proposalCommittable && p.Step <= cert {
actions = append(actions, p.issueCertVote(r, ef.(committableEvent)))
}
return actions
case votePresent, voteVerified:
ef := r.dispatch(*p, delegatedE, voteMachine, 0, 0, 0)
switch ef.t() {
case voteMalformed:
// TODO Add Metrics here to capture telemetryspec.VoteRejectedEvent details
// Reason: fmt.Sprintf("rejected malformed message: %v", e.Err),
err := makeSerErrf("rejected message since it was invalid: %v", ef.(filteredEvent).Err)
return append(actions, disconnectAction(e, err))
case voteFiltered:
err := ef.(filteredEvent).Err
return append(actions, ignoreAction(e, err))
}
if e.t() == votePresent {
uv := e.Input.UnauthenticatedVote
return append(actions, verifyVoteAction(e, uv.R.Round, uv.R.Period, 0))
} // else e.t() == voteVerified
v := e.Input.Vote
actions = append(actions, relayAction(e, protocol.AgreementVoteTag, v.u()))
a1 := p.handle(r, ef)
return append(actions, a1...)
case bundlePresent, bundleVerified:
ef := r.dispatch(*p, delegatedE, voteMachine, 0, 0, 0)
switch ef.t() {
case bundleMalformed:
err := makeSerErrf("rejected message since it was invalid: %v", ef.(filteredEvent).Err)
return append(actions, disconnectAction(e, err))
case bundleFiltered:
err := ef.(filteredEvent).Err
return append(actions, ignoreAction(e, err))
}
if e.t() == bundlePresent {
ub := e.Input.UnauthenticatedBundle
return append(actions, verifyBundleAction(e, ub.Round, ub.Period, ub.Step))
}
a0 := relayAction(e, protocol.VoteBundleTag, ef.(thresholdEvent).Bundle)
a1 := p.handle(r, ef)
return append(append(actions, a0), a1...)
}
panic("bad event")
}
| 1 | 42,411 | nit: move this one down. | algorand-go-algorand | go |
@@ -137,8 +137,14 @@ class Bars(Chart):
class BoxWhisker(Chart):
"""
- BoxWhisker represent data as a distributions highlighting
- the median, mean and various percentiles.
+ BoxWhisker allows representing the distribution of data grouped
+ into one or more groups by summarizing the data using quartiles.
+ The boxes of a BoxWhisker element represent the first, second and
+ third quartiles. The whiskers follow the Tukey boxplot definition
+ representing the lowest datum still within 1.5 IQR of the lower
+ quartile, and the highest datum still within 1.5 IQR of the upper
+ quartile. Any points falling outside this range are shown as
+ distinct outlier points.
"""
group = param.String(default='BoxWhisker', constant=True) | 1 | import numpy as np
import param
from ..core import util
from ..core import Dimension, Dataset, Element2D
from .util import compute_edges
class Chart(Dataset, Element2D):
"""
The data held within Chart is a numpy array of shape (N, D),
where N is the number of samples and D the number of dimensions.
Chart Elements are sliceable along up to two key dimensions.
The data may be supplied in one of three formats:
1) As a numpy array of shape (N, D).
2) As a list of length N containing tuples of length D.
3) As a tuple of length D containing iterables of length N.
"""
kdims = param.List(default=[Dimension('x')], bounds=(1,2), doc="""
The key dimensions of the Chart, determining the number of
indexable dimensions.""")
group = param.String(default='Chart', constant=True)
vdims = param.List(default=[Dimension('y')], bounds=(1,None), doc="""
The value dimensions of the Chart, usually corresponding to a
number of dependent variables.""")
def __getitem__(self, index):
sliced = super(Chart, self).__getitem__(index)
if not isinstance(sliced, Chart):
return sliced
if not isinstance(index, tuple): index = (index,)
ndims = len(self.extents)//2
lower_bounds, upper_bounds = [None]*ndims, [None]*ndims
for i, slc in enumerate(index[:ndims]):
if isinstance(slc, slice):
lbound = self.extents[i]
ubound = self.extents[ndims:][i]
lower_bounds[i] = lbound if slc.start is None else slc.start
upper_bounds[i] = ubound if slc.stop is None else slc.stop
sliced.extents = tuple(lower_bounds+upper_bounds)
return sliced
class Scatter(Chart):
"""
Scatter is a Element2D type which gets displayed as a number of
disconnected points.
"""
group = param.String(default='Scatter', constant=True)
class Curve(Chart):
"""
Curve is a simple Chart Element providing 1D indexing along
the x-axis.
"""
group = param.String(default='Curve', constant=True)
class ErrorBars(Chart):
"""
ErrorBars is a Chart Element type representing any number of
errorbars situated in a 2D space. The errors must be supplied
as an Nx3 or Nx4 array representing the x/y-positions and
either the symmetric error or asymmetric errors respectively.
"""
group = param.String(default='ErrorBars', constant=True, doc="""
A string describing the quantity measured by the ErrorBars
object.""")
kdims = param.List(default=[Dimension('x')],
bounds=(1, 2), constant=True, doc="""
The Dimensions corresponding to the x- and y-positions of
the error bars.""")
vdims = param.List(default=[Dimension('y'), Dimension('yerror')],
bounds=(1, 3), constant=True)
def range(self, dim, data_range=True):
didx = self.get_dimension_index(dim)
dim = self.get_dimension(dim)
if didx == 1 and data_range and len(self):
mean = self.dimension_values(1)
neg_error = self.dimension_values(2)
if len(self.dimensions()) > 3:
pos_error = self.dimension_values(3)
else:
pos_error = neg_error
lower = np.nanmin(mean-neg_error)
upper = np.nanmax(mean+pos_error)
return util.dimension_range(lower, upper, dim)
return super(ErrorBars, self).range(dim, data_range)
class Spread(ErrorBars):
"""
Spread is a Chart Element type representing a spread of
values as given by a mean and standard error or confidence
intervals. Just like the ErrorBars Element type, mean and
deviations from the mean should be supplied as either an
Nx3 or Nx4 array representing the x-values, mean values
    and symmetric or asymmetric errors respectively. Internally
the data is always expanded to an Nx4 array.
"""
group = param.String(default='Spread', constant=True)
class Bars(Chart):
"""
    Bars is an Element type representing a number of stacked and
    grouped bars, depending on the dimensionality of the key and value
    dimensions. Bars is useful for categorical data, which may be
    laid out by groups, categories and stacks.
"""
group = param.String(default='Bars', constant=True)
kdims = param.List(default=[Dimension('x')], bounds=(1,3))
vdims = param.List(default=[Dimension('y')], bounds=(1, None))
class BoxWhisker(Chart):
"""
BoxWhisker represent data as a distributions highlighting
the median, mean and various percentiles.
"""
group = param.String(default='BoxWhisker', constant=True)
kdims = param.List(default=[], bounds=(0,None))
vdims = param.List(default=[Dimension('y')], bounds=(1,1))
_auto_indexable_1d = False
class Histogram(Element2D):
"""
Histogram contains a number of bins, which are defined by the
upper and lower bounds of their edges and the computed bin values.
"""
kdims = param.List(default=[Dimension('x')], bounds=(1,1), doc="""
Dimensions on Element2Ds determine the number of indexable
dimensions.""")
group = param.String(default='Histogram', constant=True)
vdims = param.List(default=[Dimension('Frequency')], bounds=(1,1))
def __init__(self, values, edges=None, **params):
if edges is not None:
self.warning("Histogram edges should be supplied as a tuple "
"along with the values, passing the edges will "
"be deprecated in holoviews 2.0.")
self.values, self.edges, settings = self._process_data(values, edges)
settings.update(params)
super(Histogram, self).__init__((self.values, self.edges), **settings)
def __getitem__(self, key):
"""
Implements slicing or indexing of the Histogram
"""
if key in self.dimensions(): return self.dimension_values(key)
if key is () or key is Ellipsis: return self # May no longer be necessary
key = util.process_ellipses(self, key)
if not isinstance(key, tuple): pass
elif len(key) == self.ndims + 1:
if key[-1] != slice(None) and (key[-1] not in self.vdims):
raise KeyError("%r is the only selectable value dimension" %
self.vdims[0].name)
key = key[0]
elif len(key) == self.ndims + 1: key = key[0]
else:
raise KeyError("Histogram cannot slice more than %d dimension."
% len(self.kdims)+1)
centers = [(float(l)+r)/2 for (l,r) in zip(self.edges, self.edges[1:])]
if isinstance(key, slice):
start, stop = key.start, key.stop
if [start, stop] == [None,None]: return self
start_idx, stop_idx = None,None
if start is not None:
start_idx = np.digitize([start], centers, right=True)[0]
if stop is not None:
stop_idx = np.digitize([stop], centers, right=True)[0]
slice_end = stop_idx+1 if stop_idx is not None else None
slice_values = self.values[start_idx:stop_idx]
slice_edges = self.edges[start_idx: slice_end]
extents = (min(slice_edges), self.extents[1],
max(slice_edges), self.extents[3])
return self.clone((slice_values, slice_edges), extents=extents)
else:
if not (self.edges.min() <= key < self.edges.max()):
raise KeyError("Key value %s is out of the histogram bounds" % key)
idx = np.digitize([key], self.edges)[0]
return self.values[idx-1 if idx>0 else idx]
def _process_data(self, values, edges):
"""
Ensure that edges are specified as left and right edges of the
histogram bins rather than bin centers.
"""
settings = {}
(values, edges) = values if isinstance(values, tuple) else (values, edges)
if isinstance(values, Chart):
settings = dict(values.get_param_values(onlychanged=True))
edges = values.dimension_values(0)
values = values.dimension_values(1)
elif isinstance(values, np.ndarray) and len(values.shape) == 2:
edges = values[:, 0]
values = values[:, 1]
elif all(isinstance(el, tuple) for el in values):
edges, values = zip(*values)
else:
values = np.array(values)
if edges is None:
edges = np.arange(len(values), dtype=np.float)
else:
edges = np.array(edges, dtype=np.float)
if len(edges) == len(values):
edges = compute_edges(edges)
return values, edges, settings
def range(self, dimension, data_range=True):
if self.get_dimension_index(dimension) == 0 and data_range:
dim = self.get_dimension(dimension)
lower, upper = np.min(self.edges), np.max(self.edges)
return util.dimension_range(lower, upper, dim)
else:
return super(Histogram, self).range(dimension, data_range)
def dimension_values(self, dim):
dim = self.get_dimension(dim, strict=True).name
if dim in self.vdims:
return self.values
elif dim in self.kdims:
return np.convolve(self.edges, np.ones((2,))/2, mode='valid')
else:
return super(Histogram, self).dimension_values(dim)
def sample(self, samples=[], **sample_values):
raise NotImplementedError('Cannot sample a Histogram.')
def reduce(self, dimensions=None, function=None, **reduce_map):
raise NotImplementedError('Reduction of Histogram not implemented.')
class Points(Chart):
"""
Allows sets of points to be positioned over a sheet coordinate
    system. Each point may optionally be associated with a chosen
numeric value.
    The input data can be an Nx2 or Nx3 Numpy array where the first two
    columns correspond to the X,Y coordinates in sheet coordinates,
within the declared bounding region. For Nx3 arrays, the third
column corresponds to the magnitude values of the points. Any
additional columns will be ignored (use VectorFields instead).
The input data may be also be passed as a tuple of elements that
may be numpy arrays or values that can be cast to arrays. When
such a tuple is supplied, the elements are joined column-wise into
a single array, allowing the magnitudes to be easily supplied
separately.
Note that if magnitudes are to be rendered correctly by default,
they should lie in the range [0,1].
"""
kdims = param.List(default=[Dimension('x'), Dimension('y')],
bounds=(2, 2), constant=True, doc="""
The label of the x- and y-dimension of the Points in form
of a string or dimension object.""")
group = param.String(default='Points', constant=True)
vdims = param.List(default=[])
_min_dims = 2 # Minimum number of columns
class VectorField(Points):
"""
    A VectorField is a collection of vectors where each
vector has an associated position in sheet coordinates.
The constructor of VectorField is similar to the constructor of
Points: the input data can be an NxM Numpy array where the first
    two columns correspond to the X,Y coordinates in sheet
coordinates, within the declared bounding region. As with Points,
the input can be a tuple of array objects or of objects that can
be cast to arrays (the tuple elements are joined column-wise).
The third column maps to the vector angle which must be specified
in radians. Note that it is possible to supply a collection which
isn't a numpy array, whereby each element of the collection is
assumed to be an iterable corresponding to a single column of the
NxM array.
The visualization of any additional columns is decided by the
plotting code. For instance, the fourth and fifth columns could
correspond to arrow length and colour map value. All that is
    assumed is that these additional dimensions are normalized between
0.0 and 1.0 for the default visualization to work well.
The only restriction is that the final data array is NxM where
M>3. In other words, the vector must have a dimensionality of 2 or
higher.
"""
group = param.String(default='VectorField', constant=True)
vdims = param.List(default=[Dimension('Angle', cyclic=True, range=(0,2*np.pi)),
Dimension('Magnitude')], bounds=(1, None))
_null_value = np.array([[], [], [], []]).T # For when data is None
_min_dims = 3 # Minimum number of columns
def __init__(self, data, kdims=None, vdims=None, **params):
if isinstance(data, list) and data and all(isinstance(d, np.ndarray) for d in data):
data = np.column_stack([d.flat if d.ndim > 1 else d for d in data])
super(VectorField, self).__init__(data, kdims=kdims, vdims=vdims, **params)
class Spikes(Chart):
"""
Spikes is a 1D or 2D Element, which represents a series of
vertical or horizontal lines distributed along some dimension. If
an additional dimension is supplied it will be used to specify the
height of the lines. The Element may therefore be used to
represent 1D distributions, spectrograms or spike trains in
electrophysiology.
"""
group = param.String(default='Spikes', constant=True)
kdims = param.List(default=[Dimension('x')], bounds=(1, 1))
vdims = param.List(default=[])
_auto_indexable_1d = False
class Area(Curve):
"""
An Area Element represents the area under a Curve
and is specified in the same format as a regular
Curve, with the key dimension corresponding to a
column of x-values and the value dimension
corresponding to a column of y-values. Optionally
a second value dimension may be supplied to shade
the region between the curves.
"""
group = param.String(default='Area', constant=True)
@classmethod
def stack(cls, areas):
"""
Stacks an (Nd)Overlay of Area or Curve Elements by offsetting
their baselines. To stack a HoloMap or DynamicMap use the map
method.
"""
if not len(areas):
return areas
baseline = np.zeros(len(areas.values()[0]))
stacked = areas.clone(shared_data=False)
vdims = [areas.values()[0].vdims[0], 'Baseline']
for k, area in areas.items():
x, y = (area.dimension_values(i) for i in range(2))
stacked[k] = area.clone((x, y+baseline, baseline), vdims=vdims,
new_type=Area)
baseline += y
return stacked
| 1 | 19,581 | maybe 'standard Tukey boxplot definition' if it is standard? Otherwise sounds like it is just *a* definition for boxplots... | holoviz-holoviews | py |
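The Histogram code in the row above leans on two pieces of arithmetic: widening N bin centers into N+1 edges (compute_edges, called when len(edges) == len(values)) and slicing on the key dimension with np.digitize over the centers. The short sketch below replays both steps with plain NumPy; compute_edges itself is not shown in the row, so the centers_to_edges helper and the sample data here are assumptions made for illustration, not the library's implementation.

import numpy as np

def centers_to_edges(centers):
    # Assumed stand-in for compute_edges(): split each gap between centers
    # halfway and extrapolate the two outer edges symmetrically.
    centers = np.asarray(centers, dtype=float)
    mids = (centers[:-1] + centers[1:]) / 2.0
    first = centers[0] - (mids[0] - centers[0])
    last = centers[-1] + (centers[-1] - mids[-1])
    return np.concatenate(([first], mids, [last]))

values = np.array([3, 7, 5, 2])
centers = np.array([0.5, 1.5, 2.5, 3.5])
edges = centers_to_edges(centers)                           # [0., 1., 2., 3., 4.]

# Slicing mirrors the start_idx/stop_idx/slice_end bookkeeping in __getitem__:
start, stop = 1.2, 3.1
start_idx = np.digitize([start], centers, right=True)[0]    # 1
stop_idx = np.digitize([stop], centers, right=True)[0]      # 3
print(values[start_idx:stop_idx])                           # [7 5]  -> bars kept
print(edges[start_idx:stop_idx + 1])                        # [1. 2. 3.]  -> their bounding edges

On this made-up data the slice keeps the two bars whose centers fall inside [1.2, 3.1] plus the three edges that bound them, which is the invariant (one more edge than value) that the clone() call above relies on.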
@@ -55,9 +55,11 @@ var (
// NOTE: The $Format strings are replaced during 'git archive' thanks to the
// companion .gitattributes file containing 'export-subst' in this same
// directory. See also https://git-scm.com/docs/gitattributes
- gitVersion = "v0.0.0-master+$Format:%h$"
+ gitVersion = Default
gitCommit = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState = "" // state of git tree, either "clean" or "dirty"
buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
)
+
+const Default = "v1.9.0" | 1 | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package version
// Base version information.
//
// This is the fallback data used when version information from git is not
// provided via go ldflags. It provides an approximation of the Kubernetes
// version for ad-hoc builds (e.g. `go build`) that cannot get the version
// information from git.
//
// If you are looking at these fields in the git tree, they look
// strange. They are modified on the fly by the build process. The
// in-tree values are dummy values used for "git archive", which also
// works for GitHub tar downloads.
//
// When releasing a new Kubernetes version, this file is updated by
// build/mark_new_version.sh to reflect the new version, and then a
// git annotated tag (using format vX.Y where X == Major version and Y
// == Minor version) is created to point to the commit that updates
// pkg/version/base.go
var (
// TODO: Deprecate gitMajor and gitMinor, use only gitVersion
// instead. First step in deprecation, keep the fields but make
// them irrelevant. (Next we'll take it out, which may muck with
// scripts consuming the kubectl version output - but most of
// these should be looking at gitVersion already anyways.)
gitMajor string // major version, always numeric
gitMinor string // minor version, numeric possibly followed by "+"
// semantic version, derived by build scripts (see
// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md
// for a detailed discussion of this field)
//
// TODO: This field is still called "gitVersion" for legacy
// reasons. For prerelease versions, the build metadata on the
// semantic version is a git hash, but the version itself is no
// longer the direct output of "git describe", but a slight
// translation to be semver compliant.
// NOTE: The $Format strings are replaced during 'git archive' thanks to the
// companion .gitattributes file containing 'export-subst' in this same
// directory. See also https://git-scm.com/docs/gitattributes
gitVersion = "v0.0.0-master+$Format:%h$"
gitCommit = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState = "" // state of git tree, either "clean" or "dirty"
buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
)
| 1 | 24,125 | I feel we should not fix this. | kubeedge-kubeedge | go |
@@ -25,9 +25,9 @@
#include <fastdds/rtps/reader/RTPSReader.h>
#include <fastdds/rtps/writer/RTPSWriter.h>
-#include <fastdds/rtps/transport/UDPv4Transport.h>
-#include <fastdds/rtps/transport/UDPv6Transport.h>
-#include <fastdds/rtps/transport/test_UDPv4Transport.h>
+#include <rtps/transport/UDPv4Transport.h>
+#include <rtps/transport/UDPv6Transport.h>
+#include <rtps/transport/test_UDPv4Transport.h>
#include <fastrtps/xmlparser/XMLProfileManager.h>
| 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* @file RTPSDomain.cpp
*/
#include <fastdds/rtps/RTPSDomain.h>
#include <fastdds/dds/log/Log.hpp>
#include <fastdds/rtps/history/WriterHistory.h>
#include <fastdds/rtps/participant/RTPSParticipant.h>
#include <fastdds/rtps/reader/RTPSReader.h>
#include <fastdds/rtps/writer/RTPSWriter.h>
#include <fastdds/rtps/transport/UDPv4Transport.h>
#include <fastdds/rtps/transport/UDPv6Transport.h>
#include <fastdds/rtps/transport/test_UDPv4Transport.h>
#include <fastrtps/xmlparser/XMLProfileManager.h>
#include <rtps/RTPSDomainImpl.hpp>
#include <rtps/participant/RTPSParticipantImpl.h>
#include <rtps/common/GuidUtils.hpp>
#include <chrono>
#include <thread>
#include <cstdlib>
#include <regex>
namespace eprosima {
namespace fastrtps {
namespace rtps {
static void guid_prefix_create(
uint32_t ID,
GuidPrefix_t& guidP)
{
eprosima::fastdds::rtps::GuidUtils::instance().guid_prefix_create(ID, guidP);
}
// environment variable that forces server-client discovery
// it must contain a list of UDPv4 locators separated by ;
// the position in the list defines the default server that listens on the locator
const char* const DEFAULT_ROS2_MASTER_URI = "ROS_DISCOVERY_SERVER";
std::mutex RTPSDomain::m_mutex;
std::atomic<uint32_t> RTPSDomain::m_maxRTPSParticipantID(1);
std::vector<RTPSDomain::t_p_RTPSParticipant> RTPSDomain::m_RTPSParticipants;
std::set<uint32_t> RTPSDomain::m_RTPSParticipantIDs;
void RTPSDomain::stopAll()
{
std::unique_lock<std::mutex> lock(m_mutex);
logInfo(RTPS_PARTICIPANT, "DELETING ALL ENDPOINTS IN THIS DOMAIN");
while (m_RTPSParticipants.size() > 0)
{
RTPSDomain::t_p_RTPSParticipant participant = m_RTPSParticipants.back();
m_RTPSParticipantIDs.erase(m_RTPSParticipantIDs.find(participant.second->getRTPSParticipantID()));
m_RTPSParticipants.pop_back();
lock.unlock();
RTPSDomain::removeRTPSParticipant_nts(participant);
lock.lock();
}
logInfo(RTPS_PARTICIPANT, "RTPSParticipants deleted correctly ");
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
RTPSParticipant* RTPSDomain::createParticipant(
uint32_t domain_id,
const RTPSParticipantAttributes& attrs,
RTPSParticipantListener* listen)
{
return createParticipant(domain_id, true, attrs, listen);
}
RTPSParticipant* RTPSDomain::createParticipant(
uint32_t domain_id,
bool enabled,
const RTPSParticipantAttributes& attrs,
RTPSParticipantListener* listen)
{
logInfo(RTPS_PARTICIPANT, "");
RTPSParticipantAttributes PParam = attrs;
if (PParam.builtin.discovery_config.leaseDuration < c_TimeInfinite &&
PParam.builtin.discovery_config.leaseDuration <=
PParam.builtin.discovery_config.leaseDuration_announcementperiod)
{
logError(RTPS_PARTICIPANT,
"RTPSParticipant Attributes: LeaseDuration should be >= leaseDuration announcement period");
return nullptr;
}
uint32_t ID;
{
std::lock_guard<std::mutex> guard(m_mutex);
if (PParam.participantID < 0)
{
ID = getNewId();
while (m_RTPSParticipantIDs.insert(ID).second == false)
{
ID = getNewId();
}
}
else
{
ID = PParam.participantID;
if (m_RTPSParticipantIDs.insert(ID).second == false)
{
logError(RTPS_PARTICIPANT, "RTPSParticipant with the same ID already exists");
return nullptr;
}
}
}
if (!PParam.defaultUnicastLocatorList.isValid())
{
logError(RTPS_PARTICIPANT, "Default Unicast Locator List contains invalid Locator");
return nullptr;
}
if (!PParam.defaultMulticastLocatorList.isValid())
{
logError(RTPS_PARTICIPANT, "Default Multicast Locator List contains invalid Locator");
return nullptr;
}
PParam.participantID = ID;
LocatorList_t loc;
IPFinder::getIP4Address(&loc);
// Generate a new GuidPrefix_t
GuidPrefix_t guidP;
guid_prefix_create(ID, guidP);
RTPSParticipant* p = new RTPSParticipant(nullptr);
RTPSParticipantImpl* pimpl = nullptr;
// If we force the participant to have a specific prefix we must define a different persistence GuidPrefix_t that
// would ensure builtin endpoints are able to differentiate between a communication loss and a participant recovery
if (PParam.prefix != c_GuidPrefix_Unknown)
{
pimpl = new RTPSParticipantImpl(domain_id, PParam, PParam.prefix, guidP, p, listen);
}
else
{
pimpl = new RTPSParticipantImpl(domain_id, PParam, guidP, p, listen);
}
    // Above constructors create the sender resources. If a given listening port cannot be allocated, an iterative
    // mechanism will allocate another by default. Changing the default listening port is unacceptable for server
// discovery.
if ((PParam.builtin.discovery_config.discoveryProtocol == DiscoveryProtocol_t::SERVER
|| PParam.builtin.discovery_config.discoveryProtocol == DiscoveryProtocol_t::BACKUP)
&& pimpl->did_mutation_took_place_on_meta(
PParam.builtin.metatrafficMulticastLocatorList,
PParam.builtin.metatrafficUnicastLocatorList))
{
// we do not log an error because the library may use participant creation as a trial for server existence
logInfo(RTPS_PARTICIPANT, "Server wasn't able to allocate the specified listening port");
delete pimpl;
return nullptr;
}
// Check there is at least one transport registered.
if (!pimpl->networkFactoryHasRegisteredTransports())
{
        logError(RTPS_PARTICIPANT, "Cannot create participant, because there is no transport registered");
delete pimpl;
return nullptr;
}
#if HAVE_SECURITY
// Check security was correctly initialized
if (!pimpl->is_security_initialized())
{
logError(RTPS_PARTICIPANT, "Cannot create participant due to security initialization error");
delete pimpl;
return nullptr;
}
#endif // if HAVE_SECURITY
{
std::lock_guard<std::mutex> guard(m_mutex);
m_RTPSParticipants.push_back(t_p_RTPSParticipant(p, pimpl));
}
if (enabled)
{
// Start protocols
pimpl->enable();
}
return p;
}
bool RTPSDomain::removeRTPSParticipant(
RTPSParticipant* p)
{
if (p != nullptr)
{
p->mp_impl->disable();
std::unique_lock<std::mutex> lock(m_mutex);
for (auto it = m_RTPSParticipants.begin(); it != m_RTPSParticipants.end(); ++it)
{
if (it->second->getGuid().guidPrefix == p->getGuid().guidPrefix)
{
RTPSDomain::t_p_RTPSParticipant participant = *it;
m_RTPSParticipants.erase(it);
m_RTPSParticipantIDs.erase(m_RTPSParticipantIDs.find(participant.second->getRTPSParticipantID()));
lock.unlock();
removeRTPSParticipant_nts(participant);
return true;
}
}
}
logError(RTPS_PARTICIPANT, "RTPSParticipant not valid or not recognized");
return false;
}
void RTPSDomain::removeRTPSParticipant_nts(
RTPSDomain::t_p_RTPSParticipant& participant)
{
delete(participant.second);
}
RTPSWriter* RTPSDomain::createRTPSWriter(
RTPSParticipant* p,
WriterAttributes& watt,
WriterHistory* hist,
WriterListener* listen)
{
RTPSParticipantImpl* impl = RTPSDomainImpl::find_local_participant(p->getGuid());
if (impl)
{
RTPSWriter* ret_val = nullptr;
if (impl->createWriter(&ret_val, watt, hist, listen))
{
return ret_val;
}
}
return nullptr;
}
RTPSWriter* RTPSDomain::createRTPSWriter(
RTPSParticipant* p,
WriterAttributes& watt,
const std::shared_ptr<IPayloadPool>& payload_pool,
WriterHistory* hist,
WriterListener* listen)
{
RTPSParticipantImpl* impl = RTPSDomainImpl::find_local_participant(p->getGuid());
if (impl)
{
RTPSWriter* ret_val = nullptr;
if (impl->createWriter(&ret_val, watt, payload_pool, hist, listen))
{
return ret_val;
}
}
return nullptr;
}
bool RTPSDomain::removeRTPSWriter(
RTPSWriter* writer)
{
if (writer != nullptr)
{
std::unique_lock<std::mutex> lock(m_mutex);
for (auto it = m_RTPSParticipants.begin(); it != m_RTPSParticipants.end(); ++it)
{
if (it->first->getGuid().guidPrefix == writer->getGuid().guidPrefix)
{
t_p_RTPSParticipant participant = *it;
lock.unlock();
return participant.second->deleteUserEndpoint((Endpoint*)writer);
}
}
}
return false;
}
RTPSReader* RTPSDomain::createRTPSReader(
RTPSParticipant* p,
ReaderAttributes& ratt,
ReaderHistory* rhist,
ReaderListener* rlisten)
{
RTPSParticipantImpl* impl = RTPSDomainImpl::find_local_participant(p->getGuid());
if (impl)
{
RTPSReader* reader;
if (impl->createReader(&reader, ratt, rhist, rlisten))
{
return reader;
}
}
return nullptr;
}
RTPSReader* RTPSDomain::createRTPSReader(
RTPSParticipant* p,
ReaderAttributes& ratt,
const std::shared_ptr<IPayloadPool>& payload_pool,
ReaderHistory* rhist,
ReaderListener* rlisten)
{
RTPSParticipantImpl* impl = RTPSDomainImpl::find_local_participant(p->getGuid());
if (impl)
{
RTPSReader* reader;
if (impl->createReader(&reader, ratt, payload_pool, rhist, rlisten))
{
return reader;
}
}
return nullptr;
}
bool RTPSDomain::removeRTPSReader(
RTPSReader* reader)
{
if (reader != nullptr)
{
std::unique_lock<std::mutex> lock(m_mutex);
for (auto it = m_RTPSParticipants.begin(); it != m_RTPSParticipants.end(); ++it)
{
if (it->first->getGuid().guidPrefix == reader->getGuid().guidPrefix)
{
t_p_RTPSParticipant participant = *it;
lock.unlock();
return participant.second->deleteUserEndpoint((Endpoint*)reader);
}
}
}
return false;
}
RTPSParticipant* RTPSDomain::clientServerEnvironmentCreationOverride(
uint32_t domain_id,
bool enabled,
const RTPSParticipantAttributes& att,
RTPSParticipantListener* listen /*= nullptr*/)
{
// retrieve the environment variable value
std::string list;
{
#pragma warning(suppress:4996)
const char* data = std::getenv(DEFAULT_ROS2_MASTER_URI);
if (nullptr != data)
{
list = data;
}
else
{
// if the variable is not set abort the server-client default setup
return nullptr;
}
}
// Check the specified discovery protocol: if other than simple it has priority over ros environment variable
if (att.builtin.discovery_config.discoveryProtocol != DiscoveryProtocol_t::SIMPLE)
{
logInfo(DOMAIN, "Detected non simple discovery protocol attributes."
<< " Ignoring auto default client-server setup.");
return nullptr;
}
    // we only make the attributes copy when we are sure it is worth it
RTPSParticipantAttributes client_att(att);
// Retrieve the info from the environment variable
if (!load_environment_server_info(
list,
client_att.builtin.discovery_config.m_DiscoveryServers))
{
// it's not an error, the environment variable may not be set. Any issue with environment
        // variable syntax has already been reported via logError
return nullptr;
}
logInfo(DOMAIN, "Detected auto client-server environment variable."
"Trying to create client with the default server setup.");
client_att.builtin.discovery_config.discoveryProtocol = DiscoveryProtocol_t::CLIENT;
    // RemoteServerAttributes already filled in above
RTPSParticipant* part = RTPSDomain::createParticipant(domain_id, enabled, client_att, listen);
if (nullptr != part)
{
// client successfully created
logInfo(DOMAIN, "Auto default server-client setup. Default client created.");
return part;
}
// unable to create auto server-client default participants
logError(DOMAIN, "Auto default server-client setup. Unable to create the client.");
return nullptr;
}
void RTPSDomainImpl::create_participant_guid(
int32_t& participant_id,
GUID_t& guid)
{
if (participant_id < 0)
{
std::lock_guard<std::mutex> guard(RTPSDomain::m_mutex);
do
{
participant_id = RTPSDomain::getNewId();
} while (RTPSDomain::m_RTPSParticipantIDs.find(participant_id) != RTPSDomain::m_RTPSParticipantIDs.end());
}
guid_prefix_create(participant_id, guid.guidPrefix);
guid.entityId = c_EntityId_RTPSParticipant;
}
RTPSParticipantImpl* RTPSDomainImpl::find_local_participant(
const GUID_t& guid)
{
std::lock_guard<std::mutex> guard(RTPSDomain::m_mutex);
for (const RTPSDomain::t_p_RTPSParticipant& participant : RTPSDomain::m_RTPSParticipants)
{
if (participant.second->getGuid().guidPrefix == guid.guidPrefix)
{
// Participant found, forward the query
return participant.second;
}
}
return nullptr;
}
RTPSReader* RTPSDomainImpl::find_local_reader(
const GUID_t& reader_guid)
{
std::lock_guard<std::mutex> guard(RTPSDomain::m_mutex);
for (const RTPSDomain::t_p_RTPSParticipant& participant : RTPSDomain::m_RTPSParticipants)
{
if (participant.second->getGuid().guidPrefix == reader_guid.guidPrefix)
{
// Participant found, forward the query
return participant.second->find_local_reader(reader_guid);
}
}
return nullptr;
}
RTPSWriter* RTPSDomainImpl::find_local_writer(
const GUID_t& writer_guid)
{
std::lock_guard<std::mutex> guard(RTPSDomain::m_mutex);
for (const RTPSDomain::t_p_RTPSParticipant& participant : RTPSDomain::m_RTPSParticipants)
{
if (participant.second->getGuid().guidPrefix == writer_guid.guidPrefix)
{
// Participant found, forward the query
return participant.second->find_local_writer(writer_guid);
}
}
return nullptr;
}
/**
* Check whether intraprocess delivery should be used between two GUIDs.
*
* @param local_guid GUID of the local endpoint performing the query.
* @param matched_guid GUID being queried about.
*
* @returns true when intraprocess delivery is enabled, false otherwise.
*/
bool RTPSDomainImpl::should_intraprocess_between(
const GUID_t& local_guid,
const GUID_t& matched_guid)
{
if (!local_guid.is_on_same_process_as(matched_guid))
{
// Not on the same process, should not use intraprocess mechanism.
return false;
}
if (local_guid.entityId == c_EntityId_SPDPWriter || local_guid.entityId == c_EntityId_SPDPReader)
{
// Always disabled for PDP, to avoid inter-domain communications.
return false;
}
switch (xmlparser::XMLProfileManager::library_settings().intraprocess_delivery)
{
case IntraprocessDeliveryType::INTRAPROCESS_FULL:
return true;
case IntraprocessDeliveryType::INTRAPROCESS_USER_DATA_ONLY:
return !matched_guid.is_builtin();
case IntraprocessDeliveryType::INTRAPROCESS_OFF:
default:
break;
}
return false;
}
} // namespace rtps
} // namespace fastrtps
} // namespace eprosima
| 1 | 20,499 | I think these are not necessary | eProsima-Fast-DDS | cpp |
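The comment block and switch in should_intraprocess_between above reduce to three checks: the two GUIDs must belong to the same process, the local endpoint must not be a PDP builtin, and the library-wide intraprocess setting decides the rest. The Python sketch below restates those rules outside of Fast DDS purely as an illustration; the enum values and the dict-based endpoints are simplified stand-ins for the real GUID and settings types.

from enum import Enum

class IntraprocessDeliveryType(Enum):
    OFF = 0
    USER_DATA_ONLY = 1
    FULL = 2

def should_intraprocess_between(local, matched, delivery):
    # Endpoints are modelled as dicts with 'process', 'is_pdp' and 'is_builtin'.
    if local["process"] != matched["process"]:
        return False                       # different processes: never intraprocess
    if local["is_pdp"]:
        return False                       # PDP always goes over the network
    if delivery is IntraprocessDeliveryType.FULL:
        return True
    if delivery is IntraprocessDeliveryType.USER_DATA_ONLY:
        return not matched["is_builtin"]   # user endpoints only
    return False                           # OFF and anything unexpected

reader = {"process": 42, "is_pdp": False, "is_builtin": False}
builtin_writer = {"process": 42, "is_pdp": False, "is_builtin": True}
print(should_intraprocess_between(reader, builtin_writer,
                                  IntraprocessDeliveryType.USER_DATA_ONLY))  # False
print(should_intraprocess_between(reader, builtin_writer,
                                  IntraprocessDeliveryType.FULL))            # True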
@@ -476,7 +476,8 @@ Cross-TU analysis. By default, no CTU analysis is run when
action='store_true',
dest='ctu_reanalyze_on_failure',
default=argparse.SUPPRESS,
- help="If Cross-TU analysis is enabled and fails "
+ help="DEPRECATED. The flag will be removed. "
+ "If Cross-TU analysis is enabled and fails "
"for some reason, try to re analyze the "
"same translation unit without "
"Cross-TU enabled.") | 1 | # -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Execute analysis over an already existing build.json compilation database.
"""
import argparse
import collections
import json
import os
import re
import shutil
import sys
from codechecker_analyzer import analyzer, analyzer_context, arg, env
from codechecker_analyzer.analyzers import analyzer_types
from codechecker_analyzer.arg import OrderedCheckersAction
from codechecker_analyzer.buildlog import log_parser
from codechecker_common import arg, logger, skiplist_handler
from codechecker_common.util import load_json_or_empty
LOG = logger.get_logger('system')
def get_argparser_ctor_args():
"""
This method returns a dict containing the kwargs for constructing an
argparse.ArgumentParser (either directly or as a subparser).
"""
return {
'prog': 'CodeChecker analyze',
'formatter_class': arg.RawDescriptionDefaultHelpFormatter,
# Description is shown when the command's help is queried directly
'description': """
Use the previously created JSON Compilation Database to perform an analysis on
the project, outputting analysis results in a machine-readable format.""",
# Epilogue is shown after the arguments when the help is queried
# directly.
'epilog': """
environment variables:
CC_ANALYZERS_FROM_PATH Set to `yes` or `1` to enforce taking the analyzers
from the `PATH` instead of the given binaries.
CC_CLANGSA_PLUGIN_DIR If the CC_ANALYZERS_FROM_PATH environment variable
is set you can configure the plugin directory of the
Clang Static Analyzer by using this environment
variable.
issue hashes:
- By default the issue hash calculation method for 'Clang Static Analyzer' is
context sensitive. It means the hash will be generated based on the following
information:
* signature of the enclosing function declaration, type declaration or
namespace.
* content of the line where the bug is.
* unique name of the checker.
* position (column) within the line.
- By default the issue hash calculation method for 'Clang Tidy' is context
insensitive. It means the hash will be generated based on the following
information:
* 'file name' from the main diag section.
* 'checker name'.
* 'checker message'.
  * 'line content' from the source file if it can be read.
* 'column numbers' from the main diag section.
* 'range column numbers' only from the control diag sections if column number
in the range is not the same as the previous control diag section number in
the bug path. If there are no control sections event section column numbers
are used.
- context-free: there was a bug and for Clang Tidy the default hash was
generated and not the context free hash (kept for backward compatibility). Use
'context-free-v2' instead of this.
- context-free-v2:
* 'file name' from the main diag section.
* 'checker message'.
  * 'line content' from the source file if it can be read. All the whitespaces
from the source content are removed.
* 'column numbers' from the main diag sections location.
OUR RECOMMENDATION: we recommend using the 'context-free-v2' hash because the
hash will not change so easily, for example on code indentation or when a
checker is renamed.
Compilation databases can be created by instrumenting your project's build via
'CodeChecker log'. To transform the results of the analysis to a human-friendly
format, please see the commands 'CodeChecker parse' or 'CodeChecker store'.""",
# Help is shown when the "parent" CodeChecker command lists the
# individual subcommands.
'help': "Execute the supported code analyzers for the files "
"recorded in a JSON Compilation Database."
}
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
parser.add_argument('logfile',
type=str,
help="Path to the JSON compilation command database "
"files which were created during the build. "
"The analyzers will check only the files "
"registered in these build databases.")
parser.add_argument('-j', '--jobs',
type=int,
dest="jobs",
required=False,
default=1,
help="Number of threads to use in analysis. More "
"threads mean faster analysis at the cost of "
"using more memory.")
skip_mode = parser.add_mutually_exclusive_group()
skip_mode.add_argument('-i', '--ignore', '--skip',
dest="skipfile",
required=False,
default=argparse.SUPPRESS,
help="Path to the Skipfile dictating which project "
"files should be omitted from analysis. "
"Please consult the User guide on how a "
"Skipfile should be laid out.")
skip_mode.add_argument('--file',
nargs='+',
dest="files",
metavar='FILE',
required=False,
default=argparse.SUPPRESS,
help="Analyze only the given file(s) not the whole "
"compilation database. Absolute directory "
"paths should start with '/', relative "
"directory paths should start with '*' and "
"it can contain path glob pattern. "
"Example: '/path/to/main.cpp', 'lib/*.cpp', "
"*/test*'.")
parser.add_argument('-o', '--output',
dest="output_path",
required=True,
default=argparse.SUPPRESS,
help="Store the analysis output in the given folder.")
parser.add_argument('--compiler-info-file',
dest="compiler_info_file",
required=False,
default=argparse.SUPPRESS,
help="Read the compiler includes and target from the "
"specified file rather than invoke the compiler "
"executable.")
parser.add_argument('--keep-gcc-include-fixed',
dest="keep_gcc_include_fixed",
required=False,
action='store_true',
default=False,
help="There are some implicit include paths which are "
"only used by GCC (include-fixed). This flag "
"determines whether these should be kept among "
"the implicit include paths.")
parser.add_argument('--keep-gcc-intrin',
dest="keep_gcc_intrin",
required=False,
action='store_true',
default=False,
help="There are some implicit include paths which "
"contain GCC-specific header files (those "
"which end with intrin.h). This flag determines "
"whether these should be kept among the implicit "
"include paths. Use this flag if Clang analysis "
"fails with error message related to __builtin "
"symbols.")
parser.add_argument('-t', '--type', '--output-format',
dest="output_format",
required=False,
choices=['plist'],
default='plist',
help="Specify the format the analysis results should "
"use.")
parser.add_argument('--makefile',
dest="makefile",
required=False,
action='store_true',
default=False,
help="Generate a Makefile in the given output "
"directory from the analyzer commands and do not "
"execute the analysis. The analysis can be "
"executed by calling the make command like "
"'make -f output_dir/Makefile'. You can ignore "
"errors with the -i/--ignore-errors options: "
"'make -f output_dir/Makefile -i'.")
parser.add_argument('-q', '--quiet',
dest="quiet",
action='store_true',
default=argparse.SUPPRESS,
required=False,
help="Do not print the output or error of the "
"analyzers to the standard output of "
"CodeChecker.")
parser.add_argument('-c', '--clean',
dest="clean",
required=False,
action='store_true',
default=argparse.SUPPRESS,
                        help="Delete analysis reports stored in the output "
                             "directory. (By default, CodeChecker keeps "
                             "reports and overwrites only those files that "
                             "were updated by the current build command).")
parser.add_argument('--compile-uniqueing',
type=str,
dest="compile_uniqueing",
default="none",
required=False,
                        help="Specify the method the compilation "
                             "actions in the compilation database are "
                             "uniqued before analysis. "
                             "CTU analysis works properly only if "
                             "there is exactly one "
                             "compilation action per source file. "
                             "none(default in non CTU mode): "
                             "no uniqueing is done. "
                             "strict: no uniqueing is done, "
                             "and an error is given if "
                             "there is more than one compilation "
                             "action for a source file. "
                             "alpha(default in CTU mode): If there is more "
                             "than one compilation action for a source "
                             "file, only the one is kept that belongs to the "
                             "alphabetically first "
                             "compilation target. "
                             "If none of the above is given, "
                             "this parameter should "
                             "be a python regular expression. "
"If there is more than one compilation action "
"for a source, "
"only the one is kept which matches the "
"given python regex. If more than one "
"matches an error is given. "
"The whole compilation "
"action text is searched for match.")
parser.add_argument('--report-hash',
dest="report_hash",
default=argparse.SUPPRESS,
required=False,
choices=['context-free', 'context-free-v2'],
                        help="R|Specify the hash calculation method for "
                             "reports. By default the calculation method for "
                             "Clang Static Analyzer is context sensitive and "
                             "for Clang Tidy it is context insensitive.\n"
                             "You can use the following calculation methods:\n"
                             "- context-free: due to a bug, the context-free "
                             "hash was not generated for Clang Tidy "
                             "(kept for backward compatibility).\n"
"- context-free-v2: context free hash is used "
"for ClangSA and Clang Tidy.\n"
"See the 'issue hashes' section of the help "
"message of this command below for more "
"information.\n"
"USE WISELY AND AT YOUR OWN RISK!")
parser.add_argument('-n', '--name',
dest="name",
required=False,
default=argparse.SUPPRESS,
help="Annotate the run analysis with a custom name in "
"the created metadata file.")
analyzer_opts = parser.add_argument_group("analyzer arguments")
analyzer_opts.add_argument('--analyzers',
nargs='+',
dest='analyzers',
metavar='ANALYZER',
required=False,
choices=analyzer_types.supported_analyzers,
default=argparse.SUPPRESS,
help="Run analysis only with the analyzers "
"specified. Currently supported analyzers "
"are: " +
', '.join(analyzer_types.
supported_analyzers) + ".")
analyzer_opts.add_argument('--capture-analysis-output',
dest='capture_analysis_output',
action='store_true',
default=argparse.SUPPRESS,
required=False,
help="Store standard output and standard error "
"of successful analyzer invocations "
"into the '<OUTPUT_DIR>/success' "
"directory.")
analyzer_opts.add_argument('--config',
dest='config_file',
required=False,
help="R|Allow the configuration from an "
"explicit JSON based configuration file. "
"The value of the 'analyzer' key in the "
"config file will be emplaced as command "
"line arguments. The format of "
"configuration file is:\n"
"{\n"
" \"analyzer\": [\n"
" \"--enable=core.DivideZero\",\n"
" \"--enable=core.CallAndMessage\",\n"
" \"--report-hash=context-free-v2\",\n"
" \"--verbose=debug\",\n"
" \"--clean\"\n"
" ]\n"
"}")
analyzer_opts.add_argument('--saargs',
dest="clangsa_args_cfg_file",
required=False,
default=argparse.SUPPRESS,
help="File containing argument which will be "
"forwarded verbatim for the Clang Static "
"Analyzer.")
analyzer_opts.add_argument('--tidyargs',
dest="tidy_args_cfg_file",
required=False,
default=argparse.SUPPRESS,
help="File containing argument which will be "
"forwarded verbatim for Clang-Tidy.")
analyzer_opts.add_argument('--tidy-config',
dest='tidy_config',
required=False,
default=argparse.SUPPRESS,
help="A file in YAML format containing the "
"configuration of clang-tidy checkers. "
"The file can be dumped by "
"'CodeChecker analyzers --dump-config "
"clang-tidy' command.")
analyzer_opts.add_argument('--analyzer-config',
dest='analyzer_config',
nargs='*',
default=["clang-tidy:HeaderFilterRegex=.*"],
help="Analyzer configuration options in the "
"following format: analyzer:key=value. "
"The collection of the options can be "
"printed with "
"'CodeChecker analyzers "
"--analyzer-config'. To disable the "
"default behaviour of this option you can "
"use the "
"'clang-tidy:take-config-from-directory="
"true' option.")
analyzer_opts.add_argument('--checker-config',
dest='checker_config',
nargs='*',
default=argparse.SUPPRESS,
help="Checker configuration options in the "
"following format: analyzer:key=value. "
"The collection of the options can be "
"printed with "
"'CodeChecker checkers --checker-config'.")
analyzer_opts.add_argument('--timeout',
type=int,
dest='timeout',
required=False,
default=argparse.SUPPRESS,
help="The amount of time (in seconds) that "
"each analyzer can spend, individually, "
"to analyze the project. If the analysis "
"of a particular file takes longer than "
"this time, the analyzer is killed and "
"the analysis is considered as a failed "
"one.")
context = analyzer_context.get_context()
clang_has_z3 = analyzer_types.is_z3_capable(context)
if clang_has_z3:
analyzer_opts.add_argument('--z3',
dest='enable_z3',
choices=['on', 'off'],
default='off',
help="Enable the z3 solver backend. This "
"allows reasoning over more complex "
"queries, but performance is worse "
"than the default range-based "
"constraint solver.")
clang_has_z3_refutation = analyzer_types.is_z3_refutation_capable(context)
if clang_has_z3_refutation:
analyzer_opts.add_argument('--z3-refutation',
dest='enable_z3_refutation',
choices=['on', 'off'],
default='on' if clang_has_z3_refutation
else 'off',
help="Switch on/off the Z3 SMT Solver "
"backend to "
"reduce false positives. The results "
"of the ranged based constraint "
"solver in the Clang Static Analyzer "
"will be cross checked with the Z3 "
"SMT solver. This should not cause "
"that much of a slowdown compared to "
"using the Z3 solver only.")
if analyzer_types.is_ctu_capable(context):
ctu_opts = parser.add_argument_group(
"cross translation unit analysis arguments",
"""
These arguments are only available if the Clang Static Analyzer supports
Cross-TU analysis. By default, no CTU analysis is run when
'CodeChecker analyze' is called.""")
ctu_modes = ctu_opts.add_mutually_exclusive_group()
ctu_modes.add_argument('--ctu', '--ctu-all',
action='store_const',
const=[True, True],
dest='ctu_phases',
default=argparse.SUPPRESS,
help="Perform Cross Translation Unit (CTU) "
"analysis, both 'collect' and 'analyze' "
"phases. In this mode, the extra files "
"created by 'collect' are cleaned up "
"after the analysis.")
ctu_modes.add_argument('--ctu-collect',
action='store_const',
const=[True, False],
dest='ctu_phases',
default=argparse.SUPPRESS,
help="Perform the first, 'collect' phase of "
"Cross-TU analysis. This phase generates "
"extra files needed by CTU analysis, and "
"puts them into '<OUTPUT_DIR>/ctu-dir'. "
"NOTE: If this argument is present, "
"CodeChecker will NOT execute the "
"analyzers!")
ctu_modes.add_argument('--ctu-analyze',
action='store_const',
const=[False, True],
dest='ctu_phases',
default=argparse.SUPPRESS,
help="Perform the second, 'analyze' phase of "
"Cross-TU analysis, using already "
"available extra files in "
"'<OUTPUT_DIR>/ctu-dir'. (These files "
"will not be cleaned up in this mode.)")
ctu_opts.add_argument('--ctu-reanalyze-on-failure',
action='store_true',
dest='ctu_reanalyze_on_failure',
default=argparse.SUPPRESS,
help="If Cross-TU analysis is enabled and fails "
"for some reason, try to re analyze the "
"same translation unit without "
"Cross-TU enabled.")
if analyzer_types.is_statistics_capable(context):
stat_opts = parser.add_argument_group(
"Statistics analysis feature arguments",
"""
These arguments are only available if the Clang Static Analyzer supports
Statistics-based analysis (e.g. statisticsCollector.ReturnValueCheck,
statisticsCollector.SpecialReturnValue checkers are available).""")
stat_opts.add_argument('--stats-collect', '--stats-collect',
action='store',
default=argparse.SUPPRESS,
dest='stats_output',
help="Perform the first, 'collect' phase of "
"Statistical analysis. This phase "
"generates extra files needed by "
"statistics analysis, and "
"puts them into "
"'<STATS_OUTPUT>'."
" NOTE: If this argument is present, "
"CodeChecker will NOT execute the "
"analyzers!")
stat_opts.add_argument('--stats-use', '--stats-use',
action='store',
default=argparse.SUPPRESS,
dest='stats_dir',
help="Use the previously generated statistics "
"results for the analysis from the given "
"'<STATS_DIR>'.")
stat_opts.add_argument('--stats',
action='store_true',
default=argparse.SUPPRESS,
dest='stats_enabled',
help="Perform both phases of "
"Statistical analysis. This phase "
"generates extra files needed by "
"statistics analysis and enables "
"the statistical checkers. "
"No need to enable them explicitly.")
stat_opts.add_argument('--stats-min-sample-count',
action='store',
default="10",
type=int,
dest='stats_min_sample_count',
                           help="Minimum number of samples (function call"
                                " occurrences) to be collected"
                                " for a statistic to be relevant "
                                "'<MIN-SAMPLE-COUNT>'.")
stat_opts.add_argument('--stats-relevance-threshold',
action='store',
default="0.85",
type=float,
dest='stats_relevance_threshold',
                           help="The minimum ratio of calls of function "
                                "f that must have a certain property "
                                "to consider it true for that "
                                "function (calculated as calls "
                                "with a property/all calls)."
                                " CodeChecker will warn for"
                                " calls of f that do not have that property."
                                "'<RELEVANCE_THRESHOLD>'.")
checkers_opts = parser.add_argument_group(
"checker configuration",
"""
Checkers
------------------------------------------------
The analyzer performs checks that are categorized into families or "checkers".
See 'CodeChecker checkers' for the list of available checkers. You can
fine-tune which checkers to use in the analysis by setting the enabled and
disabled flags starting from the bigger groups and going inwards, e.g.
'-e core -d core.uninitialized -e core.uninitialized.Assign' will enable every
'core' checker, but only 'core.uninitialized.Assign' from the
'core.uninitialized' group. Please consult the manual for details. Disabling
certain checkers - such as the 'core' group - is unsupported by the LLVM/Clang
community, and thus discouraged.
Compiler warnings and errors
------------------------------------------------
Compiler warnings are diagnostic messages that report constructions that are
not inherently erroneous but that are risky or suggest there may have been an
error. Compiler warnings are named 'clang-diagnostic-<warning-option>', e.g.
Clang warning controlled by '-Wliteral-conversion' will be reported with check
name 'clang-diagnostic-literal-conversion'. You can fine-tune which warnings to
use in the analysis by setting the enabled and disabled flags starting from the
bigger groups and going inwards, e.g. '-e Wunused -d Wno-unused-parameter' will
enable every 'unused' warning except 'unused-parameter'. These flags should
start with a capital 'W' or 'Wno-' prefix followed by the warning name (E.g.:
'-e Wliteral-conversion', '-d Wno-literal-conversion'). By default '-Wall' and
'-Wextra' warnings are enabled. For more information see:
https://clang.llvm.org/docs/DiagnosticsReference.html.
Sometimes GCC is more permissive than Clang, so it is possible that a specific
construction doesn't compile with Clang but compiles with GCC. These
compiler errors are also collected as CodeChecker reports as
'clang-diagnostic-error'.
Note that compiler errors and warnings are captured by CodeChecker only if it
was emitted by clang-tidy.""")
checkers_opts.add_argument('-e', '--enable',
dest="enable",
metavar='checker/group/profile',
default=argparse.SUPPRESS,
action=OrderedCheckersAction,
help="Set a checker (or checker group) "
"to BE USED in the analysis.")
checkers_opts.add_argument('-d', '--disable',
dest="disable",
metavar='checker/group/profile',
default=argparse.SUPPRESS,
action=OrderedCheckersAction,
help="Set a checker (or checker group) "
"to BE PROHIBITED from use in the "
"analysis.")
checkers_opts.add_argument('--enable-all',
dest="enable_all",
action='store_true',
required=False,
default=argparse.SUPPRESS,
help="Force the running analyzers to use "
"almost every checker available. The "
"checker groups 'alpha.', 'debug.' and "
"'osx.' (on Linux) are NOT enabled "
"automatically and must be EXPLICITLY "
"specified. WARNING! Enabling all "
"checkers might result in the analysis "
"losing precision and stability, and "
"could even result in a total failure of "
"the analysis. USE WISELY AND AT YOUR "
"OWN RISK!")
logger.add_verbose_arguments(parser)
parser.set_defaults(func=main,
func_process_config_file=process_config_file)
def process_config_file(args):
"""
Handler to get config file options.
"""
if args.config_file and os.path.exists(args.config_file):
cfg = load_json_or_empty(args.config_file, default={})
return cfg.get('analyzer', [])
def check_config_file(args):
"""
    Log and check the config file usage.
If a config file is set but does not exist the program will
exit.
LOG is not initialized in the process_config_file function yet
so we can not log the usage there. Using print will
always print out the config file data which can mess up the
tests depending on the output.
"""
if args.config_file and not os.path.exists(args.config_file):
LOG.error("Configuration file '%s' does not exist.",
args.config_file)
sys.exit(1)
elif not args.config_file:
return
cfg = load_json_or_empty(args.config_file, default={})
if cfg.get("enabled"):
LOG.debug("Using config file: '%s'.", args.config_file)
return cfg.get('analyzer', [])
LOG.debug("Config file '%s' is available but disabled.", args.config_file)
def __get_skip_handler(args):
"""
Initialize and return a skiplist handler if
there is a skip list file in the arguments or files options is provided.
"""
skip_file_content = ""
if 'files' in args:
# Creates a skip file where all source files will be skipped except
# the given source files and all the header files.
skip_files = ['+{0}'.format(f) for f in args.files]
skip_files.extend(['+/*.h', '+/*.H', '+/*.tcc'])
skip_files.append('-*')
skip_file_content = "\n".join(skip_files)
elif 'skipfile' in args:
with open(args.skipfile,
encoding="utf-8", errors="ignore") as skip_file:
skip_file_content = skip_file.read()
if skip_file_content:
LOG.debug_analyzer("Creating skiplist handler.")
return skiplist_handler.SkipListHandler(skip_file_content)
def __update_skip_file(args):
"""
Remove previous skip file if there was any.
"""
skip_file_to_send = os.path.join(args.output_path, 'skip_file')
if os.path.exists(skip_file_to_send):
os.remove(skip_file_to_send)
if 'skipfile' in args:
LOG.debug("Copying skip file %s to %s",
args.skipfile, skip_file_to_send)
shutil.copyfile(args.skipfile, skip_file_to_send)
def __cleanup_metadata(metadata_prev, metadata):
""" Cleanup metadata.
If the source file for a plist does not exist, remove the plist from the
system and from the metadata.
"""
if not metadata_prev:
return
result_src_files = __get_result_source_files(metadata_prev)
for plist_file, source_file in result_src_files.items():
if not os.path.exists(source_file):
try:
LOG.info("Remove plist file '%s' because it refers to a "
"source file ('%s') which was removed.",
plist_file, source_file)
del metadata['result_source_files'][plist_file]
os.remove(plist_file)
except OSError:
LOG.warning("Failed to remove plist file: %s", plist_file)
def __get_result_source_files(metadata):
""" Get result source files from the given metadata. """
if 'result_source_files' in metadata:
return metadata['result_source_files']
result_src_files = {}
for tool in metadata.get('tools', {}):
r_src_files = tool.get('result_source_files', {})
result_src_files.update(r_src_files.items())
return result_src_files
def main(args):
"""
Perform analysis on the given logfiles and store the results in a machine-
readable format.
"""
logger.setup_logger(args.verbose if 'verbose' in args else None)
check_config_file(args)
if not os.path.exists(args.logfile):
LOG.error("The specified logfile '%s' does not exist!", args.logfile)
sys.exit(1)
args.output_path = os.path.abspath(args.output_path)
if os.path.exists(args.output_path) and \
not os.path.isdir(args.output_path):
LOG.error("The given output path is not a directory: " +
args.output_path)
sys.exit(1)
if 'enable_all' in args:
LOG.info("'--enable-all' was supplied for this analysis.")
# We clear the output directory in the following cases.
ctu_dir = os.path.join(args.output_path, 'ctu-dir')
if 'ctu_phases' in args and args.ctu_phases[0] and \
os.path.isdir(ctu_dir):
# Clear the CTU-dir if the user turned on the collection phase.
LOG.debug("Previous CTU contents have been deleted.")
shutil.rmtree(ctu_dir)
if 'clean' in args and os.path.isdir(args.output_path):
LOG.info("Previous analysis results in '%s' have been removed, "
"overwriting with current result", args.output_path)
shutil.rmtree(args.output_path)
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
LOG.debug("args: " + str(args))
LOG.debug("Output will be stored to: '" + args.output_path + "'")
config_option_re = re.compile(r'^({}):.+=.+$'.format(
'|'.join(analyzer_types.supported_analyzers)))
# Check the format of analyzer options.
if 'analyzer_config' in args:
for config in args.analyzer_config:
if not re.match(config_option_re, config):
LOG.error("Analyzer option in wrong format: %s", config)
sys.exit(1)
# Check the format of checker options.
if 'checker_config' in args:
for config in args.checker_config:
if not re.match(config_option_re, config):
LOG.error("Checker option in wrong format: %s", config)
sys.exit(1)
# Process the skip list if present.
skip_handler = __get_skip_handler(args)
# Enable alpha uniqueing by default if ctu analysis is used.
if 'none' in args.compile_uniqueing and 'ctu_phases' in args:
args.compile_uniqueing = "alpha"
compiler_info_file = None
if 'compiler_info_file' in args:
LOG.debug("Compiler info is read from: %s", args.compiler_info_file)
if not os.path.exists(args.compiler_info_file):
LOG.error("Compiler info file %s does not exist",
args.compiler_info_file)
sys.exit(1)
compiler_info_file = args.compiler_info_file
ctu_or_stats_enabled = False
# Skip list is applied only in pre-analysis
# if --ctu-collect was called explicitly.
pre_analysis_skip_handler = None
if 'ctu_phases' in args:
ctu_collect = args.ctu_phases[0]
ctu_analyze = args.ctu_phases[1]
if ctu_collect and not ctu_analyze:
pre_analysis_skip_handler = skip_handler
if ctu_collect or ctu_analyze:
ctu_or_stats_enabled = True
# Skip list is applied only in pre-analysis
# if --stats-collect was called explicitly.
if 'stats_output' in args and args.stats_output:
pre_analysis_skip_handler = skip_handler
ctu_or_stats_enabled = True
if 'stats_enabled' in args and args.stats_enabled:
ctu_or_stats_enabled = True
context = analyzer_context.get_context()
analyzer_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
compile_commands = load_json_or_empty(args.logfile, default={})
# Number of all the compilation commands in the parsed log files,
# logged by the logger.
all_cmp_cmd_count = len(compile_commands)
actions, skipped_cmp_cmd_count = log_parser.parse_unique_log(
compile_commands,
args.output_path,
args.compile_uniqueing,
compiler_info_file,
args.keep_gcc_include_fixed,
args.keep_gcc_intrin,
skip_handler,
pre_analysis_skip_handler,
ctu_or_stats_enabled,
analyzer_env)
if not actions:
LOG.info("No analysis is required.\nThere were no compilation "
"commands in the provided compilation database or "
"all of them were skipped.")
sys.exit(0)
uniqued_compilation_db_file = os.path.join(
args.output_path, "unique_compile_commands.json")
with open(uniqued_compilation_db_file, 'w',
encoding="utf-8", errors="ignore") as f:
json.dump(actions, f,
cls=log_parser.CompileCommandEncoder)
metadata = {
'version': 2,
'tools': [{
'name': 'codechecker',
'action_num': len(actions),
'command': sys.argv,
'version': "{0} ({1})".format(context.package_git_tag,
context.package_git_hash),
'working_directory': os.getcwd(),
'output_path': args.output_path,
'result_source_files': {},
'analyzers': {}
}]}
metadata_tool = metadata['tools'][0]
if 'name' in args:
metadata_tool['run_name'] = args.name
# Update metadata dictionary with old values.
metadata_file = os.path.join(args.output_path, 'metadata.json')
metadata_prev = None
if os.path.exists(metadata_file):
metadata_prev = load_json_or_empty(metadata_file)
metadata_tool['result_source_files'] = \
__get_result_source_files(metadata_prev)
CompileCmdParseCount = \
collections.namedtuple('CompileCmdParseCount',
'total, analyze, skipped, removed_by_uniqueing')
cmp_cmd_to_be_uniqued = all_cmp_cmd_count - skipped_cmp_cmd_count
# Number of compile commands removed during uniqueing.
removed_during_uniqueing = cmp_cmd_to_be_uniqued - len(actions)
all_to_be_analyzed = cmp_cmd_to_be_uniqued - removed_during_uniqueing
compile_cmd_count = CompileCmdParseCount(
total=all_cmp_cmd_count,
analyze=all_to_be_analyzed,
skipped=skipped_cmp_cmd_count,
removed_by_uniqueing=removed_during_uniqueing)
LOG.debug_analyzer("Total number of compile commands without "
"skipping or uniqueing: %d", compile_cmd_count.total)
LOG.debug_analyzer("Compile commands removed by uniqueing: %d",
compile_cmd_count.removed_by_uniqueing)
LOG.debug_analyzer("Compile commands skipped during log processing: %d",
compile_cmd_count.skipped)
LOG.debug_analyzer("Compile commands forwarded for analysis: %d",
compile_cmd_count.analyze)
analyzer.perform_analysis(args, skip_handler, context, actions,
metadata_tool,
compile_cmd_count)
__update_skip_file(args)
__cleanup_metadata(metadata_prev, metadata)
LOG.debug("Analysis metadata write to '%s'", metadata_file)
with open(metadata_file, 'w',
encoding="utf-8", errors="ignore") as metafile:
json.dump(metadata, metafile)
# WARN: store command will search for this file!!!!
compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
try:
source = os.path.abspath(args.logfile)
target = os.path.abspath(compile_cmd_json)
if source != target:
shutil.copyfile(source, target)
except shutil.Error:
LOG.debug("Compilation database JSON file is the same.")
except Exception:
LOG.debug("Copying compilation database JSON file failed.")
try:
# pylint: disable=no-name-in-module
from codechecker_analyzer import analyzer_statistics
analyzer_statistics.collect(metadata, "analyze")
except Exception:
pass
| 1 | 12,119 | Please update the user guide too. | Ericsson-codechecker | c |
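Two details of the analyze command above are easy to get wrong when driving it from scripts: the analyzer:key=value format enforced by config_option_re for --analyzer-config/--checker-config, and the skip-file content that __get_skip_handler synthesizes from --file arguments. The snippet below replays both in isolation; the analyzer names are an assumed subset of analyzer_types.supported_analyzers and the file list is invented for the example.

import re

# Assumed subset of analyzer_types.supported_analyzers, for illustration only.
supported_analyzers = ["clangsa", "clang-tidy"]
config_option_re = re.compile(
    r'^({}):.+=.+$'.format('|'.join(supported_analyzers)))

for option in ["clang-tidy:HeaderFilterRegex=.*",              # accepted
               "clang-tidy:take-config-from-directory=true",   # accepted
               "HeaderFilterRegex=.*"]:                        # rejected: analyzer prefix missing
    print(option, "->", bool(config_option_re.match(option)))

# Skip-file content built the same way __get_skip_handler does for --file:
# keep the named sources plus all headers, then skip everything else.
files = ["/path/to/main.cpp", "lib/*.cpp"]                     # invented example arguments
skip_files = ['+{0}'.format(f) for f in files]
skip_files.extend(['+/*.h', '+/*.H', '+/*.tcc'])
skip_files.append('-*')
print("\n".join(skip_files))

Options that fail the first check make the command log "Analyzer option in wrong format" and exit, and the generated line list is what ends up wrapped in a SkipListHandler.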
@@ -459,8 +459,9 @@ class Folio extends AbstractAPI implements
foreach ($this->getPagedResults(
'locations', '/locations'
) as $location) {
- $locationMap[$location->id]
- = $location->discoveryDisplayName ?? $location->name;
+ $name = $location->discoveryDisplayName ?? $location->name;
+ $code = $location->code;
+ $locationMap[$location->id] = [$name, $code];
}
}
$this->putCachedData($cacheKey, $locationMap); | 1 | <?php
/**
* FOLIO REST API driver
*
* PHP version 7
*
* Copyright (C) Villanova University 2018.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package ILS_Drivers
* @author Chris Hallberg <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:ils_drivers Wiki
*/
namespace VuFind\ILS\Driver;
use Exception;
use VuFind\Exception\ILS as ILSException;
use VuFind\I18n\Translator\TranslatorAwareInterface;
use VuFindHttp\HttpServiceAwareInterface as HttpServiceAwareInterface;
/**
* FOLIO REST API driver
*
* @category VuFind
* @package ILS_Drivers
* @author Chris Hallberg <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:ils_drivers Wiki
*/
class Folio extends AbstractAPI implements
HttpServiceAwareInterface, TranslatorAwareInterface
{
use \VuFindHttp\HttpServiceAwareTrait;
use \VuFind\I18n\Translator\TranslatorAwareTrait;
use \VuFind\Log\LoggerAwareTrait {
logWarning as warning;
logError as error;
}
use CacheTrait {
getCacheKey as protected getBaseCacheKey;
}
/**
* Authentication tenant (X-Okapi-Tenant)
*
* @var string
*/
protected $tenant = null;
/**
* Authentication token (X-Okapi-Token)
*
* @var string
*/
protected $token = null;
/**
* Factory function for constructing the SessionContainer.
*
* @var callable
*/
protected $sessionFactory;
/**
* Session cache
*
* @var \Laminas\Session\Container
*/
protected $sessionCache;
/**
* Date converter
*
* @var \VuFind\Date\Converter
*/
protected $dateConverter;
/**
* Constructor
*
* @param \VuFind\Date\Converter $dateConverter Date converter object
* @param callable $sessionFactory Factory function returning
* SessionContainer object
*/
public function __construct(\VuFind\Date\Converter $dateConverter,
$sessionFactory
) {
$this->dateConverter = $dateConverter;
$this->sessionFactory = $sessionFactory;
}
/**
* Set the configuration for the driver.
*
* @param array $config Configuration array (usually loaded from a VuFind .ini
* file whose name corresponds with the driver class name).
*
* @throws ILSException if base url excluded
* @return void
*/
public function setConfig($config)
{
parent::setConfig($config);
$this->tenant = $this->config['API']['tenant'];
}
/**
* Get the type of FOLIO ID used to match up with VuFind's bib IDs.
*
* @return string
*/
protected function getBibIdType()
{
// Normalize string to tolerate minor variations in config file:
return trim(strtolower($this->config['IDs']['type'] ?? 'instance'));
}
/**
* Function that obscures and logs debug data
*
* @param string $method Request method
* (GET/POST/PUT/DELETE/etc.)
* @param string $path Request URL
* @param array $params Request parameters
* @param \Laminas\Http\Headers $req_headers Headers object
*
* @return void
*/
protected function debugRequest($method, $path, $params, $req_headers)
{
// Only log non-GET requests
if ($method == 'GET') {
return;
}
// remove passwords
$logParams = $params;
if (isset($logParams['password'])) {
unset($logParams['password']);
}
// truncate headers for token obscuring
$logHeaders = $req_headers->toArray();
if (isset($logHeaders['X-Okapi-Token'])) {
$logHeaders['X-Okapi-Token'] = substr(
$logHeaders['X-Okapi-Token'], 0, 30
) . '...';
}
$this->debug(
$method . ' request.' .
' URL: ' . $path . '.' .
' Params: ' . print_r($logParams, true) . '.' .
' Headers: ' . print_r($logHeaders, true)
);
}
/**
* Add instance-specific context to a cache key suffix (to ensure that
     * multiple drivers don't accidentally share values in the cache).
*
* @param string $key Cache key suffix
*
* @return string
*/
protected function getCacheKey($key = null)
{
// Override the base class formatting with FOLIO-specific details
// to ensure proper caching in a MultiBackend environment.
return 'FOLIO-'
. md5("{$this->tenant}|$key");
}
/**
* (From AbstractAPI) Allow default corrections to all requests
*
* Add X-Okapi headers and Content-Type to every request
*
* @param \Laminas\Http\Headers $headers the request headers
* @param object $params the parameters object
*
* @return array
*/
public function preRequest(\Laminas\Http\Headers $headers, $params)
{
$headers->addHeaderLine('Accept', 'application/json');
if (!$headers->has('Content-Type')) {
$headers->addHeaderLine('Content-Type', 'application/json');
}
$headers->addHeaderLine('X-Okapi-Tenant', $this->tenant);
if ($this->token != null) {
$headers->addHeaderLine('X-Okapi-Token', $this->token);
}
return [$headers, $params];
}
/**
* Login and receive a new token
*
* @return void
*/
protected function renewTenantToken()
{
$this->token = null;
$auth = [
'username' => $this->config['API']['username'],
'password' => $this->config['API']['password'],
];
$response = $this->makeRequest('POST', '/authn/login', json_encode($auth));
if ($response->getStatusCode() >= 400) {
throw new ILSException($response->getBody());
}
$this->token = $response->getHeaders()->get('X-Okapi-Token')
->getFieldValue();
$this->sessionCache->folio_token = $this->token;
$this->debug(
'Token renewed. Tenant: ' . $auth['username'] .
' Token: ' . substr($this->token, 0, 30) . '...'
);
}
/**
* Check if our token is still valid
*
* Method taken from Stripes JS (loginServices.js:validateUser)
*
* @return void
*/
protected function checkTenantToken()
{
$response = $this->makeRequest('GET', '/users');
if ($response->getStatusCode() >= 400) {
$this->token = null;
$this->renewTenantToken();
}
}
/**
* Initialize the driver.
*
* Check or renew our auth token
*
* @return void
*/
public function init()
{
$factory = $this->sessionFactory;
$this->sessionCache = $factory($this->tenant);
if ($this->sessionCache->folio_token ?? false) {
$this->token = $this->sessionCache->folio_token;
$this->debug(
'Token taken from cache: ' . substr($this->token, 0, 30) . '...'
);
}
if ($this->token == null) {
$this->renewTenantToken();
} else {
$this->checkTenantToken();
}
}
/**
* Given some kind of identifier (instance, holding or item), retrieve the
* associated instance object from FOLIO.
*
* @param string $instanceId Instance ID, if available.
* @param string $holdingId Holding ID, if available.
* @param string $itemId Item ID, if available.
*
* @return object
*/
protected function getInstanceById($instanceId = null, $holdingId = null,
$itemId = null
) {
if ($instanceId == null) {
if ($holdingId == null) {
if ($itemId == null) {
throw new \Exception('No IDs provided to getInstanceObject.');
}
$response = $this->makeRequest(
'GET',
'/item-storage/items/' . $itemId
);
$item = json_decode($response->getBody());
$holdingId = $item->holdingsRecordId;
}
$response = $this->makeRequest(
'GET', '/holdings-storage/holdings/' . $holdingId
);
$holding = json_decode($response->getBody());
$instanceId = $holding->instanceId;
}
$response = $this->makeRequest(
'GET', '/inventory/instances/' . $instanceId
);
return json_decode($response->getBody());
}
/**
     * Given an instance object or identifier, or a holding or item identifier,
* determine an appropriate value to use as VuFind's bibliographic ID.
*
* @param string $instanceOrInstanceId Instance object or ID (will be looked up
* using holding or item ID if not provided)
* @param string $holdingId Holding-level id (optional)
* @param string $itemId Item-level id (optional)
*
* @return string Appropriate bib id retrieved from FOLIO identifiers
*/
protected function getBibId($instanceOrInstanceId = null, $holdingId = null,
$itemId = null
) {
$idType = $this->getBibIdType();
// Special case: if we're using instance IDs and we already have one,
// short-circuit the lookup process:
if ($idType === 'instance' && is_string($instanceOrInstanceId)) {
return $instanceOrInstanceId;
}
$instance = is_object($instanceOrInstanceId)
? $instanceOrInstanceId
: $this->getInstanceById($instanceOrInstanceId, $holdingId, $itemId);
switch ($idType) {
case 'hrid':
return $instance->hrid;
case 'instance':
return $instance->id;
}
throw new \Exception('Unsupported ID type: ' . $idType);
}
/**
* Escape a string for use in a CQL query.
*
* @param string $in Input string
*
* @return string
*/
protected function escapeCql($in)
{
return str_replace('"', '\"', str_replace('&', '%26', $in));
}
/**
* Retrieve FOLIO instance using VuFind's chosen bibliographic identifier.
*
* @param string $bibId Bib-level id
*
* @return array
*/
protected function getInstanceByBibId($bibId)
{
// Figure out which ID type to use in the CQL query; if the user configured
// instance IDs, use the 'id' field, otherwise pass the setting through
// directly:
$idType = $this->getBibIdType();
$idField = $idType === 'instance' ? 'id' : $idType;
$query = [
'query' => '(' . $idField . '=="' . $this->escapeCql($bibId) . '")'
];
$response = $this->makeRequest('GET', '/instance-storage/instances', $query);
$instances = json_decode($response->getBody());
if (count($instances->instances) == 0) {
throw new ILSException("Item Not Found");
}
return $instances->instances[0];
}
/**
* Get raw object of item from inventory/items/
*
* @param string $itemId Item-level id
*
* @return array
*/
public function getStatus($itemId)
{
return $this->getHolding($itemId);
}
/**
     * This method calls getStatus for an array of records or implements a bulk method
*
* @param array $idList Item-level ids
*
* @return array values from getStatus
*/
public function getStatuses($idList)
{
$status = [];
foreach ($idList as $id) {
$status[] = $this->getStatus($id);
}
return $status;
}
/**
* Retrieves renew, hold and cancel settings from the driver ini file.
*
* @param string $function The name of the feature to be checked
* @param array $params Optional feature-specific parameters (array)
*
* @return array An array with key-value pairs.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
public function getConfig($function, $params = null)
{
return $this->config[$function] ?? false;
}
/**
* Check item location against list of configured locations
* where holds should be offered
*
* @param string $locationName locationName from getHolding
*
* @return bool
*/
protected function isHoldable($locationName)
{
return !in_array(
$locationName,
(array)($this->config['Holds']['excludeHoldLocations'] ?? [])
);
}
/**
* Gets locations from the /locations endpoint and sets
* an array of location IDs to display names.
* Display names are set from discoveryDisplayName, or name
* if discoveryDisplayName is not available.
*
* @return array
*/
protected function getLocations()
{
$cacheKey = 'locationMap';
$locationMap = $this->getCachedData($cacheKey);
if (null === $locationMap) {
$locationMap = [];
foreach ($this->getPagedResults(
'locations', '/locations'
) as $location) {
$locationMap[$location->id]
= $location->discoveryDisplayName ?? $location->name;
}
}
$this->putCachedData($cacheKey, $locationMap);
return $locationMap;
}
/**
* Get Inventory Location Name
*
* @param string $locationId UUID of item location
*
* @return string $locationName display name of location
*/
protected function getLocationName($locationId)
{
$locationMap = $this->getLocations();
$locationName = '';
if (array_key_exists($locationId, $locationMap)) {
$locationName = $locationMap[$locationId];
} else {
// if key is not found in cache, the location could have
// been added before the cache expired so check again
$locationResponse = $this->makeRequest(
'GET', '/locations/' . $locationId
);
if ($locationResponse->isSuccess()) {
$location = json_decode($locationResponse->getBody());
$locationName = $location->discoveryDisplayName ?? $location->name;
}
}
return $locationName;
}
/**
* This method queries the ILS for holding information.
*
* @param string $bibId Bib-level id
* @param array $patron Patron login information from $this->patronLogin
* @param array $options Extra options (not currently used)
*
* @return array An array of associative holding arrays
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
public function getHolding($bibId, array $patron = null, array $options = [])
{
$instance = $this->getInstanceByBibId($bibId);
$query = [
'query' => '(instanceId=="' . $instance->id
. '" NOT discoverySuppress==true)'
];
$items = [];
foreach ($this->getPagedResults(
'holdingsRecords', '/holdings-storage/holdings', $query
) as $holding) {
$query = [
'query' => '(holdingsRecordId=="' . $holding->id
. '" NOT discoverySuppress==true)'
];
$notesFormatter = function ($note) {
return !($note->staffOnly ?? false)
&& !empty($note->note) ? $note->note : '';
};
$textFormatter = function ($supplement) {
$format = '%s %s';
$supStat = $supplement->statement;
$supNote = $supplement->note;
$statement = trim(sprintf($format, $supStat, $supNote));
return $statement ?? '';
};
$holdingNotes = array_filter(
array_map($notesFormatter, $holding->notes ?? [])
);
$hasHoldingNotes = !empty(implode($holdingNotes));
$holdingsStatements = array_map(
$textFormatter,
$holding->holdingsStatements ?? []
);
$holdingsSupplements = array_map(
$textFormatter,
$holding->holdingsStatementsForSupplements ?? []
);
$holdingsIndexes = array_map(
$textFormatter,
$holding->holdingsStatementsForIndexes ?? []
);
foreach ($this->getPagedResults(
'items', '/item-storage/items', $query
) as $item) {
$itemNotes = array_filter(
array_map($notesFormatter, $item->notes ?? [])
);
$locationId = $item->effectiveLocationId;
$locationName = $this->getLocationName($locationId);
$items[] = [
'id' => $bibId,
'item_id' => $item->id,
'holding_id' => $holding->id,
'number' => count($items) + 1,
'barcode' => $item->barcode ?? '',
'status' => $item->status->name,
'availability' => $item->status->name == 'Available',
'is_holdable' => $this->isHoldable($locationName),
'holdings_notes'=> $hasHoldingNotes ? $holdingNotes : null,
'item_notes' => !empty(implode($itemNotes)) ? $itemNotes : null,
'issues' => $holdingsStatements,
'supplements' => $holdingsSupplements,
'indexes' => $holdingsIndexes,
'callnumber' => $holding->callNumber ?? '',
'location' => $locationName,
'reserve' => 'TODO',
'addLink' => true
];
}
}
return $items;
}
/**
* Support method for patronLogin(): authenticate the patron with an Okapi
* login attempt. Returns a CQL query for retrieving more information about
* the authenticated user.
*
* @param string $username The patron username
* @param string $password The patron password
*
* @return string
*/
protected function patronLoginWithOkapi($username, $password)
{
$tenant = $this->config['API']['tenant'];
$credentials = compact('tenant', 'username', 'password');
// Get token
$response = $this->makeRequest(
'POST',
'/authn/login',
json_encode($credentials)
);
$debugMsg = 'User logged in. User: ' . $username . '.';
// We've authenticated the user with Okapi, but we only have their
// username; set up a query to retrieve full info below.
$query = 'username == ' . $username;
// Replace admin with user as tenant if configured to do so:
if ($this->config['User']['use_user_token'] ?? false) {
$this->token = $response->getHeaders()->get('X-Okapi-Token')
->getFieldValue();
$debugMsg .= ' Token: ' . substr($this->token, 0, 30) . '...';
}
$this->debug($debugMsg);
return $query;
}
/**
     * Support method for patronLogin(): authenticate the patron with a CQL lookup.
* Returns the CQL query for retrieving more information about the user.
*
* @param string $username The patron username
* @param string $password The patron password
*
* @return string
*/
protected function getUserWithCql($username, $password)
{
// Construct user query using barcode, username, etc.
$usernameField = $this->config['User']['username_field'] ?? 'username';
$passwordField = $this->config['User']['password_field'] ?? false;
$cql = $this->config['User']['cql']
?? '%%username_field%% == "%%username%%"'
. ($passwordField ? ' and %%password_field%% == "%%password%%"' : '');
$placeholders = [
'%%username_field%%',
'%%password_field%%',
'%%username%%',
'%%password%%',
];
$values = [
$usernameField,
$passwordField,
$this->escapeCql($username),
$this->escapeCql($password),
];
return str_replace($placeholders, $values, $cql);
}
/**
* Given a CQL query, fetch a single user; if we get an unexpected count, treat
* that as an unsuccessful login by returning null.
*
* @param string $query CQL query
*
* @return object
*/
protected function fetchUserWithCql($query)
{
$response = $this->makeRequest('GET', '/users', compact('query'));
$json = json_decode($response->getBody());
return count($json->users) === 1 ? $json->users[0] : null;
}
/**
* Helper function to retrieve paged results from FOLIO API
*
* @param string $responseKey Key containing values to collect in response
* @param string $interface FOLIO api interface to call
* @param array $query CQL query
*
* @return array
*/
protected function getPagedResults($responseKey, $interface, $query = [])
{
$count = 0;
$limit = 1000;
$offset = 0;
do {
$combinedQuery = array_merge($query, compact('offset', 'limit'));
$response = $this->makeRequest(
'GET',
$interface,
$combinedQuery
);
$json = json_decode($response->getBody());
if (!$response->isSuccess() || !$json) {
$msg = $json->errors[0]->message ?? json_last_error_msg();
throw new ILSException($msg);
}
$total = $json->totalRecords ?? 0;
$previousCount = $count;
foreach ($json->$responseKey ?? [] as $item) {
$count++;
if ($count % $limit == 0) {
$offset += $limit;
}
yield $item ?? '';
}
// Continue until the count reaches the total records
// found, if count does not increase, something has gone
// wrong. Stop so we don't loop forever.
} while ($count < $total && $previousCount != $count);
}
/**
* Patron Login
*
* This is responsible for authenticating a patron against the catalog.
*
* @param string $username The patron username
* @param string $password The patron password
*
* @return mixed Associative array of patron info on successful login,
* null on unsuccessful login.
*/
public function patronLogin($username, $password)
{
$doOkapiLogin = $this->config['User']['okapi_login'] ?? false;
$usernameField = $this->config['User']['username_field'] ?? 'username';
// If the username field is not the default 'username' we will need to
// do a lookup to find the correct username value for Okapi login. We also
// need to do this lookup if we're skipping Okapi login entirely.
if (!$doOkapiLogin || $usernameField !== 'username') {
$query = $this->getUserWithCql($username, $password);
$profile = $this->fetchUserWithCql($query);
if ($profile === null) {
return null;
}
}
// If we need to do an Okapi login, we have the information we need to do
// it at this point.
if ($doOkapiLogin) {
try {
// If we fetched the profile earlier, we want to use the username
// from there; otherwise, we'll use the passed-in version.
$query = $this->patronLoginWithOkapi(
$profile->username ?? $username,
$password
);
} catch (Exception $e) {
return null;
}
// If we didn't load a profile earlier, we should do so now:
if (!isset($profile)) {
$profile = $this->fetchUserWithCql($query);
if ($profile === null) {
return null;
}
}
}
return [
'id' => $profile->id,
'username' => $username,
'cat_username' => $username,
'cat_password' => $password,
'firstname' => $profile->personal->firstName ?? null,
'lastname' => $profile->personal->lastName ?? null,
'email' => $profile->personal->email ?? null,
];
}
/**
* This method queries the ILS for a patron's current profile information
*
* @param array $patron Patron login information from $this->patronLogin
*
* @return array Profile data in associative array
*/
public function getMyProfile($patron)
{
$query = ['query' => 'id == "' . $patron['id'] . '"'];
$response = $this->makeRequest('GET', '/users', $query);
$users = json_decode($response->getBody());
$profile = $users->users[0];
$expiration = isset($profile->expirationDate)
? $this->dateConverter->convertToDisplayDate(
"Y-m-d H:i", $profile->expirationDate
)
: null;
return [
'id' => $profile->id,
'firstname' => $profile->personal->firstName ?? null,
'lastname' => $profile->personal->lastName ?? null,
'address1' => $profile->personal->addresses[0]->addressLine1 ?? null,
'city' => $profile->personal->addresses[0]->city ?? null,
'country' => $profile->personal->addresses[0]->countryId ?? null,
'zip' => $profile->personal->addresses[0]->postalCode ?? null,
'phone' => $profile->personal->phone ?? null,
'mobile_phone' => $profile->personal->mobilePhone ?? null,
'expiration_date' => $expiration,
];
}
/**
* This method queries the ILS for a patron's current checked out items
*
* Input: Patron array returned by patronLogin method
* Output: Returns an array of associative arrays.
* Each associative array contains these keys:
* duedate - The item's due date (a string).
* dueTime - The item's due time (a string, optional).
* dueStatus - A special status – may be 'due' (for items due very soon)
* or 'overdue' (for overdue items). (optional).
* id - The bibliographic ID of the checked out item.
* source - The search backend from which the record may be retrieved
* (optional - defaults to Solr). Introduced in VuFind 2.4.
* barcode - The barcode of the item (optional).
* renew - The number of times the item has been renewed (optional).
* renewLimit - The maximum number of renewals allowed
* (optional - introduced in VuFind 2.3).
* request - The number of pending requests for the item (optional).
* volume – The volume number of the item (optional).
* publication_year – The publication year of the item (optional).
* renewable – Whether or not an item is renewable
* (required for renewals).
* message – A message regarding the item (optional).
* title - The title of the item (optional – only used if the record
* cannot be found in VuFind's index).
* item_id - this is used to match up renew responses and must match
* the item_id in the renew response.
* institution_name - Display name of the institution that owns the item.
* isbn - An ISBN for use in cover image loading
* (optional – introduced in release 2.3)
* issn - An ISSN for use in cover image loading
* (optional – introduced in release 2.3)
* oclc - An OCLC number for use in cover image loading
* (optional – introduced in release 2.3)
* upc - A UPC for use in cover image loading
* (optional – introduced in release 2.3)
* borrowingLocation - A string describing the location where the item
* was checked out (optional – introduced in release 2.4)
*
* @param array $patron Patron login information from $this->patronLogin
*
* @return array Transactions associative arrays
*/
public function getMyTransactions($patron)
{
$query = ['query' => 'userId==' . $patron['id'] . ' and status.name==Open'];
$transactions = [];
foreach ($this->getPagedResults(
'loans', '/circulation/loans', $query
) as $trans) {
$date = date_create($trans->dueDate);
$transactions[] = [
'duedate' => date_format($date, "j M Y"),
'dueTime' => date_format($date, "g:i:s a"),
// TODO: Due Status
// 'dueStatus' => $trans['itemId'],
'id' => $this->getBibId($trans->item->instanceId),
'item_id' => $trans->item->id,
'barcode' => $trans->item->barcode,
'renew' => $trans->renewalCount ?? 0,
'renewable' => true,
'title' => $trans->item->title,
];
}
return $transactions;
}
/**
* Get FOLIO loan IDs for use in renewMyItems.
*
     * @param array $transaction A single transaction
* array from getMyTransactions
*
* @return string The FOLIO loan ID for this loan
*/
public function getRenewDetails($transaction)
{
return $transaction['item_id'];
}
/**
* Attempt to renew a list of items for a given patron.
*
* @param array $renewDetails An associative array with
* patron and details
*
* @return array $renewResult result of attempt to renew loans
*/
public function renewMyItems($renewDetails)
{
$renewalResults = ['details' => []];
foreach ($renewDetails['details'] as $loanId) {
$requestbody = [
'itemId' => $loanId,
'userId' => $renewDetails['patron']['id']
];
$response = $this->makeRequest(
'POST', '/circulation/renew-by-id', json_encode($requestbody)
);
if ($response->isSuccess()) {
$json = json_decode($response->getBody());
$renewal = [
'success' => true,
'new_date' => $this->dateConverter->convertToDisplayDate(
"Y-m-d H:i", $json->dueDate
),
'new_time' => $this->dateConverter->convertToDisplayTime(
"Y-m-d H:i", $json->dueDate
),
'item_id' => $json->itemId,
'sysMessage' => $json->action
];
} else {
try {
$json = json_decode($response->getBody());
$sysMessage = $json->errors[0]->message;
} catch (Exception $e) {
$sysMessage = "Renewal Failed";
}
$renewal = [
'success' => false,
'sysMessage' => $sysMessage
];
}
$renewalResults['details'][$loanId] = $renewal;
}
return $renewalResults;
}
/**
* Get Pick Up Locations
*
     * This is responsible for getting a list of valid locations for holds / recall
* retrieval
*
* @param array $patron Patron information returned by $this->patronLogin
*
* @return array An array of associative arrays with locationID and
* locationDisplay keys
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
public function getPickupLocations($patron)
{
$query = ['query' => 'pickupLocation=true'];
$locations = [];
foreach ($this->getPagedResults(
'servicepoints', '/service-points', $query
) as $servicepoint) {
$locations[] = [
'locationID' => $servicepoint->id,
'locationDisplay' => $servicepoint->discoveryDisplayName
];
}
return $locations;
}
/**
* This method queries the ILS for a patron's current holds
*
* Input: Patron array returned by patronLogin method
* Output: Returns an array of associative arrays, one for each hold associated
* with the specified account. Each associative array contains these keys:
* type - A string describing the type of hold – i.e. hold vs. recall
* (optional).
* id - The bibliographic record ID associated with the hold (optional).
* source - The search backend from which the record may be retrieved
* (optional - defaults to Solr). Introduced in VuFind 2.4.
* location - A string describing the pickup location for the held item
* (optional). In VuFind 1.2, this should correspond with a locationID value from
* getPickUpLocations. In VuFind 1.3 and later, it may be either
* a locationID value or a raw ready-to-display string.
* reqnum - A control number for the request (optional).
* expire - The expiration date of the hold (a string).
* create - The creation date of the hold (a string).
* position – The position of the user in the holds queue (optional)
* available – Whether or not the hold is available (true/false) (optional)
     * item_id – The item id of the requested item (optional).
* volume – The volume number of the item (optional)
* publication_year – The publication year of the item (optional)
* title - The title of the item
* (optional – only used if the record cannot be found in VuFind's index).
* isbn - An ISBN for use in cover image loading (optional)
* issn - An ISSN for use in cover image loading (optional)
* oclc - An OCLC number for use in cover image loading (optional)
* upc - A UPC for use in cover image loading (optional)
* cancel_details - The cancel token, or a blank string if cancel is illegal
* for this hold; if omitted, this will be dynamically generated using
* getCancelHoldDetails(). You should only fill this in if it is more efficient
* to calculate the value up front; if it is an expensive calculation, you should
* omit the value entirely and let getCancelHoldDetails() do its job on demand.
* This optional feature was introduced in release 3.1.
*
* @param array $patron Patron login information from $this->patronLogin
*
* @return array Associative array of holds information
*/
public function getMyHolds($patron)
{
$query = [
'query' => '(requesterId == "' . $patron['id'] . '" ' .
'and status == Open*)'
];
$holds = [];
foreach ($this->getPagedResults(
'requests', '/request-storage/requests', $query
) as $hold) {
$requestDate = date_create($hold->requestDate);
// Set expire date if it was included in the response
$expireDate = isset($hold->requestExpirationDate)
? date_create($hold->requestExpirationDate) : null;
$holds[] = [
'type' => $hold->requestType,
'create' => date_format($requestDate, "j M Y"),
'expire' => isset($expireDate)
? date_format($expireDate, "j M Y") : "",
'id' => $this->getBibId(null, null, $hold->itemId),
'item_id' => $hold->itemId,
'reqnum' => $hold->id,
'title' => $hold->item->title
];
}
return $holds;
}
/**
* Place Hold
*
* Attempts to place a hold or recall on a particular item and returns
* an array with result details.
*
* @param array $holdDetails An array of item and patron data
*
* @return mixed An array of data on the request including
* whether or not it was successful and a system message (if available)
*/
public function placeHold($holdDetails)
{
$default_request = $this->config['Holds']['default_request'] ?? 'Hold';
try {
$requiredBy = date_create_from_format(
'm-d-Y',
$holdDetails['requiredBy']
);
} catch (Exception $e) {
throw new ILSException('hold_date_invalid');
}
$requestBody = [
'itemId' => $holdDetails['item_id'],
'requestType' => $holdDetails['status'] == 'Available'
? 'Page' : $default_request,
'requesterId' => $holdDetails['patron']['id'],
'requestDate' => date('c'),
'fulfilmentPreference' => 'Hold Shelf',
'requestExpirationDate' => date_format($requiredBy, 'Y-m-d'),
'pickupServicePointId' => $holdDetails['pickUpLocation']
];
$response = $this->makeRequest(
'POST',
'/circulation/requests',
json_encode($requestBody)
);
if ($response->isSuccess()) {
$json = json_decode($response->getBody());
$result = [
'success' => true,
'status' => $json->status
];
} else {
try {
$json = json_decode($response->getBody());
$result = [
'success' => false,
'status' => $json->errors[0]->message
];
} catch (Exception $e) {
throw new ILSException($response->getBody());
}
}
return $result;
}
/**
* Get FOLIO hold IDs for use in cancelHolds.
*
     * @param array $hold A single request
* array from getMyHolds
*
* @return string request ID for this request
*/
public function getCancelHoldDetails($hold)
{
return $hold['reqnum'];
}
/**
* Cancel Holds
*
* Attempts to Cancel a hold or recall on a particular item. The
* data in $cancelDetails['details'] is determined by getCancelHoldDetails().
*
* @param array $cancelDetails An array of item and patron data
*
* @return array An array of data on each request including
* whether or not it was successful and a system message (if available)
*/
public function cancelHolds($cancelDetails)
{
$details = $cancelDetails['details'];
$patron = $cancelDetails['patron'];
$count = 0;
$cancelResult = ['items' => []];
foreach ($details as $requestId) {
$response = $this->makeRequest(
'GET', '/circulation/requests/' . $requestId
);
$request_json = json_decode($response->getBody());
// confirm request belongs to signed in patron
if ($request_json->requesterId != $patron['id']) {
throw new ILSException("Invalid Request");
}
// Change status to Closed and add cancellationID
$request_json->status = 'Closed - Cancelled';
$request_json->cancellationReasonId
= $this->config['Holds']['cancellation_reason'];
$cancel_response = $this->makeRequest(
'PUT', '/circulation/requests/' . $requestId,
json_encode($request_json)
);
if ($cancel_response->getStatusCode() == 204) {
$count++;
$cancelResult['items'][$request_json->itemId] = [
'success' => true,
'status' => 'hold_cancel_success'
];
} else {
$cancelResult['items'][$request_json->itemId] = [
'success' => false,
'status' => 'hold_cancel_fail'
];
}
}
$cancelResult['count'] = $count;
return $cancelResult;
}
/**
* Obtain a list of course resources, creating an id => value associative array.
*
* @param string $type Type of resource to retrieve from the API.
* @param string $responseKey Key containing useful values in response (defaults
* to $type if unspecified)
* @param string $valueKey Key containing value to extract from response
* (defaults to 'name')
*
* @return array
*/
protected function getCourseResourceList($type, $responseKey = null,
$valueKey = 'name'
) {
$retVal = [];
// Results can be paginated, so let's loop until we've gotten everything:
foreach ($this->getPagedResults(
$responseKey ?? $type, '/coursereserves/' . $type
) as $item) {
$retVal[$item->id] = $item->$valueKey ?? '';
}
return $retVal;
}
/**
* Get Departments
*
* Obtain a list of departments for use in limiting the reserves list.
*
* @return array An associative array with key = dept. ID, value = dept. name.
*/
public function getDepartments()
{
return $this->getCourseResourceList('departments');
}
/**
* Get Instructors
*
* Obtain a list of instructors for use in limiting the reserves list.
*
* @return array An associative array with key = ID, value = name.
*/
public function getInstructors()
{
$retVal = [];
$ids = array_keys(
$this->getCourseResourceList('courselistings', 'courseListings')
);
foreach ($ids as $id) {
$retVal += $this->getCourseResourceList(
'courselistings/' . $id . '/instructors', 'instructors'
);
}
return $retVal;
}
/**
* Get Courses
*
* Obtain a list of courses for use in limiting the reserves list.
*
* @return array An associative array with key = ID, value = name.
*/
public function getCourses()
{
return $this->getCourseResourceList('courses');
}
/**
* Given a course listing ID, get an array of associated courses.
*
* @param string $courseListingId Course listing ID
*
* @return array
*/
protected function getCourseDetails($courseListingId)
{
$values = empty($courseListingId)
? []
: $this->getCourseResourceList(
'courselistings/' . $courseListingId . '/courses',
'courses',
'departmentId'
);
// Return an array with empty values in it if we can't find any values,
// because we want to loop at least once to build our reserves response.
return empty($values) ? ['' => ''] : $values;
}
/**
* Given a course listing ID, get an array of associated instructors.
*
* @param string $courseListingId Course listing ID
*
* @return array
*/
protected function getInstructorIds($courseListingId)
{
$values = empty($courseListingId)
? []
: $this->getCourseResourceList(
'courselistings/' . $courseListingId . '/instructors', 'instructors'
);
// Return an array with null in it if we can't find any values, because
// we want to loop at least once to build our course reserves response.
return empty($values) ? [null] : array_keys($values);
}
/**
* Find Reserves
*
* Obtain information on course reserves.
*
* @param string $course ID from getCourses (empty string to match all)
* @param string $inst ID from getInstructors (empty string to match all)
* @param string $dept ID from getDepartments (empty string to match all)
*
* @return mixed An array of associative arrays representing reserve items.
*/
public function findReserves($course, $inst, $dept)
{
$retVal = [];
// Results can be paginated, so let's loop until we've gotten everything:
foreach ($this->getPagedResults(
'reserves', '/coursereserves/reserves'
) as $item) {
try {
$bibId = $this->getBibId(null, null, $item->itemId);
} catch (\Exception $e) {
$bibId = null;
}
if ($bibId !== null) {
$courseData = $this->getCourseDetails(
$item->courseListingId ?? null
);
$instructorIds = $this->getInstructorIds(
$item->courseListingId ?? null
);
foreach ($courseData as $courseId => $departmentId) {
foreach ($instructorIds as $instructorId) {
$retVal[] = [
'BIB_ID' => $bibId,
'COURSE_ID' => $courseId == '' ? null : $courseId,
'DEPARTMENT_ID' => $departmentId == ''
? null : $departmentId,
'INSTRUCTOR_ID' => $instructorId,
];
}
}
}
}
// If the user has requested a filter, apply it now:
if (!empty($course) || !empty($inst) || !empty($dept)) {
$filter = function ($value) use ($course, $inst, $dept) {
return (empty($course) || $course == $value['COURSE_ID'])
&& (empty($inst) || $inst == $value['INSTRUCTOR_ID'])
&& (empty($dept) || $dept == $value['DEPARTMENT_ID']);
};
return array_filter($retVal, $filter);
}
return $retVal;
}
// @codingStandardsIgnoreStart
/** NOT FINISHED BELOW THIS LINE **/
/**
* Check for request blocks.
*
* @param array $patron The patron array with username and password
*
* @return array|boolean An array of block messages or false if there are no
* blocks
* @author Michael Birkner
*/
public function getRequestBlocks($patron)
{
return false;
}
/**
* This method returns information on recently received issues of a serial.
*
* Input: Bibliographic record ID
* Output: Array of associative arrays, each with a single key:
* issue - String describing the issue
*
* Currently, most drivers do not implement this method, instead always returning
* an empty array. It is only necessary to implement this in more detail if you
* want to populate the “Most Recent Received Issues” section of the record
* holdings tab.
*/
public function getPurchaseHistory($bibID)
{
return [];
}
/**
* This method queries the ILS for a patron's current fines
*
* Input: Patron array returned by patronLogin method
* Output: Returns an array of associative arrays, one for each fine
* associated with the specified account. Each associative array contains
* these keys:
* amount - The total amount of the fine IN PENNIES. Be sure to adjust
* decimal points appropriately (i.e. for a $1.00 fine, amount should be 100).
* checkout - A string representing the date when the item was
* checked out.
* fine - A string describing the reason for the fine
* (i.e. “Overdue”, “Long Overdue”).
* balance - The unpaid portion of the fine IN PENNIES.
* createdate – A string representing the date when the fine was accrued
* (optional)
* duedate - A string representing the date when the item was due.
* id - The bibliographic ID of the record involved in the fine.
* source - The search backend from which the record may be retrieved
* (optional - defaults to Solr). Introduced in VuFind 2.4.
*
*/
public function getMyFines($patron)
{
$query = ['query' => 'userId==' . $patron['id'] . ' and status.name==Closed'];
$fines = [];
foreach ($this->getPagedResults(
'accounts', '/accounts', $query
) as $fine) {
$date = date_create($fine->metadata->createdDate);
$title = $fine->title ?? null;
$fines[] = [
'id' => $fine->id,
'amount' => $fine->amount * 100,
'balance' => $fine->remaining * 100,
'status' => $fine->paymentStatus->name,
'type' => $fine->feeFineType,
'title' => $title,
'createdate' => date_format($date, "j M Y")
];
}
return $fines;
}
/**
* Get a list of funds that can be used to limit the “new item” search. Note that
* “fund” may be a misnomer – if funds are not an appropriate way to limit your
* new item results, you can return a different set of values from this function.
* For example, you might just make this a wrapper for getDepartments(). The
* important thing is that whatever you return from this function, the IDs can be
* used as a limiter to the getNewItems() function, and the names are appropriate
* for display on the new item search screen. If you do not want or support such
* limits, just return an empty array here and the limit control on the new item
* search screen will disappear.
*
* Output: An associative array with key = fund ID, value = fund name.
*
* IMPORTANT: The return value for this method changed in r2184. If you are using
* VuFind 1.0RC2 or earlier, this function returns a flat array of options
* (no ID-based keys), and empty return values may cause problems. It is
* recommended that you update to newer code before implementing the new item
* feature in your driver.
*/
public function getFunds()
{
return [];
}
/**
* This method retrieves a patron's historic transactions
* (previously checked out items).
*
* :!: The getConfig method must return a non-false value for this feature to be
* enabled. For privacy reasons, the entire feature should be disabled by default
* unless explicitly turned on in the driver's .ini file.
*
* This feature was added in VuFind 5.0.
*
* getConfig may return the following keys if the service supports paging on
* the ILS side:
* max_results - Maximum number of results that can be requested at once.
* Overrides the config.ini Catalog section setting historic_loan_page_size.
* page_size - An array of allowed page sizes
* (number of records per page)
* default_page_size - Default number of records per page
* getConfig may return the following keys if the service supports sorting:
* sort - An associative array where each key is a sort key and its
* value is a translation key
* default_sort - Default sort key
* Input: Patron array returned by patronLogin method and an array of
* optional parameters (keys = 'limit', 'page', 'sort').
* Output: Returns an array of associative arrays containing some or all of
* these keys:
* title - item title
* checkoutDate - date checked out
* dueDate - date due
* id - bibliographic ID
* barcode - item barcode
* returnDate - date returned
* publication_year - publication year
* volume - item volume
* institution_name - owning institution
* borrowingLocation - checkout location
* message - message about the transaction
*
*/
public function getMyTransactionHistory($patron)
{
        return [];
}
/**
* This method queries the ILS for new items
*
* Input: getNewItems takes the following parameters:
* page - page number of results to retrieve (counting starts at 1)
* limit - the size of each page of results to retrieve
* daysOld - the maximum age of records to retrieve in days (maximum 30)
* fundID - optional fund ID to use for limiting results (use a value
* returned by getFunds, or exclude for no limit); note that “fund” may be a
* misnomer – if funds are not an appropriate way to limit your new item results,
* you can return a different set of values from getFunds. The important thing is
* that this parameter supports an ID returned by getFunds, whatever that may
* mean.
* Output: An associative array with two keys: 'count' (the number of items
* in the 'results' array) and 'results' (an array of associative arrays, each
* with a single key: 'id', a record ID).
*
* IMPORTANT: The fundID parameter changed behavior in r2184. In VuFind 1.0RC2
* and earlier versions, it receives one of the VALUES returned by getFunds();
* in more recent code, it receives one of the KEYS from getFunds(). See getFunds
* for additional notes.
*/
public function getNewItems($page, $limit, $daysOld, $fundID = null)
{
return [];
}
// @codingStandardsIgnoreEnd
}
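A note on the review message recorded for this row: it suggests `compact('name', 'code')` in place of hard-coded 0/1 indexes. The hunk under discussion is not reproduced above, so the variable names below are assumptions; this is only a minimal sketch of what compact() produces, not part of the Folio driver itself.

<?php
// Hypothetical values standing in for whatever the patched code extracts.
$name = 'Introduction to Archives';
$code = 'HIST-101';

// Positional style: callers must remember that [0] is the name and [1] is the code.
$positional = [$name, $code];
echo $positional[0] . ' (' . $positional[1] . ')' . PHP_EOL;

// compact('name', 'code') === ['name' => $name, 'code' => $code], so callers can
// read $pair['name'] and $pair['code'] instead of numeric indexes.
$pair = compact('name', 'code');
echo $pair['name'] . ' (' . $pair['code'] . ')' . PHP_EOL;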
| 1 | 31,447 | If you use `compact('name', 'code')` here, you'll get an associative array, which might make the rest of the code more readable (instead of using hard-coded 0/1 indexes). | vufind-org-vufind | php |
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, salesforce.com, inc.
+ * Copyright (c) 2014, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions | 1 | /*
* Copyright (c) 2011, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.rest;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.Map;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.protocol.HTTP;
import org.apache.http.util.EntityUtils;
import android.util.Log;
import com.android.volley.AuthFailureError;
import com.android.volley.NetworkResponse;
import com.android.volley.Request;
import com.android.volley.RequestQueue;
import com.android.volley.Response;
import com.android.volley.VolleyError;
import com.android.volley.toolbox.BasicNetwork;
import com.android.volley.toolbox.HttpHeaderParser;
import com.android.volley.toolbox.HttpStack;
import com.android.volley.toolbox.NoCache;
import com.google.common.collect.Maps;
import com.salesforce.androidsdk.auth.HttpAccess;
import com.salesforce.androidsdk.auth.HttpAccess.Execution;
import com.salesforce.androidsdk.rest.RestRequest.RestMethod;
/**
* RestClient allows you to send authenticated HTTP requests to a force.com server.
*/
public class RestClient {
private ClientInfo clientInfo;
private RequestQueue requestQueue;
private SalesforceHttpStack httpStack;
/**
* AuthTokenProvider interface.
* RestClient will call its authTokenProvider to refresh its authToken once it has expired.
*/
public interface AuthTokenProvider {
String getNewAuthToken();
String getRefreshToken();
long getLastRefreshTime();
}
/**
* AsyncRequestCallback interface.
* Interface through which the result of an asynchronous request is handled.
*/
public interface AsyncRequestCallback {
void onSuccess(RestRequest request, RestResponse response);
void onError(Exception exception);
}
/**
* Constructs a RestClient with the given clientInfo, authToken, httpAccessor and authTokenProvider.
* When it gets a 401 (not authorized) response from the server:
* <ul>
* <li> If authTokenProvider is not null, it will ask the authTokenProvider for a new access token and retry the request a second time.</li>
* <li> Otherwise it will return the 401 response.</li>
* </ul>
* @param clientInfo
* @param authToken
* @param httpAccessor
* @param authTokenProvider
*/
public RestClient(ClientInfo clientInfo, String authToken, HttpAccess httpAccessor, AuthTokenProvider authTokenProvider) {
this(clientInfo, new SalesforceHttpStack(authToken, httpAccessor, authTokenProvider));
}
public RestClient(ClientInfo clientInfo, SalesforceHttpStack httpStack) {
this.clientInfo = clientInfo;
this.httpStack = httpStack;
this.requestQueue = new RequestQueue(new NoCache(), new BasicNetwork(httpStack));
this.requestQueue.start();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("RestClient: {\n")
.append(getClientInfo())
// Un-comment if you must: tokens should not be printed to the log
// .append(" authToken: ").append(getAuthToken()).append("\n")
// .append(" refreshToken: ").append(getRefreshToken()).append("\n")
.append(" timeSinceLastRefresh: ").append(httpStack.getElapsedTimeSinceLastRefresh()).append("\n")
.append("}\n");
return sb.toString();
}
/**
* @return The authToken for this RestClient.
*/
public synchronized String getAuthToken() {
return httpStack.getAuthToken();
}
/**
* @return The refresh token, if available.
*/
public String getRefreshToken() {
return httpStack.getRefreshToken();
}
/**
* @return The client info.
*/
public ClientInfo getClientInfo() {
return clientInfo;
}
/**
	 * @return underlying RequestQueue (used when calling sendAsync)
*/
public RequestQueue getRequestQueue() {
return requestQueue;
}
/**
* Send the given restRequest and process the result asynchronously with the given callback.
* Note: Intended to be used by code on the UI thread.
* @param restRequest
* @param callback
* @return volley.Request object wrapped around the restRequest (to allow cancellation etc)
*/
public Request<?> sendAsync(RestRequest restRequest, AsyncRequestCallback callback) {
WrappedRestRequest wrappedRestRequest = new WrappedRestRequest(clientInfo, restRequest, callback);
return requestQueue.add(wrappedRestRequest);
}
/**
* Send the given restRequest synchronously and return a RestResponse
* Note: Cannot be used by code on the UI thread (use sendAsync instead).
* @param restRequest
* @return
* @throws IOException
*/
public RestResponse sendSync(RestRequest restRequest) throws IOException {
return sendSync(restRequest.getMethod(), restRequest.getPath(), restRequest.getRequestEntity(), restRequest.getAdditionalHttpHeaders());
}
/**
* Send an arbitrary HTTP request synchronously, using the given method, path and httpEntity.
* Note: Cannot be used by code on the UI thread (use sendAsync instead).
*
* @param method the HTTP method for the request (GET/POST/DELETE etc)
	 * @param path the URI path; this will automatically be resolved against the user's current instance host.
* @param httpEntity the request body if there is one, can be null.
	 * @param additionalHttpHeaders additional HTTP headers to add to the generated HTTP request, can be null.
* @return a RestResponse instance that has information about the HTTP response returned by the server.
*/
public RestResponse sendSync(RestMethod method, String path, HttpEntity httpEntity) throws IOException {
return sendSync(method, path, httpEntity, null);
}
/**
* Send an arbitrary HTTP request synchronously, using the given method, path, httpEntity and additionalHttpHeaders.
* Note: Cannot be used by code on the UI thread (use sendAsync instead).
*
* @param method the HTTP method for the request (GET/POST/DELETE etc)
	 * @param path the URI path; this will automatically be resolved against the user's current instance host.
* @param httpEntity the request body if there is one, can be null.
	 * @param additionalHttpHeaders additional HTTP headers to add to the generated HTTP request, can be null.
* @return a RestResponse instance that has information about the HTTP response returned by the server.
*
* @throws IOException
*/
public RestResponse sendSync(RestMethod method, String path, HttpEntity httpEntity, Map<String, String> additionalHttpHeaders) throws IOException {
return new RestResponse(httpStack.performRequest(method.asVolleyMethod(), clientInfo.resolveUrl(path), httpEntity, additionalHttpHeaders, true));
}
/**
* Only used in tests
* @param httpAccessor
*/
public void setHttpAccessor(HttpAccess httpAccessor) {
this.httpStack.setHttpAccessor(httpAccessor);
}
/**
* All immutable information for an authenticated client (e.g. username, org ID, etc.).
*/
public static class ClientInfo {
public final String clientId;
public final URI instanceUrl;
public final URI loginUrl;
public final URI identityUrl;
public final String accountName;
public final String username;
public final String userId;
public final String orgId;
public final String communityId;
public final String communityUrl;
/**
* Parameterized constructor.
*
* @param clientId Client ID.
* @param instanceUrl Instance URL.
* @param loginUrl Login URL.
* @param identityUrl Identity URL.
* @param accountName Account name.
* @param username User name.
* @param userId User ID.
* @param orgId Org ID.
* @param communityId Community ID.
* @param communityUrl Community URL.
*/
public ClientInfo(String clientId, URI instanceUrl, URI loginUrl,
URI identityUrl, String accountName, String username,
String userId, String orgId, String communityId, String communityUrl) {
this.clientId = clientId;
this.instanceUrl = instanceUrl;
this.loginUrl = loginUrl;
this.identityUrl = identityUrl;
this.accountName = accountName;
this.username = username;
this.userId = userId;
this.orgId = orgId;
this.communityId = communityId;
this.communityUrl = communityUrl;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(" ClientInfo: {\n")
.append(" loginUrl: ").append(loginUrl.toString()).append("\n")
.append(" identityUrl: ").append(identityUrl.toString()).append("\n")
.append(" instanceUrl: ").append(instanceUrl.toString()).append("\n")
.append(" accountName: ").append(accountName).append("\n")
.append(" username: ").append(username).append("\n")
.append(" userId: ").append(userId).append("\n")
.append(" orgId: ").append(orgId).append("\n")
.append(" communityId: ").append(communityId).append("\n")
.append(" communityUrl: ").append(communityUrl).append("\n")
.append(" }\n");
return sb.toString();
}
/**
* Returns a string representation of the instance URL. If this is a
* community user, the community URL will be returned. If not, the
* instance URL will be returned.
*
* @return Instance URL.
*/
public String getInstanceUrlAsString() {
if (communityUrl != null && !"".equals(communityUrl.trim())) {
return communityUrl;
}
return instanceUrl.toString();
}
/**
* Returns a URI representation of the instance URL. If this is a
* community user, the community URL will be returned. If not, the
* instance URL will be returned.
*
* @return Instance URL.
*/
public URI getInstanceUrl() {
if (communityUrl != null && !"".equals(communityUrl.trim())) {
URI uri = null;
try {
uri = new URI(communityUrl);
} catch (URISyntaxException e) {
Log.e("ClientInfo: getCommunityInstanceUrl",
"URISyntaxException thrown on URL: " + communityUrl);
}
return uri;
}
return instanceUrl;
}
/**
* Resolves the given path against the community URL or the instance
* URL, depending on whether the user is a community user or not.
*
* @param path Path.
* @return Resolved URL.
*/
public URI resolveUrl(String path) {
final StringBuilder commInstanceUrl = new StringBuilder();
if (communityUrl != null && !"".equals(communityUrl.trim())) {
commInstanceUrl.append(communityUrl);
} else {
commInstanceUrl.append(instanceUrl.toString());
}
if (!commInstanceUrl.toString().endsWith("/")) {
commInstanceUrl.append("/");
}
if (path.startsWith("/")) {
path = path.substring(1);
}
commInstanceUrl.append(path);
URI uri = null;
try {
uri = new URI(commInstanceUrl.toString());
} catch (URISyntaxException e) {
Log.e("ClientInfo: resolveUrl",
"URISyntaxException thrown on URL: " + commInstanceUrl.toString());
}
return uri;
}
}
/**
* HttpStack for talking to Salesforce (sets oauth header and does oauth refresh when needed)
*/
public static class SalesforceHttpStack implements HttpStack {
private final AuthTokenProvider authTokenProvider;
private HttpAccess httpAccessor;
private String authToken;
/**
		 * Constructs a SalesforceHttpStack with the given authToken, httpAccessor and authTokenProvider.
* When it gets a 401 (not authorized) response from the server:
* <ul>
* <li> If authTokenProvider is not null, it will ask the authTokenProvider for a new access token and retry the request a second time.</li>
* <li> Otherwise it will return the 401 response.</li>
* </ul>
* @param authToken
* @param httpAccessor
* @param authTokenProvider
*/
public SalesforceHttpStack(String authToken, HttpAccess httpAccessor, AuthTokenProvider authTokenProvider) {
this.authToken = authToken;
this.httpAccessor = httpAccessor;
this.authTokenProvider = authTokenProvider;
}
@Override
public HttpResponse performRequest(Request<?> request, Map<String, String> additionalHeaders)
throws IOException, AuthFailureError {
int method = request.getMethod();
URI url = URI.create(request.getUrl());
HttpEntity requestEntity = null;
if (request instanceof WrappedRestRequest) {
RestRequest restRequest = ((WrappedRestRequest) request).getRestRequest();
// To avoid httpEntity -> bytes -> httpEntity conversion
requestEntity = restRequest.getRequestEntity();
// Combine headers
if (restRequest.getAdditionalHttpHeaders() != null) {
if (additionalHeaders == null) {
additionalHeaders = restRequest.getAdditionalHttpHeaders();
}
else {
additionalHeaders = Maps.newHashMap(additionalHeaders);
additionalHeaders.putAll(restRequest.getAdditionalHttpHeaders());
}
}
}
else {
if (request.getBody() != null) {
requestEntity = new ByteArrayEntity(request.getBody());
}
}
return performRequest(method, url, requestEntity, additionalHeaders, true);
}
/**
* @return The authToken for this RestClient.
*/
public synchronized String getAuthToken() {
return authToken;
}
/**
* Change authToken for this RestClient
* @param newAuthToken
*/
private synchronized void setAuthToken(String newAuthToken) {
authToken = newAuthToken;
}
/**
* @return The refresh token, if available.
*/
public String getRefreshToken() {
return (authTokenProvider != null ? authTokenProvider.getRefreshToken() : null);
}
/**
* @return Elapsed time (ms) since the last refresh.
*/
public long getElapsedTimeSinceLastRefresh() {
long lastRefreshTime = (authTokenProvider != null ? authTokenProvider.getLastRefreshTime() : -1);
if (lastRefreshTime < 0) {
return -1;
}
else {
return System.currentTimeMillis() - lastRefreshTime;
}
}
/**
* Only used in tests
* @param httpAccessor
*/
public void setHttpAccessor(HttpAccess httpAccessor) {
this.httpAccessor = httpAccessor;
}
/**
* @param method
* @param url
* @param httpEntity
* @param additionalHttpHeaders
* @param retryInvalidToken
* @return
* @throws IOException
*/
public HttpResponse performRequest(int method, URI url, HttpEntity httpEntity, Map<String, String> additionalHttpHeaders, boolean retryInvalidToken) throws IOException {
Execution exec = null;
// Prepare headers
Map<String, String> headers = new HashMap<String, String>();
if (additionalHttpHeaders != null) {
headers.putAll(additionalHttpHeaders);
}
if (getAuthToken() != null) {
headers.put("Authorization", "Bearer " + authToken);
}
// Do the actual call
switch(method) {
case Request.Method.DELETE:
exec = httpAccessor.doDelete(headers, url); break;
case Request.Method.GET:
exec = httpAccessor.doGet(headers, url); break;
case RestMethod.MethodHEAD:
exec = httpAccessor.doHead(headers, url); break;
case RestMethod.MethodPATCH:
exec = httpAccessor.doPatch(headers, url, httpEntity); break;
case Request.Method.POST:
exec = httpAccessor.doPost(headers, url, httpEntity); break;
case Request.Method.PUT:
exec = httpAccessor.doPut(headers, url, httpEntity); break;
}
HttpResponse response = exec.response;
int statusCode = response.getStatusLine().getStatusCode();
			// 401 bad access token
if (statusCode == HttpStatus.SC_UNAUTHORIZED) {
// If we haven't retried already and we have an accessTokenProvider
// Then let's try to get a new authToken
if (retryInvalidToken && authTokenProvider != null) {
// remember to consume this response so the connection can get re-used
HttpEntity entity = response.getEntity();
if (entity != null && entity.isStreaming()) {
InputStream instream = entity.getContent();
if (instream != null) {
instream.close();
}
}
String newAuthToken = authTokenProvider.getNewAuthToken();
if (newAuthToken != null) {
setAuthToken(newAuthToken);
// Retry with the new authToken
return performRequest(method, url, httpEntity, additionalHttpHeaders, false);
}
}
}
// Done
return response;
}
}
/**
* A RestRequest wrapped in a Request<?>
*/
public static class WrappedRestRequest extends Request<RestResponse> {
private RestRequest restRequest;
private AsyncRequestCallback callback;
/**
* Constructor
* @param restRequest
* @param callback
*/
public WrappedRestRequest(ClientInfo clientInfo, RestRequest restRequest, final AsyncRequestCallback callback) {
super(restRequest.getMethod().asVolleyMethod(),
clientInfo.resolveUrl(restRequest.getPath()).toString(),
new Response.ErrorListener() {
@Override
public void onErrorResponse(VolleyError error) {
callback.onError(error);
}
});
this.restRequest = restRequest;
this.callback = callback;
}
public RestRequest getRestRequest() {
return restRequest;
}
public HttpEntity getRequestEntity() {
return restRequest.getRequestEntity();
}
@Override
public byte[] getBody() throws AuthFailureError {
try {
HttpEntity requestEntity = restRequest.getRequestEntity();
return requestEntity == null ? null : EntityUtils.toByteArray(requestEntity);
}
catch (IOException e) {
Log.e("WrappedRestRequest.getBody", "Could not read request entity", e);
return null;
}
}
@Override
public String getBodyContentType() {
HttpEntity requestEntity = restRequest.getRequestEntity();
Header contentType = requestEntity == null ? null : requestEntity.getContentType();
return (contentType == null ? "application/x-www-form-urlencoded" : contentType.getValue()) + "; charset=" + HTTP.UTF_8;
}
@Override
protected void deliverResponse(RestResponse restResponse) {
callback.onSuccess(restRequest, restResponse);
}
@Override
protected Response<RestResponse> parseNetworkResponse(
NetworkResponse networkResponse) {
return Response.success(new RestResponse(networkResponse),
HttpHeaderParser.parseCacheHeaders(networkResponse));
}
}
}
| 1 | 14,165 | Should it be 2011-14 instead ;-) | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -51,6 +51,10 @@ func New(cfg *any.Any, logger *zap.Logger, scope tally.Scope) (service.Service,
return nil, err
}
+ if pgcfg.MaxIdleConnections > 2 {
+ sqlDB.SetMaxIdleConns(int(pgcfg.MaxIdleConnections))
+ }
+
return &client{logger: logger, scope: scope, sqlDB: sqlDB}, nil
}
| 1 | package postgres
// <!-- START clutchdoc -->
// description: Provides a connection to the configured PostgreSQL database.
// <!-- END clutchdoc -->
import (
"database/sql"
"errors"
"fmt"
"strings"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
_ "github.com/lib/pq"
"github.com/uber-go/tally"
"go.uber.org/zap"
postgresv1 "github.com/lyft/clutch/backend/api/config/service/db/postgres/v1"
"github.com/lyft/clutch/backend/service"
)
const Name = "clutch.service.db.postgres"
type client struct {
sqlDB *sql.DB
logger *zap.Logger
scope tally.Scope
}
type Client interface {
DB() *sql.DB
}
func (c *client) DB() *sql.DB { return c.sqlDB }
func New(cfg *any.Any, logger *zap.Logger, scope tally.Scope) (service.Service, error) {
pgcfg := &postgresv1.Config{}
err := ptypes.UnmarshalAny(cfg, pgcfg)
if err != nil {
return nil, err
}
connection, err := connString(pgcfg.Connection)
if err != nil {
return nil, err
}
sqlDB, err := sql.Open("postgres", connection)
if err != nil {
return nil, err
}
return &client{logger: logger, scope: scope, sqlDB: sqlDB}, nil
}
func connString(cfg *postgresv1.Connection) (string, error) {
if cfg == nil {
return "", errors.New("no connection information")
}
connection := fmt.Sprintf(
"host=%s port=%d dbname=%s user=%s",
cfg.Host, cfg.Port, cfg.Dbname, cfg.User,
)
switch cfg.GetSslMode() {
case postgresv1.Connection_UNSPECIFIED:
break
default:
mode := strings.ReplaceAll(strings.ToLower(cfg.SslMode.String()), "_", "-")
connection += fmt.Sprintf(" sslmode=%s", mode)
}
switch cfg.GetAuthn().(type) {
case *postgresv1.Connection_Password:
connection += fmt.Sprintf(" password=%s", cfg.GetPassword())
default:
break
}
return connection, nil
}
| 1 | 10,401 | I want to write some type of test for this, but there are no getter methods to assert this value. I tried to extract the value via the stats that are exposed, without luck. | lyft-clutch | go |
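The review comment above asks how the new SetMaxIdleConns call could be asserted in a test. A minimal, hypothetical Go sketch (not part of the Clutch codebase) illustrating the limitation the reviewer ran into: database/sql exposes no getter for the configured max-idle value, so a test can only observe it indirectly through the counters in sql.DBStats, such as Idle and MaxIdleClosed.

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // same driver the client above registers
)

func main() {
	// sql.Open only validates the DSN; no connection is established yet.
	db, err := sql.Open("postgres", "host=localhost dbname=example user=example")
	if err != nil {
		panic(err)
	}
	// Mirrors the patch above: raise the idle-connection limit from the default of 2.
	db.SetMaxIdleConns(10)

	// DBStats reports counters (Idle, MaxIdleClosed, ...) but not the configured
	// limit itself, which matches the reviewer's observation.
	stats := db.Stats()
	fmt.Println(stats.Idle, stats.MaxIdleClosed)
}

In practice a test along these lines would need to drive real connections before those counters move, which is why the configured value cannot be asserted directly from the exposed stats.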
@@ -574,6 +574,15 @@ class ApiClient(object):
group_key (str): key of the group to get.
"""
+ @abc.abstractmethod
+ def iter_gsuite_group_settings(self, gsuite_id):
+ """Iterate Gsuite group settings from GCP API.
+
+ Args:
+ gsuite_id (str): Gsuite id.
+ """
+
+
@abc.abstractmethod
def iter_gsuite_groups(self, gsuite_id):
"""Iterate Gsuite groups from GCP API. | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCP API client fassade."""
# pylint: disable=invalid-name,too-many-lines
# pylint: disable=too-many-public-methods,too-many-instance-attributes
import abc
from google.cloud.forseti.common.gcp_api import admin_directory
from google.cloud.forseti.common.gcp_api import appengine
from google.cloud.forseti.common.gcp_api import bigquery
from google.cloud.forseti.common.gcp_api import cloud_resource_manager
from google.cloud.forseti.common.gcp_api import cloudbilling
from google.cloud.forseti.common.gcp_api import cloudsql
from google.cloud.forseti.common.gcp_api import compute
from google.cloud.forseti.common.gcp_api import container
from google.cloud.forseti.common.gcp_api import iam
from google.cloud.forseti.common.gcp_api import servicemanagement
from google.cloud.forseti.common.gcp_api import stackdriver_logging
from google.cloud.forseti.common.gcp_api import storage
class ResourceNotSupported(Exception):
"""Exception raised for resources not supported by the API client."""
class ApiClient(object):
"""The gcp api client interface"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def fetch_bigquery_dataset_policy(self, project_id,
project_number, dataset_id):
"""Dataset policy Iterator for a dataset from gcp API call.
Args:
project_id (str): id of the project to query.
project_number (str): number of the project to query.
dataset_id (str): id of the dataset to query.
"""
@abc.abstractmethod
def fetch_bigquery_iam_policy(self, project_id, project_number, dataset_id):
"""Gets IAM policy of a bigquery dataset from gcp API call.
Args:
project_id (str): id of the project to query.
project_number (str): number of the project to query.
dataset_id (str): id of the dataset to query.
"""
@abc.abstractmethod
def iter_bigquery_datasets(self, project_number):
"""Iterate Datasets from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_bigquery_tables(self, dataset_reference):
"""Iterate Tables from GCP API.
Args:
dataset_reference (dict): The project and dataset ID to get
bigquery tables.
"""
@abc.abstractmethod
def fetch_billing_account_iam_policy(self, account_id):
"""Gets IAM policy of a Billing Account from GCP API.
Args:
account_id (str): id of the billing account to get policy.
"""
@abc.abstractmethod
def fetch_billing_project_info(self, project_number):
"""Project Billing Info from gcp API call.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_billing_accounts(self):
"""Iterate visible Billing Accounts in an organization from GCP API."""
@abc.abstractmethod
def iter_cloudsql_instances(self, project_number):
"""Iterate Cloud sql instances from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def is_compute_api_enabled(self, project_number):
"""Verifies the Compute API is enabled on a project.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def fetch_compute_ig_instances(self, project_number, instance_group_name,
region=None, zone=None):
"""Get the instances for an instance group from GCP API.
One and only one of zone (for zonal instance groups) and region
(for regional instance groups) must be specified.
Args:
project_number (str): number of the project to query.
instance_group_name (str): The instance group's name.
region (str): The regional instance group's region.
zone (str): The zonal instance group's zone.
"""
@abc.abstractmethod
def fetch_compute_project(self, project_number):
"""Fetch compute project data from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_autoscalers(self, project_number):
"""Iterate Autoscalers from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_backendbuckets(self, project_number):
"""Iterate Backend buckets from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_backendservices(self, project_number):
"""Iterate Backend services from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_disks(self, project_number):
"""Iterate Compute Engine disks from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_firewalls(self, project_number):
"""Iterate Compute Engine Firewalls from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_forwardingrules(self, project_number):
"""Iterate Forwarding Rules from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_healthchecks(self, project_number):
"""Iterate Health checks from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_httphealthchecks(self, project_number):
"""Iterate HTTP Health checks from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_httpshealthchecks(self, project_number):
"""Iterate HTTPS Health checks from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_ig_managers(self, project_number):
"""Iterate Instance Group Manager from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_images(self, project_number):
"""Iterate Images from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_instancegroups(self, project_number):
"""Iterate Compute Engine groups from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_instances(self, project_number):
"""Iterate compute engine instance from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_instancetemplates(self, project_number):
"""Iterate Instance Templates from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_licenses(self, project_number):
"""Iterate Licenses from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_networks(self, project_number):
"""Iterate Networks from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_project(self, project_number):
"""Iterate Project from GCP API.
Will only ever return up to 1 result. Ensures compatibility with other
resource iterators.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_routers(self, project_number):
"""Iterate Compute Engine routers from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_snapshots(self, project_number):
"""Iterate Compute Engine snapshots from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_sslcertificates(self, project_number):
"""Iterate SSL Certificates from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_subnetworks(self, project_number):
"""Iterate Subnetworks from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_targethttpproxies(self, project_number):
"""Iterate Target HTTP proxies from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_targethttpsproxies(self, project_number):
"""Iterate Target HTTPS proxies from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_targetinstances(self, project_number):
"""Iterate Target Instances from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_targetpools(self, project_number):
"""Iterate Target Pools from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_targetsslproxies(self, project_number):
"""Iterate Target SSL proxies from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_targettcpproxies(self, project_number):
"""Iterate Target TCP proxies from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_targetvpngateways(self, project_number):
"""Iterate Target VPN Gateways from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_urlmaps(self, project_number):
"""Iterate URL maps from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_compute_vpntunnels(self, project_number):
"""Iterate VPN tunnels from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def fetch_container_serviceconfig(self, project_id, zone=None,
location=None):
"""Fetch Kubernetes Engine per zone service config from GCP API.
Args:
project_id (str): id of the project to query.
zone (str): zone of the Kubernetes Engine.
location (str): location of the Kubernetes Engine.
"""
@abc.abstractmethod
def iter_container_clusters(self, project_number):
"""Iterate Kubernetes Engine Cluster from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def fetch_crm_folder(self, folder_id):
"""Fetch Folder data from GCP API.
Args:
folder_id (str): id of the folder to query.
"""
@abc.abstractmethod
def fetch_crm_folder_iam_policy(self, folder_id):
"""Folder IAM policy in a folder from gcp API call.
Args:
folder_id (str): id of the folder to get policy.
"""
@abc.abstractmethod
def fetch_crm_organization(self, org_id):
"""Fetch Organization data from GCP API.
Args:
org_id (str): id of the organization to get.
"""
@abc.abstractmethod
def fetch_crm_organization_iam_policy(self, org_id):
"""Organization IAM policy from gcp API call.
Args:
org_id (str): id of the organization to get policy.
"""
@abc.abstractmethod
def fetch_crm_project(self, project_number):
"""Fetch Project data from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def fetch_crm_project_iam_policy(self, project_number):
"""Project IAM policy from gcp API call.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_crm_folder_org_policies(self, folder_id):
"""Folder organization policies from gcp API call.
Args:
folder_id (str): id of the folder to get policy.
"""
@abc.abstractmethod
def iter_crm_folders(self, parent_id):
"""Iterate Folders from GCP API.
Args:
parent_id (str): id of the parent of the folder.
"""
@abc.abstractmethod
def iter_crm_organization_org_policies(self, org_id):
"""Organization organization policies from gcp API call.
Args:
org_id (str): id of the organization to get policy.
"""
@abc.abstractmethod
def iter_crm_project_liens(self, project_number):
"""Iterate Liens from GCP API.
Args:
project_number (str): number of the parent project of the lien.
"""
@abc.abstractmethod
def iter_crm_project_org_policies(self, project_number):
"""Project organization policies from gcp API call.
Args:
project_number (str): number of the parent project of the policy.
"""
@abc.abstractmethod
def iter_crm_projects(self, parent_type, parent_id):
"""Iterate Projects from GCP API.
Args:
parent_type (str): type of the parent, "folder" or "organization".
parent_id (str): id of the parent of the folder.
"""
@abc.abstractmethod
def fetch_dataproc_cluster_iam_policy(self, cluster):
"""Fetch Dataproc Cluster IAM Policy from GCP API.
Args:
cluster (str): The Dataproc cluster to query, must be in the format
projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}
"""
@abc.abstractmethod
def iter_dataproc_clusters(self, project_id, region=None):
"""Iterate Dataproc clusters from GCP API.
Args:
project_id (str): id of the project to query.
region (str): The region to query. Not required when using Cloud
Asset API.
"""
@abc.abstractmethod
def iter_dns_managedzones(self, project_number):
"""Iterate CloudDNS Managed Zones from GCP API.
Args:
project_number (str): number of the parent project.
"""
@abc.abstractmethod
def iter_dns_policies(self, project_number):
"""Iterate CloudDNS Policies from GCP API.
Args:
project_number (str): number of the parent project of the policy.
"""
@abc.abstractmethod
def fetch_gae_app(self, project_id):
"""Fetch the AppEngine App.
Args:
project_id (str): id of the project to query.
"""
@abc.abstractmethod
def iter_gae_instances(self, project_id, service_id, version_id):
"""Iterate gae instances from GCP API.
Args:
project_id (str): id of the project to query.
service_id (str): id of the appengine service.
version_id (str): id of the appengine version.
"""
@abc.abstractmethod
def iter_gae_services(self, project_id):
"""Iterate gae services from GCP API.
Args:
project_id (str): id of the project to query.
"""
@abc.abstractmethod
def iter_gae_versions(self, project_id, service_id):
"""Iterate gae versions from GCP API.
Args:
project_id (str): id of the project to query.
service_id (str): id of the appengine service.
"""
@abc.abstractmethod
def iter_gsuite_group_members(self, group_key):
"""Iterate Gsuite group members from GCP API.
Args:
group_key (str): key of the group to get.
"""
@abc.abstractmethod
def iter_gsuite_groups(self, gsuite_id):
"""Iterate Gsuite groups from GCP API.
Args:
gsuite_id (str): Gsuite id.
"""
@abc.abstractmethod
def iter_gsuite_users(self, gsuite_id):
"""Iterate Gsuite users from GCP API.
Args:
gsuite_id (str): Gsuite id.
"""
@abc.abstractmethod
def fetch_iam_serviceaccount_iam_policy(self, name, unique_id):
"""Service Account IAM policy from gcp API call.
Args:
name (str): The service account name to query, must be in the format
projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}
unique_id (str): The unique id of the service account.
"""
@abc.abstractmethod
def iter_iam_curated_roles(self):
"""Iterate Curated roles in an organization from GCP API.
"""
@abc.abstractmethod
def iter_iam_organization_roles(self, org_id):
"""Iterate Organization roles from GCP API.
Args:
org_id (str): id of the organization to get.
"""
@abc.abstractmethod
def iter_iam_project_roles(self, project_id, project_number):
"""Iterate Project roles in a project from GCP API.
Args:
project_id (str): id of the project to query.
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_iam_serviceaccount_exported_keys(self, name):
"""Iterate Service Account User Managed Keys from GCP API.
Args:
name (str): name of the service account.
"""
@abc.abstractmethod
def iter_iam_serviceaccounts(self, project_id, project_number):
"""Iterate Service Accounts in a project from GCP API.
Args:
project_id (str): id of the project to query.
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def fetch_kms_cryptokey_iam_policy(self, cryptokey):
"""Fetch KMS Cryptokey IAM Policy from GCP API.
Args:
cryptokey (str): The KMS cryptokey to query, must be in the format
projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}/
cryptoKeys/{CRYPTOKEY_NAME}
"""
@abc.abstractmethod
def fetch_kms_keyring_iam_policy(self, keyring):
"""Fetch KMS Keyring IAM Policy from GCP API.
Args:
keyring (str): The KMS keyring to query, must be in the format
projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}
"""
@abc.abstractmethod
def iter_kms_cryptokeys(self, parent):
"""Iterate KMS Cryptokeys in a keyring from GCP API.
Args:
parent (str): The KMS keyring to query, must be in the format
projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}
"""
@abc.abstractmethod
def iter_kms_cryptokeyversions(self, parent):
"""Iterate KMS Cryptokey Versions from GCP API.
Args:
parent (str): The KMS keyring to query, must be in the format
projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}/
cryptoKeys/{CRYPTOKEY_NAME}
"""
@abc.abstractmethod
def iter_kms_keyrings(self, project_id, location=None):
"""Iterate KMS Keyrings in a project from GCP API.
Args:
project_id (str): id of the project to query.
location (str): The location to query. Not required when
using Cloud Asset API.
"""
@abc.abstractmethod
def fetch_pubsub_subscription_iam_policy(self, name):
"""PubSub Subscription IAM policy from gcp API call.
Args:
name (str): The pubsub topic to query, must be in the format
projects/{PROJECT_ID}/subscriptions/{SUBSCRIPTION_NAME}
"""
@abc.abstractmethod
def fetch_pubsub_topic_iam_policy(self, name):
"""PubSub Topic IAM policy from gcp API call.
Args:
name (str): The pubsub topic to query, must be in the format
projects/{PROJECT_ID}/topics/{TOPIC_NAME}
"""
@abc.abstractmethod
def iter_pubsub_subscriptions(self, project_id, project_number):
"""Iterate PubSub subscriptions from GCP API.
Args:
project_id (str): id of the project to query.
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_pubsub_topics(self, project_id, project_number):
"""Iterate PubSub topics from GCP API.
Args:
project_id (str): id of the project to query.
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def fetch_services_enabled_apis(self, project_number):
"""Project enabled API services from gcp API call.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_spanner_instances(self, project_number):
"""Iterate Spanner Instances from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_spanner_databases(self, parent):
"""Iterate Spanner Databases from GCP API.
Args:
parent (str): parent spanner instance to query.
"""
@abc.abstractmethod
def iter_stackdriver_billing_account_sinks(self, acct_id):
"""Iterate Billing Account logging sinks from GCP API.
Args:
acct_id (str): id of the billing account to query.
"""
@abc.abstractmethod
def iter_stackdriver_folder_sinks(self, folder_id):
"""Iterate Folder logging sinks from GCP API.
Args:
folder_id (str): id of the folder to query.
"""
@abc.abstractmethod
def iter_stackdriver_organization_sinks(self, org_id):
"""Iterate Organization logging sinks from GCP API.
Args:
org_id (str): id of the organization to query.
"""
@abc.abstractmethod
def iter_stackdriver_project_sinks(self, project_number):
"""Iterate Project logging sinks from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def fetch_storage_bucket_acls(self, bucket_id, project_id, project_number):
"""Bucket Access Controls from GCP API.
Args:
bucket_id (str): id of the bucket to query.
project_id (str): id of the project to query.
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def fetch_storage_bucket_iam_policy(self, bucket_id):
"""Bucket IAM policy Iterator from gcp API call.
Args:
bucket_id (str): id of the bucket to query.
"""
@abc.abstractmethod
def fetch_storage_object_iam_policy(self, bucket_name, object_name):
"""Object IAM policy Iterator for an object from gcp API call.
Args:
bucket_name (str): name of the bucket.
object_name (str): name of the object.
"""
@abc.abstractmethod
def iter_storage_buckets(self, project_number):
"""Iterate Buckets from GCP API.
Args:
project_number (str): number of the project to query.
"""
@abc.abstractmethod
def iter_storage_objects(self, bucket_id):
"""Iterate Objects from GCP API.
Args:
bucket_id (str): id of the bucket to get.
"""
def create_lazy(attribute, factory):
"""Create attributes right before they are needed.
Args:
attribute (str): Attribute name to check/create.
factory (function): Factory to create object.
Returns:
function: Decorator.
"""
def f_wrapper(func):
"""Create decorator.
Args:
func (function): Function to wrap.
Returns:
function: Decorator.
"""
def wrapper(*args, **kwargs):
"""Decorator implementation.
Args:
*args (list): Original func arguments.
**kwargs (dict): Original func arguments.
Returns:
object: Result produced by the wrapped func.
"""
this = args[0]
if not hasattr(this, attribute) or not getattr(this, attribute):
setattr(this, attribute, factory(this))
return func(*args, **kwargs)
return wrapper
return f_wrapper
def is_api_disabled(config, api_name):
"""Check if api_name is disabled in the config.
Args:
config (dict): GCP API client configuration.
api_name (str): The name of the GCP api to check.
Returns:
bool: True if the API is disabled in the configuration, else False.
"""
return config.get(api_name, {}).get('disable_polling', False)
class ApiClientImpl(ApiClient):
"""The gcp api client Implementation"""
def __init__(self, config):
"""Initialize.
Args:
config (dict): GCP API client configuration.
"""
self.ad = None
self.appengine = None
self.bigquery = None
self.crm = None
self.cloudbilling = None
self.cloudsql = None
self.compute = None
self.container = None
self.iam = None
self.servicemanagement = None
self.stackdriver_logging = None
self.storage = None
self.config = config
def _create_ad(self):
"""Create admin directory API client.
Returns:
object: Client.
Raises:
ResourceNotSupported: Raised if polling is disabled for this API in
the GCP API client configuration.
"""
if is_api_disabled(self.config, admin_directory.API_NAME):
raise ResourceNotSupported('Admin API disabled by server '
'configuration.')
return admin_directory.AdminDirectoryClient(self.config)
def _create_appengine(self):
"""Create AppEngine API client.
Returns:
object: Client.
Raises:
ResourceNotSupported: Raised if polling is disabled for this API in
the GCP API client configuration.
"""
if is_api_disabled(self.config, appengine.API_NAME):
raise ResourceNotSupported('AppEngine API disabled by server '
'configuration.')
return appengine.AppEngineClient(self.config)
def _create_bq(self):
"""Create bigquery API client.
Returns:
object: Client.
Raises:
ResourceNotSupported: Raised if polling is disabled for this API in
the GCP API client configuration.
"""
if is_api_disabled(self.config, bigquery.API_NAME):
raise ResourceNotSupported('Bigquery API disabled by server '
'configuration.')
return bigquery.BigQueryClient(self.config)
def _create_crm(self):
"""Create resource manager API client.
Returns:
object: Client.
Raises:
ResourceNotSupported: Raised if polling is disabled for this API in
the GCP API client configuration.
"""
if is_api_disabled(self.config, cloud_resource_manager.API_NAME):
raise ResourceNotSupported('Resource Manager API disabled by '
'server configuration.')
return cloud_resource_manager.CloudResourceManagerClient(self.config)
def _create_cloudbilling(self):
"""Create cloud billing API client.
Returns:
object: Client.
Raises:
ResourceNotSupported: Raised if polling is disabled for this API in
the GCP API client configuration.
"""
if is_api_disabled(self.config, cloudbilling.API_NAME):
raise ResourceNotSupported('Cloud Billing API disabled by server '
'configuration.')
return cloudbilling.CloudBillingClient(self.config)
def _create_cloudsql(self):
"""Create cloud sql API client.
Returns:
object: Client.
Raises:
ResourceNotSupported: Raised if polling is disabled for this API in
the GCP API client configuration.
"""
if is_api_disabled(self.config, cloudsql.API_NAME):
raise ResourceNotSupported('CloudSQL Admin API disabled by server '
'configuration.')
return cloudsql.CloudsqlClient(self.config)
def _create_compute(self):
"""Create compute API client.
Returns:
object: Client.
Raises:
ResourceNotSupported: Raised if polling is disabled for this API in
the GCP API client configuration.
"""
if is_api_disabled(self.config, compute.API_NAME):
raise ResourceNotSupported('Compute Engine API disabled by server '
'configuration.')
return compute.ComputeClient(self.config)
def _create_container(self):
"""Create Kubernetes Engine API client.
Returns:
object: Client.
Raises:
ResourceNotSupported: Raised if polling is disabled for this API in
the GCP API client configuration.
"""
if is_api_disabled(self.config, container.API_NAME):
raise ResourceNotSupported('Kubernetes Engine API disabled by '
'server configuration.')
return container.ContainerClient(self.config)
def _create_iam(self):
"""Create IAM API client.
Returns:
object: Client.
Raises:
ResourceNotSupported: Raised if polling is disabled for this API in
the GCP API client configuration.
"""
if is_api_disabled(self.config, iam.API_NAME):
raise ResourceNotSupported('IAM API disabled by server '
'configuration.')
return iam.IAMClient(self.config)
def _create_servicemanagement(self):
"""Create servicemanagement API client.
Returns:
object: Client.
Raises:
ResourceNotSupported: Raised if polling is disabled for this API in
the GCP API client configuration.
"""
if is_api_disabled(self.config, servicemanagement.API_NAME):
raise ResourceNotSupported('Service Management API disabled by '
'server configuration.')
return servicemanagement.ServiceManagementClient(self.config)
def _create_stackdriver_logging(self):
"""Create stackdriver_logging API client.
Returns:
object: Client.
Raises:
ResourceNotSupported: Raised if polling is disabled for this API in
the GCP API client configuration.
"""
if is_api_disabled(self.config, stackdriver_logging.API_NAME):
raise ResourceNotSupported('Stackdriver Logging API disabled by '
'server configuration.')
return stackdriver_logging.StackdriverLoggingClient(self.config)
def _create_storage(self):
"""Create storage API client.
Returns:
object: Client.
Raises:
ResourceNotSupported: Raised if polling is disabled for this API in
the GCP API client configuration.
"""
if is_api_disabled(self.config, storage.API_NAME):
raise ResourceNotSupported('Storage API disabled by server '
'configuration.')
return storage.StorageClient(self.config)
@create_lazy('bigquery', _create_bq)
def fetch_bigquery_dataset_policy(self, project_id,
project_number, dataset_id):
"""Dataset policy Iterator for a dataset from gcp API call.
Args:
project_id (str): id of the project to query.
project_number (str): number of the project to query.
dataset_id (str): id of the dataset to query.
Returns:
dict: Dataset Policy.
"""
del project_id
return self.bigquery.get_dataset_access(project_number, dataset_id)
def fetch_bigquery_iam_policy(self, project_id, project_number, dataset_id):
"""Gets IAM policy of a bigquery dataset from gcp API call.
Args:
project_id (str): id of the project to query.
project_number (str): number of the project to query.
dataset_id (str): id of the dataset to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Bigquery Dataset IAM policy is not '
'supported by this API client')
@create_lazy('bigquery', _create_bq)
def iter_bigquery_datasets(self, project_number):
"""Iterate Datasets from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of datasets.
"""
for dataset in self.bigquery.get_datasets_for_projectid(project_number):
yield dataset
@create_lazy('bigquery', _create_bq)
def iter_bigquery_tables(self, dataset_reference):
"""Iterate Tables from GCP API.
Args:
dataset_reference (dict): The project and dataset ID to get
bigquery tables.
Yields:
dict: Generator of tables.
"""
for table in self.bigquery.get_tables(dataset_reference['projectId'],
dataset_reference['datasetId']):
yield table
@create_lazy('cloudbilling', _create_cloudbilling)
def fetch_billing_account_iam_policy(self, account_id):
"""Gets IAM policy of a Billing Account from GCP API.
Args:
account_id (str): id of the billing account to get policy.
Returns:
dict: Billing Account IAM policy.
"""
return self.cloudbilling.get_billing_acct_iam_policies(account_id)
@create_lazy('cloudbilling', _create_cloudbilling)
def fetch_billing_project_info(self, project_number):
"""Project Billing Info from gcp API call.
Args:
project_number (str): number of the project to query.
Returns:
dict: Project Billing Info resource.
"""
return self.cloudbilling.get_billing_info(project_number)
@create_lazy('cloudbilling', _create_cloudbilling)
def iter_billing_accounts(self):
"""Iterate visible Billing Accounts in an organization from GCP API.
Yields:
dict: Generator of billing accounts.
"""
for account in self.cloudbilling.get_billing_accounts():
yield account
@create_lazy('cloudsql', _create_cloudsql)
def iter_cloudsql_instances(self, project_number):
"""Iterate Cloud sql instances from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of cloudsql instance.
"""
for item in self.cloudsql.get_instances(project_number):
yield item
@create_lazy('compute', _create_compute)
def is_compute_api_enabled(self, project_number):
"""Verifies the Compute API is enabled on a project.
Args:
project_number (str): number of the project to query.
Returns:
bool: True if API is enabled, else False.
"""
return self.compute.is_api_enabled(project_number)
@create_lazy('compute', _create_compute)
def fetch_compute_ig_instances(self, project_number, instance_group_name,
region=None, zone=None):
"""Get the instances for an instance group from GCP API.
One and only one of zone (for zonal instance groups) and region
(for regional instance groups) must be specified.
Args:
project_number (str): number of the project to query.
instance_group_name (str): The instance group's name.
region (str): The regional instance group's region.
zone (str): The zonal instance group's zone.
Returns:
list: instance URLs for this instance group.
"""
return self.compute.get_instance_group_instances(project_number,
instance_group_name,
region,
zone)
@create_lazy('compute', _create_compute)
def fetch_compute_project(self, project_number):
"""Fetch compute project data from GCP API.
Args:
project_number (str): number of the project to query.
Returns:
dict: Compute project metadata resource.
"""
return self.compute.get_project(project_number)
def iter_compute_autoscalers(self, project_number):
"""Iterate Autoscalers from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute Autoscalers are not supported by '
'this API client')
def iter_compute_backendbuckets(self, project_number):
"""Iterate Backend buckets from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute BackendBuckets are not supported '
'by this API client')
@create_lazy('compute', _create_compute)
def iter_compute_backendservices(self, project_number):
"""Iterate Backend services from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of backend service.
"""
for backendservice in self.compute.get_backend_services(project_number):
yield backendservice
@create_lazy('compute', _create_compute)
def iter_compute_disks(self, project_number):
"""Iterate Compute Engine disks from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of Compute Disk.
"""
for disk in self.compute.get_disks(project_number):
yield disk
@create_lazy('compute', _create_compute)
def iter_compute_firewalls(self, project_number):
"""Iterate Compute Engine Firewalls from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of Compute Engine Firewall.
"""
for rule in self.compute.get_firewall_rules(project_number):
yield rule
@create_lazy('compute', _create_compute)
def iter_compute_forwardingrules(self, project_number):
"""Iterate Forwarding Rules from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of forwarding rule resources.
"""
for forwardingrule in self.compute.get_forwarding_rules(project_number):
yield forwardingrule
def iter_compute_healthchecks(self, project_number):
"""Iterate Health checks from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute HealthChecks are not supported by '
'this API client')
def iter_compute_httphealthchecks(self, project_number):
"""Iterate HTTP Health checks from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute HttpHealthChecks are not supported '
'by this API client')
def iter_compute_httpshealthchecks(self, project_number):
"""Iterate HTTPS Health checks from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute HttpsHealthChecks are not '
'supported by this API client')
@create_lazy('compute', _create_compute)
def iter_compute_ig_managers(self, project_number):
"""Iterate Instance Group Manager from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of instance group manager resources.
"""
for igmanager in self.compute.get_instance_group_managers(
project_number):
yield igmanager
@create_lazy('compute', _create_compute)
def iter_compute_images(self, project_number):
"""Iterate Images from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of image resources.
"""
for image in self.compute.get_images(project_number):
yield image
@create_lazy('compute', _create_compute)
def iter_compute_instancegroups(self, project_number):
"""Iterate Compute Engine groups from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of Compute Instance group.
"""
for instancegroup in self.compute.get_instance_groups(
project_number, include_instance_urls=False):
yield instancegroup
@create_lazy('compute', _create_compute)
def iter_compute_instances(self, project_number):
"""Iterate compute engine instance from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of Compute Engine Instance.
"""
for instance in self.compute.get_instances(project_number):
yield instance
@create_lazy('compute', _create_compute)
def iter_compute_instancetemplates(self, project_number):
"""Iterate Instance Templates from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of instance template resources.
"""
for instancetemplate in self.compute.get_instance_templates(
project_number):
yield instancetemplate
def iter_compute_licenses(self, project_number):
"""Iterate Licenses from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute Licenses are not supported by '
'this API client')
@create_lazy('compute', _create_compute)
def iter_compute_networks(self, project_number):
"""Iterate Networks from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of network resources.
"""
for network in self.compute.get_networks(project_number):
yield network
def iter_compute_project(self, project_number):
"""Iterate Project from GCP API.
Will only ever return up to 1 result. Ensures compatibility with other
resource iterators.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of compute project resources.
"""
yield self.fetch_compute_project(project_number)
def iter_compute_routers(self, project_number):
"""Iterate Compute Engine routers from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute Routers are not supported '
'by this API client')
@create_lazy('compute', _create_compute)
def iter_compute_snapshots(self, project_number):
"""Iterate Compute Engine snapshots from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of Compute Snapshots.
"""
for snapshot in self.compute.get_snapshots(project_number):
yield snapshot
def iter_compute_sslcertificates(self, project_number):
"""Iterate SSL Certificates from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute SslCertificates are not supported '
'by this API client')
@create_lazy('compute', _create_compute)
def iter_compute_subnetworks(self, project_number):
"""Iterate Subnetworks from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of subnetwork resources.
"""
for subnetwork in self.compute.get_subnetworks(project_number):
yield subnetwork
def iter_compute_targethttpproxies(self, project_number):
"""Iterate Target HTTP proxies from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute TargetHttpProxies are not '
'supported by this API client')
def iter_compute_targethttpsproxies(self, project_number):
"""Iterate Target HTTPS proxies from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute TargetHttpsProxies are not '
'supported by this API client')
def iter_compute_targetinstances(self, project_number):
"""Iterate Target Instances from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute TargetInstances are not '
'supported by this API client')
def iter_compute_targetpools(self, project_number):
"""Iterate Target Pools from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute TargetPools are not '
'supported by this API client')
def iter_compute_targetsslproxies(self, project_number):
"""Iterate Target SSL proxies from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute TargetSslProxies are not '
'supported by this API client')
def iter_compute_targettcpproxies(self, project_number):
"""Iterate Target TCP proxies from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute TargetTcpProxies are not '
'supported by this API client')
def iter_compute_targetvpngateways(self, project_number):
"""Iterate Target VPN Gateways from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute TargetVpnGateways are not '
'supported by this API client')
def iter_compute_urlmaps(self, project_number):
"""Iterate URL maps from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute UrlMaps are not supported by this '
'API client')
def iter_compute_vpntunnels(self, project_number):
"""Iterate VPN tunnels from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Compute VpnTunnels are not supported by '
'this API client')
@create_lazy('container', _create_container)
def fetch_container_serviceconfig(self, project_id, zone=None,
location=None):
"""Fetch Kubernetes Engine per zone service config from GCP API.
Args:
project_id (str): id of the project to query.
zone (str): zone of the Kubernetes Engine.
location (str): location of the Kubernetes Engine.
Returns:
dict: Kubernetes Engine server config for the given zone or location.
"""
return self.container.get_serverconfig(project_id, zone=zone,
location=location)
@create_lazy('container', _create_container)
def iter_container_clusters(self, project_number):
"""Iterate Kubernetes Engine Cluster from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of Kubernetes Engine Cluster resources.
"""
for cluster in self.container.get_clusters(project_number):
# Don't store the master auth data in the database.
if 'masterAuth' in cluster:
cluster['masterAuth'] = {
k: '[redacted]'
for k in cluster['masterAuth'].keys()}
yield cluster
@create_lazy('crm', _create_crm)
def fetch_crm_folder(self, folder_id):
"""Fetch Folder data from GCP API.
Args:
folder_id (str): id of the folder to query.
Returns:
dict: The folder resource.
"""
return self.crm.get_folder(folder_id)
@create_lazy('crm', _create_crm)
def fetch_crm_folder_iam_policy(self, folder_id):
"""Folder IAM policy in a folder from gcp API call.
Args:
folder_id (str): id of the folder to get policy.
Returns:
dict: Folder IAM policy.
"""
return self.crm.get_folder_iam_policies(folder_id)
@create_lazy('crm', _create_crm)
def fetch_crm_organization(self, org_id):
"""Fetch Organization data from GCP API.
Args:
org_id (str): id of the organization to get.
Returns:
dict: The organization resource.
"""
return self.crm.get_organization(org_id)
@create_lazy('crm', _create_crm)
def fetch_crm_organization_iam_policy(self, org_id):
"""Organization IAM policy from gcp API call.
Args:
org_id (str): id of the organization to get policy.
Returns:
dict: Organization IAM policy.
"""
return self.crm.get_org_iam_policies(org_id)
@create_lazy('crm', _create_crm)
def fetch_crm_project(self, project_number):
"""Fetch Project data from GCP API.
Args:
project_number (str): number of the project to query.
Returns:
dict: The project resource.
"""
return self.crm.get_project(project_number)
@create_lazy('crm', _create_crm)
def fetch_crm_project_iam_policy(self, project_number):
"""Project IAM policy from gcp API call.
Args:
project_number (str): number of the project to query.
Returns:
dict: Project IAM Policy.
"""
return self.crm.get_project_iam_policies(project_number)
@create_lazy('crm', _create_crm)
def iter_crm_folder_org_policies(self, folder_id):
"""Folder organization policies from gcp API call.
Args:
folder_id (str): id of the folder to get policy.
Yields:
dict: Generator of org policies.
"""
for org_policy in self.crm.get_folder_org_policies(folder_id):
yield org_policy
@create_lazy('crm', _create_crm)
def iter_crm_folders(self, parent_id):
"""Iterate Folders from GCP API.
Args:
parent_id (str): id of the parent of the folder.
Yields:
dict: Generator of folders.
"""
for folder in self.crm.get_folders(parent_id):
yield folder
@create_lazy('crm', _create_crm)
def iter_crm_organization_org_policies(self, org_id):
"""Organization organization policies from gcp API call.
Args:
org_id (str): id of the organization to get policy.
Yields:
dict: Generator of org policies.
"""
for org_policy in self.crm.get_org_org_policies(org_id):
yield org_policy
@create_lazy('crm', _create_crm)
def iter_crm_project_liens(self, project_number):
"""Iterate Liens from GCP API.
Args:
project_number (str): number of the parent project of the lien.
Yields:
dict: Generator of liens.
"""
for lien in self.crm.get_project_liens(project_number):
yield lien
@create_lazy('crm', _create_crm)
def iter_crm_project_org_policies(self, project_number):
"""Project organization policies from gcp API call.
Args:
project_number (str): number of the parent project of the policy.
Yields:
dict: Generator of org policies.
"""
for org_policy in self.crm.get_project_org_policies(project_number):
yield org_policy
@create_lazy('crm', _create_crm)
def iter_crm_projects(self, parent_type, parent_id):
"""Iterate Projects from GCP API.
Args:
parent_type (str): type of the parent, "folder" or "organization".
parent_id (str): id of the parent of the folder.
Yields:
dict: Generator of projects.
"""
for page in self.crm.get_projects(parent_id=parent_id,
parent_type=parent_type):
for project in page.get('projects', []):
yield project
def fetch_dataproc_cluster_iam_policy(self, cluster):
"""Fetch Dataproc Cluster IAM Policy from GCP API.
Args:
cluster (str): The Dataproc cluster to query, must be in the format
projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Cloud Dataproc Clusters are not supported '
'by this API client')
def iter_dataproc_clusters(self, project_id, region=None):
"""Iterate Dataproc clusters from GCP API.
Args:
project_id (str): id of the project to query.
region (str): The region to query. Not required when using Cloud
Asset API.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Cloud Dataproc Clusters are not supported '
'by this API client')
def iter_dns_managedzones(self, project_number):
"""Iterate CloudDNS Managed Zones from GCP API.
Args:
project_number (str): number of the parent project.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Cloud DNS Managed Zones are not supported '
'by this API client')
def iter_dns_policies(self, project_number):
"""Iterate CloudDNS Policies from GCP API.
Args:
project_number (str): number of the parent project of the policy.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Cloud DNS Policies are not supported by '
'this API client')
@create_lazy('appengine', _create_appengine)
def fetch_gae_app(self, project_id):
"""Fetch the AppEngine App.
Args:
project_id (str): id of the project to query.
Returns:
dict: AppEngine App resource.
"""
return self.appengine.get_app(project_id)
@create_lazy('appengine', _create_appengine)
def iter_gae_instances(self, project_id, service_id, version_id):
"""Iterate gae instances from GCP API.
Args:
project_id (str): id of the project to query.
service_id (str): id of the appengine service.
version_id (str): version id of the appengine.
Yields:
dict: Generator of AppEngine Instance resources.
"""
for instance in self.appengine.list_instances(
project_id, service_id, version_id):
yield instance
@create_lazy('appengine', _create_appengine)
def iter_gae_services(self, project_id):
"""Iterate gae services from GCP API.
Args:
project_id (str): id of the project to query.
Yields:
dict: Generator of AppEngine Service resources.
"""
for service in self.appengine.list_services(project_id):
yield service
@create_lazy('appengine', _create_appengine)
def iter_gae_versions(self, project_id, service_id):
"""Iterate gae versions from GCP API.
Args:
project_id (str): id of the project to query.
service_id (str): id of the appengine service.
Yields:
dict: Generator of AppEngine Version resources.
"""
for version in self.appengine.list_versions(project_id, service_id):
yield version
@create_lazy('ad', _create_ad)
def iter_gsuite_group_members(self, group_key):
"""Iterate Gsuite group members from GCP API.
Args:
group_key (str): key of the group to get.
Yields:
dict: Generator of group_member
"""
for member in self.ad.get_group_members(group_key):
yield member
@create_lazy('ad', _create_ad)
def iter_gsuite_groups(self, gsuite_id):
"""Iterate Gsuite groups from GCP API.
Args:
gsuite_id (str): Gsuite id.
Yields:
dict: Generator of groups.
"""
result = self.ad.get_groups(gsuite_id)
for group in result:
yield group
@create_lazy('ad', _create_ad)
def iter_gsuite_users(self, gsuite_id):
"""Iterate Gsuite users from GCP API.
Args:
gsuite_id (str): Gsuite id.
Yields:
dict: Generator of user.
"""
for user in self.ad.get_users(gsuite_id):
yield user
@create_lazy('iam', _create_iam)
def fetch_iam_serviceaccount_iam_policy(self, name, unique_id):
"""Service Account IAM policy from gcp API call.
Args:
name (str): The service account name to query, must be in the format
projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}
unique_id (str): The unique id of the service account.
Returns:
dict: Service Account IAM policy.
"""
del unique_id # Used by CAI, not the API.
return self.iam.get_service_account_iam_policy(name)
@create_lazy('iam', _create_iam)
def iter_iam_curated_roles(self):
"""Iterate Curated roles in an organization from GCP API.
Yields:
dict: Generator of curated roles.
"""
for role in self.iam.get_curated_roles():
yield role
@create_lazy('iam', _create_iam)
def iter_iam_organization_roles(self, org_id):
"""Iterate Organization roles from GCP API.
Args:
org_id (str): id of the organization to get.
Yields:
dict: Generator of organization role.
"""
for role in self.iam.get_organization_roles(org_id):
yield role
@create_lazy('iam', _create_iam)
def iter_iam_project_roles(self, project_id, project_number):
"""Iterate Project roles in a project from GCP API.
Args:
project_id (str): id of the project to query.
project_number (str): number of the project to query.
Yields:
dict: Generator of project roles.
"""
del project_number # Used by CAI, not the API.
for role in self.iam.get_project_roles(project_id):
yield role
@create_lazy('iam', _create_iam)
def iter_iam_serviceaccount_exported_keys(self, name):
"""Iterate Service Account User Managed Keys from GCP API.
Args:
name (str): name of the service account.
Yields:
dict: Generator of service account user managed (exported) keys
"""
for key in self.iam.get_service_account_keys(
name, key_type=iam.IAMClient.USER_MANAGED):
yield key
@create_lazy('iam', _create_iam)
def iter_iam_serviceaccounts(self, project_id, project_number):
"""Iterate Service Accounts in a project from GCP API.
Args:
project_id (str): id of the project to query.
project_number (str): number of the project to query.
Yields:
dict: Generator of service account.
"""
del project_number # Used by CAI, not the API.
for serviceaccount in self.iam.get_service_accounts(project_id):
yield serviceaccount
def fetch_kms_cryptokey_iam_policy(self, cryptokey):
"""Fetch KMS Cryptokey IAM Policy from GCP API.
Args:
cryptokey (str): The KMS cryptokey to query, must be in the format
projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}/
cryptoKeys/{CRYPTOKEY_NAME}
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Key Management Service is not supported by '
'this API client')
def fetch_kms_keyring_iam_policy(self, keyring):
"""Fetch KMS Keyring IAM Policy from GCP API.
Args:
keyring (str): The KMS keyring to query, must be in the format
projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Key Management Service is not supported by '
'this API client')
def iter_kms_cryptokeys(self, parent):
"""Iterate KMS Cryptokeys in a keyring from GCP API.
Args:
parent (str): The KMS keyring to query, must be in the format
projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Key Management Service is not supported by '
'this API client')
def iter_kms_cryptokeyversions(self, parent):
"""Iterate KMS Cryptokey Versions from GCP API.
Args:
parent (str): The KMS keyring to query, must be in the format
projects/{PROJECT_ID}/locations/{LOCATION}/keyRings/{RING_NAME}/
cryptoKeys/{CRYPTOKEY_NAME}
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Key Management Service is not supported by '
'this API client')
def iter_kms_keyrings(self, project_id, location=None):
"""Iterate KMS Keyrings in a project from GCP API.
Args:
project_id (str): id of the project to query.
location (str): The location to query. Not required when
using Cloud Asset API.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Key Management Service is not supported by '
'this API client')
def fetch_pubsub_subscription_iam_policy(self, name):
"""PubSub Subscription IAM policy from gcp API call.
Args:
name (str): The pubsub topic to query, must be in the format
projects/{PROJECT_ID}/subscriptions/{SUBSCRIPTION_NAME}
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('PubSub Subscriptions are not supported by '
'this API client')
def fetch_pubsub_topic_iam_policy(self, name):
"""PubSub Topic IAM policy from gcp API call.
Args:
name (str): The pubsub topic to query, must be in the format
projects/{PROJECT_ID}/topics/{TOPIC_NAME}
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('PubSub Topics are not supported by this '
'API client')
def iter_pubsub_subscriptions(self, project_id, project_number):
"""Iterate PubSub subscriptions from GCP API.
Args:
project_id (str): id of the project to query.
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('PubSub Subscriptions are not supported by '
'this API client')
def iter_pubsub_topics(self, project_id, project_number):
"""Iterate PubSub topics from GCP API.
Args:
project_id (str): id of the project to query.
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('PubSub Topics are not supported by this '
'API client')
@create_lazy('servicemanagement', _create_servicemanagement)
def fetch_services_enabled_apis(self, project_number):
"""Project enabled API services from gcp API call.
Args:
project_number (str): number of the project to query.
Returns:
list: A list of ManagedService resource dicts.
"""
return self.servicemanagement.get_enabled_apis(project_number)
def iter_spanner_instances(self, project_number):
"""Iterate Spanner Instances from GCP API.
Args:
project_number (str): number of the project to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Spanner Instances are not supported by '
'this API client')
def iter_spanner_databases(self, parent):
"""Iterate Spanner Databases from GCP API.
Args:
parent (str): parent spanner instance to query.
Raises:
ResourceNotSupported: Raised for all calls using this class.
"""
raise ResourceNotSupported('Spanner Databases are not supported by '
'this API client')
@create_lazy('stackdriver_logging', _create_stackdriver_logging)
def iter_stackdriver_billing_account_sinks(self, acct_id):
"""Iterate Billing Account logging sinks from GCP API.
Args:
acct_id (str): id of the billing account to query.
Yields:
dict: Generator of billing account logging sinks.
"""
for sink in self.stackdriver_logging.get_billing_account_sinks(acct_id):
yield sink
@create_lazy('stackdriver_logging', _create_stackdriver_logging)
def iter_stackdriver_folder_sinks(self, folder_id):
"""Iterate Folder logging sinks from GCP API.
Args:
folder_id (str): id of the folder to query.
Yields:
dict: Generator of folder logging sinks.
"""
for sink in self.stackdriver_logging.get_folder_sinks(folder_id):
yield sink
@create_lazy('stackdriver_logging', _create_stackdriver_logging)
def iter_stackdriver_organization_sinks(self, org_id):
"""Iterate Organization logging sinks from GCP API.
Args:
org_id (str): id of the organization to query.
Yields:
dict: Generator of organization logging sinks.
"""
for sink in self.stackdriver_logging.get_organization_sinks(org_id):
yield sink
@create_lazy('stackdriver_logging', _create_stackdriver_logging)
def iter_stackdriver_project_sinks(self, project_number):
"""Iterate Project logging sinks from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of project logging sinks.
"""
for sink in self.stackdriver_logging.get_project_sinks(project_number):
yield sink
@create_lazy('storage', _create_storage)
def fetch_storage_bucket_acls(self, bucket_id, project_id, project_number):
"""Bucket Access Controls from GCP API.
Args:
bucket_id (str): id of the bucket to query.
project_id (str): id of the project to query.
project_number (str): number of the project to query.
Returns:
list: Bucket Access Controls.
"""
del project_id, project_number
return self.storage.get_bucket_acls(bucket_id)
@create_lazy('storage', _create_storage)
def fetch_storage_bucket_iam_policy(self, bucket_id):
"""Bucket IAM policy Iterator from gcp API call.
Args:
bucket_id (str): id of the bucket to query.
Returns:
dict: Bucket IAM policy.
"""
return self.storage.get_bucket_iam_policy(bucket_id)
@create_lazy('storage', _create_storage)
def fetch_storage_object_iam_policy(self, bucket_name, object_name):
"""Object IAM policy Iterator for an object from gcp API call.
Args:
bucket_name (str): name of the bucket.
object_name (str): name of the object.
Returns:
dict: Object IAM policy.
"""
return self.storage.get_storage_object_iam_policy(bucket_name,
object_name)
@create_lazy('storage', _create_storage)
def iter_storage_buckets(self, project_number):
"""Iterate Buckets from GCP API.
Args:
project_number (str): number of the project to query.
Yields:
dict: Generator of buckets.
"""
for bucket in self.storage.get_buckets(project_number):
yield bucket
@create_lazy('storage', _create_storage)
def iter_storage_objects(self, bucket_id):
"""Iterate Objects from GCP API.
Args:
bucket_id (str): id of the bucket to get.
Yields:
dict: Generator of objects.
"""
for gcs_object in self.storage.get_objects(bucket_name=bucket_id):
yield gcs_object
| 1 | 33,388 | This needs to take the group id, not the gsuite id. | forseti-security-forseti-security | py |
@@ -47,8 +47,8 @@ namespace Microsoft.CodeAnalysis.Sarif.Converters
}
private const string EmptyResult = @"{
- ""$schema"": ""http://json.schemastore.org/sarif-1.0.0-beta.5"",
- ""version"": ""1.0.0-beta.5"",
+ ""$schema"": ""http://json.schemastore.org/sarif-1.0.0"",
+ ""version"": ""1.0.0"",
""runs"": [
{
""tool"": { | 1 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.IO;
using System.Linq;
using System.Xml;
using FluentAssertions;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using Microsoft.CodeAnalysis.Sarif.Writers;
namespace Microsoft.CodeAnalysis.Sarif.Converters
{
[TestClass]
public class AndroidStudioConverterTests
{
private AndroidStudioConverter _converter = null;
[TestInitialize]
public void Initialize()
{
_converter = new AndroidStudioConverter();
}
[TestMethod]
[ExpectedException(typeof(ArgumentNullException))]
public void AndroidStudioConverter_Convert_Nulls()
{
_converter.Convert(null, null);
}
[TestMethod]
[ExpectedException(typeof(ArgumentNullException))]
public void AndroidStudioConverter_Convert_NullInput()
{
_converter.Convert(null, new ResultLogObjectWriter());
}
[TestMethod]
[ExpectedException(typeof(ArgumentNullException))]
public void AndroidStudioConverter_Convert_NullOutput()
{
_converter.Convert(new MemoryStream(), null);
}
private const string EmptyResult = @"{
""$schema"": ""http://json.schemastore.org/sarif-1.0.0-beta.5"",
""version"": ""1.0.0-beta.5"",
""runs"": [
{
""tool"": {
""name"": ""AndroidStudio""
},
""results"": []
}
]
}";
[TestMethod]
public void AndroidStudioConverter_Convert_NoResults()
{
string androidStudioLog = @"<?xml version=""1.0"" encoding=""UTF-8""?><problems></problems>";
string actualJson = Utilities.GetConverterJson(_converter, androidStudioLog);
Assert.AreEqual(EmptyResult, actualJson);
}
[TestMethod]
public void AndroidStudioConverter_Convert_EmptyResult()
{
string androidStudioLog = @"<?xml version=""1.0"" encoding=""UTF-8""?><problems><problem></problem></problems>";
string actualJson = Utilities.GetConverterJson(_converter, androidStudioLog);
Assert.AreEqual(EmptyResult, actualJson);
}
[TestMethod]
public void AndroidStudioConverter_Convert_EmptyResultSingleTag()
{
string androidStudioLog = @"<?xml version=""1.0"" encoding=""UTF-8""?><problems><problem /></problems>";
string actualJson = Utilities.GetConverterJson(_converter, androidStudioLog);
Assert.AreEqual(EmptyResult, actualJson);
}
[TestMethod]
[ExpectedException(typeof(XmlException))]
public void AndroidStudioConverter_Convert_InvalidLineNumber()
{
string androidStudioLog = @"<?xml version=""1.0"" encoding=""UTF-8""?><problems><problem><file>file://$PROJECT_DIR$/northwindtraders-droid-2/src/main/java/com/northwindtraders/droid/model/Model.java</file><line>NotALineNumber</line><module>northwindtraders-droid-2</module><package>com.northwindtraders.droid.model</package><entry_point TYPE=""file"" FQNAME=""file://$PROJECT_DIR$/northwindtraders-droid-2/src/main/java/com/northwindtraders/droid/model/Model.java""/><problem_class severity=""WARNING"" attribute_key=""WARNING_ATTRIBUTES"">Assertions</problem_class><hints/><description>Assertions are unreliable. Use BuildConfig.DEBUG conditional checks instead.</description></problem></problems>";
Utilities.GetConverterJson(_converter, androidStudioLog);
}
[TestMethod]
[ExpectedException(typeof(XmlException))]
public void AndroidStudioConverter_Convert_NonXmlNodeTypeText_ChildElements()
{
string androidStudioLog = @"<?xml version=""1.0"" encoding=""UTF-8""?><problems><problem><file><child_file>file://$PROJECT_DIR$/</child_file></file><line>1</line><module>northwindtraders-droid-2</module><package>com.northwindtraders.droid.model</package><entry_point TYPE=""file"" FQNAME=""file://$PROJECT_DIR$/northwindtraders-droid-2/src/main/java/com/northwindtraders/droid/model/Model.java""/><problem_class severity=""WARNING"" attribute_key=""WARNING_ATTRIBUTES"">Assertions</problem_class><hints/><description>Assertions are unreliable. Use BuildConfig.DEBUG conditional checks instead.</description></problem></problems>";
Utilities.GetConverterJson(_converter, androidStudioLog);
}
[TestMethod]
public void AndroidStudioConverter_GetShortDescription_UsesDescriptionIfPresent()
{
var builder = AndroidStudioProblemTests.GetDefaultProblemBuilder();
builder.Description = "Cute fluffy kittens";
var uut = new AndroidStudioProblem(builder);
string result = AndroidStudioConverter.GetShortDescriptionForProblem(uut);
Assert.AreEqual("Cute fluffy kittens", result);
}
[TestMethod]
public void AndroidStudioConverter_GetShortDescription_UsesProblemClassIfDescriptionNotPresent()
{
var uut = AndroidStudioProblemTests.GetDefaultProblem();
string result = AndroidStudioConverter.GetShortDescriptionForProblem(uut);
result.Should().Contain("A Problematic Problem");
}
[TestMethod]
public void AndroidStudioConverter_ConvertToSarifResult_EmptyHintsDoNotAffectDescription()
{
var builder = AndroidStudioProblemTests.GetDefaultProblemBuilder();
builder.Description = "hungry EVIL zombies";
var uut = new AndroidStudioProblem(builder);
Result result = new AndroidStudioConverter().ConvertProblemToSarifResult(uut);
Assert.AreEqual("hungry EVIL zombies", result.Message);
}
[TestMethod]
public void AndroidStudioConverter_ConvertToSarifResult_HintsAreStapledToDescription()
{
var builder = new AndroidStudioProblem.Builder
{
ProblemClass = "Unused",
File = "Unused",
Description = "hungry EVIL zombies",
Hints = ImmutableArray.Create("comment", "delete")
};
var uut = new AndroidStudioProblem(builder);
Result result = new AndroidStudioConverter().ConvertProblemToSarifResult(uut);
Assert.AreEqual(@"hungry EVIL zombies
Possible resolution: comment
Possible resolution: delete", result.Message);
}
[TestMethod]
public void AndroidStudioConverter_ConvertToSarifResult_UsesProblemClassForRuleId()
{
var uut = AndroidStudioProblemTests.GetDefaultProblem();
Result result = new AndroidStudioConverter().ConvertProblemToSarifResult(uut);
Assert.AreEqual("A Problematic Problem", result.RuleId);
}
[TestMethod]
public void AndroidStudioConverter_ConvertToSarifResult_HasNoPropertiesIfAttributeKeyAndSeverity()
{
var uut = AndroidStudioProblemTests.GetDefaultProblem();
Result result = new AndroidStudioConverter().ConvertProblemToSarifResult(uut);
Assert.IsNull(result.Properties);
}
[TestMethod]
public void AndroidStudioConverter_ConvertToSarifResult_AttributeKeyIsPersistedInProperties()
{
var builder = AndroidStudioProblemTests.GetDefaultProblemBuilder();
builder.AttributeKey = "key";
var uut = new AndroidStudioProblem(builder);
Result result = new AndroidStudioConverter().ConvertProblemToSarifResult(uut);
result.PropertyNames.Count.Should().Be(1);
result.GetProperty("attributeKey").Should().Be("key");
}
[TestMethod]
public void AndroidStudioConverter_ConvertToSarifResult_SeverityIsPersistedInProperties()
{
var builder = AndroidStudioProblemTests.GetDefaultProblemBuilder();
builder.Severity = "warning";
var uut = new AndroidStudioProblem(builder);
Result result = new AndroidStudioConverter().ConvertProblemToSarifResult(uut);
result.PropertyNames.Count.Should().Be(1);
result.GetProperty("severity").Should().Be("warning");
}
[TestMethod]
public void AndroidStudioConverter_ConvertToSarifResult_MultiplePropertiesArePersisted()
{
var builder = AndroidStudioProblemTests.GetDefaultProblemBuilder();
builder.AttributeKey = "key";
builder.Severity = "warning";
var uut = new AndroidStudioProblem(builder);
Result result = new AndroidStudioConverter().ConvertProblemToSarifResult(uut);
result.PropertyNames.Count.Should().Be(2);
result.GetProperty("severity").Should().Be("warning");
result.GetProperty("attributeKey").Should().Be("key");
}
[TestMethod]
public void AndroidStudioConverter_ConvertSarifResult_RecordsTopLevelFileAsSourceFile()
{
var builder = AndroidStudioProblemTests.GetDefaultProblemBuilder();
builder.File = "expected_file.java";
builder.EntryPointType = "file";
builder.EntryPointName = "bad_file.java";
Location loc = GetLocationInfoForBuilder(builder).Location;
loc.ResultFile.Uri.ToString().Should().Be("expected_file.java");
}
[TestMethod]
public void AndroidStudioConverter_ConvertSarifResult_DoesNotRecordTopLevelEntryPointAsSourceFile()
{
var builder = AndroidStudioProblemTests.GetDefaultProblemBuilder();
builder.File = null;
builder.EntryPointType = "file";
builder.EntryPointName = "expected_file.java";
Location loc = GetLocationInfoForBuilder(builder).Location;
loc.ResultFile.Should().BeNull();
}
[TestMethod]
public void AndroidStudioConverter_ConvertSarifResult_RecordsModuleAsTopLevelIfPresent()
{
var builder = AndroidStudioProblemTests.GetDefaultProblemBuilder();
builder.File = null;
builder.Module = "my_fancy_binary";
builder.EntryPointType = "method";
builder.EntryPointName = "my_method";
var expectedLocation = new Location
{
FullyQualifiedLogicalName = "my_fancy_binary\\my_method",
};
var expectedLogicalLocations = new Dictionary<string, LogicalLocation>
{
{
"my_fancy_binary", new LogicalLocation { ParentKey = null, Name = "my_fancy_binary", Kind = LogicalLocationKind.Module }
},
{
@"my_fancy_binary\my_method",
new LogicalLocation { ParentKey = "my_fancy_binary", Name = "my_method", Kind = LogicalLocationKind.Member }
},
};
var converter = new AndroidStudioConverter();
Result result = converter.ConvertProblemToSarifResult(new AndroidStudioProblem(builder));
result.Locations[0].ValueEquals(expectedLocation).Should().BeTrue();
foreach (string key in expectedLogicalLocations.Keys)
{
expectedLogicalLocations[key].ValueEquals(converter.LogicalLocationsDictionary[key]).Should().BeTrue();
}
converter.LogicalLocationsDictionary.Count.Should().Be(expectedLogicalLocations.Count);
}
[TestMethod]
public void AndroidStudioConverter_ConvertSarifResult_GeneratesLocationWithOnlyMethodEntryPoint()
{
var builder = AndroidStudioProblemTests.GetDefaultProblemBuilder();
builder.File = null;
builder.Package = null;
builder.Module = null;
builder.EntryPointType = "method";
builder.EntryPointName = "my_method";
var expectedLocation = new Location
{
FullyQualifiedLogicalName = "my_method"
};
var expectedLogicalLocations = new Dictionary<string, LogicalLocation>
{
{
"my_method", new LogicalLocation { ParentKey = null, Name = "my_method", Kind = LogicalLocationKind.Member }
},
};
var converter = new AndroidStudioConverter();
Result result = converter.ConvertProblemToSarifResult(new AndroidStudioProblem(builder));
result.Locations[0].ValueEquals(expectedLocation).Should().BeTrue();
foreach (string key in expectedLogicalLocations.Keys)
{
expectedLogicalLocations[key].ValueEquals(converter.LogicalLocationsDictionary[key]).Should().BeTrue();
}
converter.LogicalLocationsDictionary.Count.Should().Be(expectedLogicalLocations.Count);
}
[TestMethod]
public void AndroidStudioConverter_ConvertSarifResult_GeneratesLocationWithMethodEntryPointAndPackage()
{
var builder = AndroidStudioProblemTests.GetDefaultProblemBuilder();
builder.File = null;
builder.Package = "FancyPackageName";
builder.Module = null;
builder.EntryPointType = "method";
builder.EntryPointName = "my_method";
var expectedLocation = new Location
{
FullyQualifiedLogicalName = "FancyPackageName\\my_method"
};
var expectedLogicalLocations = new Dictionary<string, LogicalLocation>
{
{
"FancyPackageName", new LogicalLocation { ParentKey = null, Name = "FancyPackageName", Kind = LogicalLocationKind.Package }
},
{
@"FancyPackageName\my_method", new LogicalLocation { ParentKey = "FancyPackageName", Name = "my_method", Kind = LogicalLocationKind.Member }
},
};
var converter = new AndroidStudioConverter();
Result result = converter.ConvertProblemToSarifResult(new AndroidStudioProblem(builder));
result.Locations[0].ValueEquals(expectedLocation).Should().BeTrue();
foreach (string key in expectedLogicalLocations.Keys)
{
expectedLogicalLocations[key].ValueEquals(converter.LogicalLocationsDictionary[key]).Should().BeTrue();
}
converter.LogicalLocationsDictionary.Count.Should().Be(expectedLogicalLocations.Count);
}
[TestMethod]
public void AndroidStudioConverter_ConvertSarifResult_GeneratesLocationWithOnlyPackage()
{
var builder = AndroidStudioProblemTests.GetDefaultProblemBuilder();
builder.File = null;
builder.Package = "FancyPackageName";
builder.Module = null;
builder.EntryPointName = null;
var expectedLocation = new Location
{
FullyQualifiedLogicalName = "FancyPackageName"
};
var expectedLogicalLocations = new Dictionary<string, LogicalLocation>
{
{
"FancyPackageName", new LogicalLocation { ParentKey = null, Name = "FancyPackageName", Kind = LogicalLocationKind.Package }
}
};
var converter = new AndroidStudioConverter();
Result result = converter.ConvertProblemToSarifResult(new AndroidStudioProblem(builder));
result.Locations[0].ValueEquals(expectedLocation).Should().BeTrue();
foreach (string key in expectedLogicalLocations.Keys)
{
expectedLogicalLocations[key].ValueEquals(converter.LogicalLocationsDictionary[key]).Should().BeTrue();
}
converter.LogicalLocationsDictionary.Count.Should().Be(expectedLogicalLocations.Count);
}
[TestMethod]
public void AndroidStudioConverter_ConvertSarifResult_CanRecordSourceFileAndModule()
{
var builder = AndroidStudioProblemTests.GetDefaultProblemBuilder();
builder.File = "File Goes Here";
builder.Package = null;
builder.Module = "LastResortModule";
builder.EntryPointName = null;
var expectedLocation = new Location
{
ResultFile = new PhysicalLocation
{
Uri = new Uri("File Goes Here", UriKind.RelativeOrAbsolute),
},
FullyQualifiedLogicalName = "LastResortModule"
};
var expectedLogicalLocations = new Dictionary<string, LogicalLocation>
{
{
"LastResortModule", new LogicalLocation { ParentKey = null, Name = "LastResortModule", Kind = LogicalLocationKind.Module }
}
};
var converter = new AndroidStudioConverter();
Result result = converter.ConvertProblemToSarifResult(new AndroidStudioProblem(builder));
result.Locations[0].ValueEquals(expectedLocation).Should().BeTrue();
foreach (string key in expectedLogicalLocations.Keys)
{
expectedLogicalLocations[key].ValueEquals(converter.LogicalLocationsDictionary[key]).Should().BeTrue();
}
converter.LogicalLocationsDictionary.Count.Should().Be(expectedLogicalLocations.Count);
}
[TestMethod]
public void AndroidStudioConverter_ConvertSarifResult_RemovesProjectDirPrefix()
{
var builder = AndroidStudioProblemTests.GetDefaultProblemBuilder();
builder.File = "file://$PROJECT_DIR$/mydir/myfile.xml";
LocationInfo locationInfo = GetLocationInfoForBuilder(builder);
locationInfo.Location.ResultFile.Uri.ToString().Should().Be("mydir/myfile.xml");
}
[TestMethod]
public void AndroidStudioConverter_ConvertSarifResult_PersistsSourceLineInfo()
{
var builder = AndroidStudioProblemTests.GetDefaultProblemBuilder();
builder.Line = 42;
LocationInfo locationInfo = GetLocationInfoForBuilder(builder);
locationInfo.Location.ResultFile.Region.StartLine.Should().Be(42);
}
private struct LocationInfo
{
public Location Location;
public LogicalLocation LogicalLocation;
}
private static LocationInfo GetLocationInfoForBuilder(AndroidStudioProblem.Builder builder)
{
var converter = new AndroidStudioConverter();
Result result = converter.ConvertProblemToSarifResult(new AndroidStudioProblem(builder));
Location location = result.Locations.First();
string logicalLocationKey = converter.LogicalLocationsDictionary.Keys.SingleOrDefault();
LogicalLocation logicalLocation = logicalLocationKey != null
? converter.LogicalLocationsDictionary[logicalLocationKey]
: null;
return new LocationInfo
{
Location = location,
LogicalLocation = logicalLocation
};
}
}
}
| 1 | 10,873 | These should use the constants defined in JsonTests.cs | microsoft-sarif-sdk | .cs |
@@ -67,7 +67,7 @@ function DashboardClicksWidget() {
if ( error ) {
trackEvent( 'plugin_setup', 'search_console_error', error.message );
- return getDataErrorComponent( __( 'Search Console', 'google-site-kit' ), error.message );
+ return getDataErrorComponent( 'search-console', error );
}
if ( ! data || ! data.length ) { | 1 | /**
* DashboardClicksWidget component.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import { __, _x } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import Data from 'googlesitekit-data';
import { STORE_NAME } from '../../datastore/constants';
import { STORE_NAME as CORE_SITE } from '../../../../googlesitekit/datastore/site/constants';
import { STORE_NAME as CORE_USER } from '../../../../googlesitekit/datastore/user/constants';
import { extractForSparkline, getSiteKitAdminURL } from '../../../../util';
import { trackEvent } from '../../../../util/tracking';
import { extractSearchConsoleDashboardData } from '../../util';
import whenActive from '../../../../util/when-active';
import DataBlock from '../../../../components/data-block';
import Sparkline from '../../../../components/sparkline';
import PreviewBlock from '../../../../components/preview-block';
import getDataErrorComponent from '../../../../components/notifications/data-error';
import getNoDataComponent from '../../../../components/notifications/nodata';
const { useSelect } = Data;
function DashboardClicksWidget() {
const { data, error, loading } = useSelect( ( select ) => {
const store = select( STORE_NAME );
const args = {
dimensions: 'date',
compareDateRanges: true,
dateRange: select( CORE_USER ).getDateRange(),
};
const url = select( CORE_SITE ).getCurrentEntityURL();
if ( url ) {
args.url = url;
}
return {
data: store.getReport( args ),
error: store.getErrorForSelector( 'getReport', [ args ] ),
loading: store.isResolving( 'getReport', [ args ] ),
};
} );
if ( loading ) {
return <PreviewBlock width="100%" height="202px" />;
}
if ( error ) {
trackEvent( 'plugin_setup', 'search_console_error', error.message );
return getDataErrorComponent( __( 'Search Console', 'google-site-kit' ), error.message );
}
if ( ! data || ! data.length ) {
return getNoDataComponent( __( 'Search Console', 'google-site-kit' ) );
}
const href = getSiteKitAdminURL( 'googlesitekit-module-search-console', {} );
const { totalClicks, totalClicksChange, dataMap } = extractSearchConsoleDashboardData( data );
return (
<div className="mdc-layout-grid__cell mdc-layout-grid__cell--align-bottom mdc-layout-grid__cell--span-2-phone mdc-layout-grid__cell--span-2-tablet mdc-layout-grid__cell--span-3-desktop">
<DataBlock
className="overview-total-clicks"
title={ __( 'Clicks', 'google-site-kit' ) }
datapoint={ totalClicks }
change={ totalClicksChange }
changeDataUnit="%"
source={ {
name: _x( 'Search Console', 'Service name', 'google-site-kit' ),
link: href,
} }
sparkline={
<Sparkline
data={ extractForSparkline( dataMap, 1 ) }
change={ totalClicksChange }
/>
}
/>
</div>
);
}
export default whenActive( { moduleName: 'search-console' } )( DashboardClicksWidget );
| 1 | 31,967 | Kind of unrelated to this PR, but let's update this to `getDataErrorComponent( 'search-console', error.message, false, false, false, error )` so that everything is passed as expected. | google-site-kit-wp | js |
@@ -38,6 +38,15 @@ class LoadImageFromFile(object):
self.file_client = None
def __call__(self, results):
+ """Call functions to load image and get image meta information.
+
+ Args:
+ results (dict): Result dict from :obj:`dataset`.
+
+ Returns:
+ dict: The dict contains loaded image and meta information.
+ """
+
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
| 1 | import os.path as osp
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
from mmdet.core import BitmapMasks, PolygonMasks
from ..builder import PIPELINES
@PIPELINES.register_module()
class LoadImageFromFile(object):
"""Load an image from file.
Required keys are "img_prefix" and "img_info" (a dict that must contain the
key "filename"). Added or updated keys are "filename", "img", "img_shape",
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
"scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
numpy array. If set to False, the loaded image is an uint8 array.
Defaults to False.
color_type (str): The flag argument for :func:`mmcv.imfrombytes()`.
Defaults to 'color'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
"""
def __init__(self,
to_float32=False,
color_type='color',
file_client_args=dict(backend='disk')):
self.to_float32 = to_float32
self.color_type = color_type
self.file_client_args = file_client_args.copy()
self.file_client = None
def __call__(self, results):
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results['img_prefix'] is not None:
filename = osp.join(results['img_prefix'],
results['img_info']['filename'])
else:
filename = results['img_info']['filename']
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, flag=self.color_type)
if self.to_float32:
img = img.astype(np.float32)
results['filename'] = filename
results['ori_filename'] = results['img_info']['filename']
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results['img_norm_cfg'] = dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False)
results['img_fields'] = ['img']
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'to_float32={self.to_float32}, '
f"color_type='{self.color_type}', "
f'file_client_args={self.file_client_args})')
return repr_str
@PIPELINES.register_module()
class LoadMultiChannelImageFromFiles(object):
"""Load multi-channel images from a list of separate channel files.
Required keys are "img_prefix" and "img_info" (a dict that must contain the
key "filename", which is expected to be a list of filenames).
Added or updated keys are "filename", "img", "img_shape",
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
"scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
numpy array. If set to False, the loaded image is an uint8 array.
Defaults to False.
color_type (str): The flag argument for :func:`mmcv.imfrombytes()`.
Defaults to 'color'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
"""
def __init__(self,
to_float32=False,
color_type='unchanged',
file_client_args=dict(backend='disk')):
self.to_float32 = to_float32
self.color_type = color_type
self.file_client_args = file_client_args.copy()
self.file_client = None
def __call__(self, results):
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results['img_prefix'] is not None:
filename = [
osp.join(results['img_prefix'], fname)
for fname in results['img_info']['filename']
]
else:
filename = results['img_info']['filename']
img = []
for name in filename:
img_bytes = self.file_client.get(name)
img.append(mmcv.imfrombytes(img_bytes, flag=self.color_type))
img = np.stack(img, axis=-1)
if self.to_float32:
img = img.astype(np.float32)
results['filename'] = filename
results['ori_filename'] = results['img_info']['filename']
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results['img_norm_cfg'] = dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'to_float32={self.to_float32}, '
f"color_type='{self.color_type}', "
f'file_client_args={self.file_client_args})')
return repr_str
@PIPELINES.register_module()
class LoadAnnotations(object):
"""Load annotations.
Args:
with_bbox (bool): Whether to parse and load the bbox annotation.
Default: True.
with_label (bool): Whether to parse and load the label annotation.
Default: True.
with_mask (bool): Whether to parse and load the mask annotation.
Default: False.
with_seg (bool): Whether to parse and load the semantic segmentation
annotation. Default: False.
poly2mask (bool): Whether to convert the instance masks from polygons
to bitmaps. Default: True.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
"""
def __init__(self,
with_bbox=True,
with_label=True,
with_mask=False,
with_seg=False,
poly2mask=True,
file_client_args=dict(backend='disk')):
self.with_bbox = with_bbox
self.with_label = with_label
self.with_mask = with_mask
self.with_seg = with_seg
self.poly2mask = poly2mask
self.file_client_args = file_client_args.copy()
self.file_client = None
def _load_bboxes(self, results):
ann_info = results['ann_info']
results['gt_bboxes'] = ann_info['bboxes'].copy()
gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)
if gt_bboxes_ignore is not None:
results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy()
results['bbox_fields'].append('gt_bboxes_ignore')
results['bbox_fields'].append('gt_bboxes')
return results
def _load_labels(self, results):
results['gt_labels'] = results['ann_info']['labels'].copy()
return results
def _poly2mask(self, mask_ann, img_h, img_w):
if isinstance(mask_ann, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)
rle = maskUtils.merge(rles)
elif isinstance(mask_ann['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
else:
# rle
rle = mask_ann
mask = maskUtils.decode(rle)
return mask
def process_polygons(self, polygons):
"""Convert polygons to list of ndarray and filter invalid polygons.
Args:
polygons (list[list]): polygons of one instance.
Returns:
list[ndarray]: processed polygons.
"""
polygons = [np.array(p) for p in polygons]
valid_polygons = []
for polygon in polygons:
if len(polygon) % 2 == 0 and len(polygon) >= 6:
valid_polygons.append(polygon)
return valid_polygons
def _load_masks(self, results):
h, w = results['img_info']['height'], results['img_info']['width']
gt_masks = results['ann_info']['masks']
if self.poly2mask:
gt_masks = BitmapMasks(
[self._poly2mask(mask, h, w) for mask in gt_masks], h, w)
else:
gt_masks = PolygonMasks(
[self.process_polygons(polygons) for polygons in gt_masks], h,
w)
results['gt_masks'] = gt_masks
results['mask_fields'].append('gt_masks')
return results
def _load_semantic_seg(self, results):
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
filename = osp.join(results['seg_prefix'],
results['ann_info']['seg_map'])
img_bytes = self.file_client.get(filename)
results['gt_semantic_seg'] = mmcv.imfrombytes(
img_bytes, flag='unchanged').squeeze()
results['seg_fields'].append('gt_semantic_seg')
return results
def __call__(self, results):
if self.with_bbox:
results = self._load_bboxes(results)
if results is None:
return None
if self.with_label:
results = self._load_labels(results)
if self.with_mask:
results = self._load_masks(results)
if self.with_seg:
results = self._load_semantic_seg(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(with_bbox={self.with_bbox}, '
repr_str += f'with_label={self.with_label}, '
repr_str += f'with_mask={self.with_mask}, '
        repr_str += f'with_seg={self.with_seg}, '
        repr_str += f'poly2mask={self.poly2mask}, '
        repr_str += f'file_client_args={self.file_client_args})'
return repr_str
@PIPELINES.register_module()
class LoadProposals(object):
def __init__(self, num_max_proposals=None):
self.num_max_proposals = num_max_proposals
def __call__(self, results):
proposals = results['proposals']
if proposals.shape[1] not in (4, 5):
raise AssertionError(
'proposals should have shapes (n, 4) or (n, 5), '
f'but found {proposals.shape}')
proposals = proposals[:, :4]
if self.num_max_proposals is not None:
proposals = proposals[:self.num_max_proposals]
if len(proposals) == 0:
proposals = np.array([[0, 0, 0, 0]], dtype=np.float32)
results['proposals'] = proposals
results['bbox_fields'].append('proposals')
return results
def __repr__(self):
return self.__class__.__name__ + \
f'(num_max_proposals={self.num_max_proposals})'
| 1 | 20,549 | :obj:\`dataset\` cannot be correctly rendered | open-mmlab-mmdetection | py |
@@ -28,4 +28,10 @@ module Blacklight::User
def existing_bookmark_for(document)
bookmarks_for_documents([document]).first
end
+
+ ##
+ # @return [String] a user-displayable login/identifier for the user account
+ def to_s
+ email
+ end
end | 1 | # frozen_string_literal: true
module Blacklight::User
# This gives us an is_blacklight_user method that can be included in
  # the containing application's models.
  # SEE ALSO: The /lib/blacklight/engine.rb class for how and when this
# is injected into the hosting application through ActiveRecord::Base extend
def self.included(base)
return unless base.respond_to? :has_many
base.send :has_many, :bookmarks, dependent: :destroy, as: :user
base.send :has_many, :searches, dependent: :destroy, as: :user
end
def bookmarks_for_documents documents = []
if documents.any?
bookmarks.where(document_type: documents.first.class.base_class.to_s, document_id: documents.map(&:id))
else
[]
end
end
def document_is_bookmarked?(document)
bookmarks_for_documents([document]).any?
end
# returns a Bookmark object if there is one for document_id, else
# nil.
def existing_bookmark_for(document)
bookmarks_for_documents([document]).first
end
end
| 1 | 8,269 | I think `email` is something we get from devise, and I think the goal of putting it in the generator was not to tie others to that particular implementation? | projectblacklight-blacklight | rb |
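The review note on this record questions hard-coding the Devise-provided `email` accessor in `to_s`. A rough sketch of a less coupled variant, assuming a Rails/ActiveSupport context (the fallback label is invented for illustration):
  # Sketch only: avoid assuming every user model exposes `email`.
  def to_s
    if respond_to?(:email) && email.present?
      email
    else
      "User #{id}"
    end
  end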
@@ -108,15 +108,16 @@ func NewBatchSpanProcessor(e export.SpanBatcher, opts ...BatchSpanProcessorOptio
bsp.stopWait.Add(1)
go func(ctx context.Context) {
defer ticker.Stop()
+ batch := make([]*export.SpanData, 0, bsp.o.MaxExportBatchSize)
for {
select {
case <-bsp.stopCh:
- bsp.processQueue()
+ bsp.processQueue(&batch)
close(bsp.queue)
bsp.stopWait.Done()
return
case <-ticker.C:
- bsp.processQueue()
+ bsp.processQueue(&batch)
}
}
}(context.Background()) | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"context"
"errors"
"sync"
"sync/atomic"
"time"
export "go.opentelemetry.io/otel/sdk/export/trace"
)
const (
defaultMaxQueueSize = 2048
defaultScheduledDelay = 5000 * time.Millisecond
defaultMaxExportBatchSize = 512
)
var (
errNilExporter = errors.New("exporter is nil")
)
type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
type BatchSpanProcessorOptions struct {
// MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the
// queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior.
// The default value of MaxQueueSize is 2048.
MaxQueueSize int
// ScheduledDelayMillis is the delay interval in milliseconds between two consecutive
// processing of batches.
// The default value of ScheduledDelayMillis is 5000 msec.
ScheduledDelayMillis time.Duration
// MaxExportBatchSize is the maximum number of spans to process in a single batch.
// If there are more than one batch worth of spans then it processes multiple batches
// of spans one batch after the other without any delay.
// The default value of MaxExportBatchSize is 512.
MaxExportBatchSize int
	// BlockOnQueueFull blocks the onEnd() and onStart() methods if the queue is full
// AND if BlockOnQueueFull is set to true.
// Blocking option should be used carefully as it can severely affect the performance of an
// application.
BlockOnQueueFull bool
}
// BatchSpanProcessor implements SpanProcessor interfaces. It is used by
// exporters to receive export.SpanData asynchronously.
// Use BatchSpanProcessorOptions to change the behavior of the processor.
type BatchSpanProcessor struct {
e export.SpanBatcher
o BatchSpanProcessorOptions
queue chan *export.SpanData
dropped uint32
stopWait sync.WaitGroup
stopOnce sync.Once
stopCh chan struct{}
}
var _ SpanProcessor = (*BatchSpanProcessor)(nil)
// NewBatchSpanProcessor creates a new instance of BatchSpanProcessor
// for a given export. It returns an error if exporter is nil.
// The newly created BatchSpanProcessor should then be registered with sdk
// using RegisterSpanProcessor.
func NewBatchSpanProcessor(e export.SpanBatcher, opts ...BatchSpanProcessorOption) (*BatchSpanProcessor, error) {
if e == nil {
return nil, errNilExporter
}
o := BatchSpanProcessorOptions{
ScheduledDelayMillis: defaultScheduledDelay,
MaxQueueSize: defaultMaxQueueSize,
MaxExportBatchSize: defaultMaxExportBatchSize,
}
for _, opt := range opts {
opt(&o)
}
bsp := &BatchSpanProcessor{
e: e,
o: o,
}
bsp.queue = make(chan *export.SpanData, bsp.o.MaxQueueSize)
bsp.stopCh = make(chan struct{})
	// Start timer to export spans
ticker := time.NewTicker(bsp.o.ScheduledDelayMillis)
bsp.stopWait.Add(1)
go func(ctx context.Context) {
defer ticker.Stop()
for {
select {
case <-bsp.stopCh:
bsp.processQueue()
close(bsp.queue)
bsp.stopWait.Done()
return
case <-ticker.C:
bsp.processQueue()
}
}
}(context.Background())
return bsp, nil
}
// OnStart method does nothing.
func (bsp *BatchSpanProcessor) OnStart(sd *export.SpanData) {
}
// OnEnd method enqueues export.SpanData for later processing.
func (bsp *BatchSpanProcessor) OnEnd(sd *export.SpanData) {
bsp.enqueue(sd)
}
// Shutdown flushes the queue and waits until all spans are processed.
// It only executes once. Subsequent call does nothing.
func (bsp *BatchSpanProcessor) Shutdown() {
bsp.stopOnce.Do(func() {
close(bsp.stopCh)
bsp.stopWait.Wait()
})
}
func WithMaxQueueSize(size int) BatchSpanProcessorOption {
return func(o *BatchSpanProcessorOptions) {
o.MaxQueueSize = size
}
}
func WithMaxExportBatchSize(size int) BatchSpanProcessorOption {
return func(o *BatchSpanProcessorOptions) {
o.MaxExportBatchSize = size
}
}
func WithScheduleDelayMillis(delay time.Duration) BatchSpanProcessorOption {
return func(o *BatchSpanProcessorOptions) {
o.ScheduledDelayMillis = delay
}
}
func WithBlocking() BatchSpanProcessorOption {
return func(o *BatchSpanProcessorOptions) {
o.BlockOnQueueFull = true
}
}
func (bsp *BatchSpanProcessor) processQueue() {
batch := make([]*export.SpanData, 0, bsp.o.MaxExportBatchSize)
for {
var sd *export.SpanData
var ok bool
select {
case sd = <-bsp.queue:
if sd != nil && sd.SpanContext.IsSampled() {
batch = append(batch, sd)
}
ok = true
default:
ok = false
}
if ok {
if len(batch) >= bsp.o.MaxExportBatchSize {
bsp.e.ExportSpans(context.Background(), batch)
batch = batch[:0]
}
} else {
if len(batch) > 0 {
bsp.e.ExportSpans(context.Background(), batch)
}
break
}
}
}
func (bsp *BatchSpanProcessor) enqueue(sd *export.SpanData) {
if bsp.o.BlockOnQueueFull {
bsp.queue <- sd
} else {
var ok bool
select {
case bsp.queue <- sd:
ok = true
default:
ok = false
}
if !ok {
atomic.AddUint32(&bsp.dropped, 1)
}
}
}
| 1 | 11,709 | If the span producers pushing to the queue do so faster than the this can drain, it will cause this to hang. I'm guessing we can update the `enqueue` method to check if the `stopCh` is closed and not send any more spans while this flushes what has already been pushed. | open-telemetry-opentelemetry-go | go |
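The review note on this record describes a potential hang when producers outpace the drain during shutdown, and suggests having `enqueue` respect `stopCh`. A minimal sketch of that idea, using the `BatchSpanProcessor` fields defined above (illustrative only, not the change that actually landed, and it still leaves a small window between the check and the send):
func (bsp *BatchSpanProcessor) enqueue(sd *export.SpanData) {
	// Refuse new spans once shutdown has begun so processQueue can drain
	// what is already buffered without racing against producers.
	select {
	case <-bsp.stopCh:
		atomic.AddUint32(&bsp.dropped, 1)
		return
	default:
	}

	if bsp.o.BlockOnQueueFull {
		bsp.queue <- sd
		return
	}

	select {
	case bsp.queue <- sd:
	default:
		atomic.AddUint32(&bsp.dropped, 1)
	}
}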
@@ -106,9 +106,9 @@ abstract class Gallery implements GalleryInterface
return $this->defaultFormat;
}
- final public function setGalleryItems(Collection $galleryItems): void
+ final public function setGalleryItems(iterable $galleryItems): void
{
- $this->galleryItems = new ArrayCollection();
+ $this->galleryItems->clear();
foreach ($galleryItems as $galleryItem) {
$this->addGalleryItem($galleryItem); | 1 | <?php
declare(strict_types=1);
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Model;
use Doctrine\Common\Collections\ArrayCollection;
use Doctrine\Common\Collections\Collection;
use Sonata\MediaBundle\Provider\MediaProviderInterface;
abstract class Gallery implements GalleryInterface
{
protected ?string $context = null;
protected ?string $name = null;
protected bool $enabled = false;
protected ?\DateTimeInterface $updatedAt = null;
protected ?\DateTimeInterface $createdAt = null;
protected string $defaultFormat = MediaProviderInterface::FORMAT_REFERENCE;
/**
* @var Collection<int, GalleryItemInterface>
*/
protected Collection $galleryItems;
public function __construct()
{
$this->galleryItems = new ArrayCollection();
}
public function __toString(): string
{
return $this->getName() ?? '-';
}
final public function setName(?string $name): void
{
$this->name = $name;
}
final public function getName(): ?string
{
return $this->name;
}
final public function setContext(?string $context): void
{
$this->context = $context;
}
final public function getContext(): ?string
{
return $this->context;
}
final public function setEnabled(bool $enabled): void
{
$this->enabled = $enabled;
}
final public function getEnabled(): bool
{
return $this->enabled;
}
final public function setUpdatedAt(?\DateTimeInterface $updatedAt): void
{
$this->updatedAt = $updatedAt;
}
final public function getUpdatedAt(): ?\DateTimeInterface
{
return $this->updatedAt;
}
final public function setCreatedAt(?\DateTimeInterface $createdAt): void
{
$this->createdAt = $createdAt;
}
final public function getCreatedAt(): ?\DateTimeInterface
{
return $this->createdAt;
}
final public function setDefaultFormat(string $defaultFormat): void
{
$this->defaultFormat = $defaultFormat;
}
final public function getDefaultFormat(): string
{
return $this->defaultFormat;
}
final public function setGalleryItems(Collection $galleryItems): void
{
$this->galleryItems = new ArrayCollection();
foreach ($galleryItems as $galleryItem) {
$this->addGalleryItem($galleryItem);
}
}
final public function getGalleryItems(): Collection
{
return $this->galleryItems;
}
final public function addGalleryItem(GalleryItemInterface $galleryItem): void
{
$galleryItem->setGallery($this);
$this->galleryItems[] = $galleryItem;
}
final public function removeGalleryItem(GalleryItemInterface $galleryItem): void
{
if ($this->galleryItems->contains($galleryItem)) {
$this->galleryItems->removeElement($galleryItem);
}
}
final public function reorderGalleryItems(): void
{
$iterator = $this->getGalleryItems()->getIterator();
if (!$iterator instanceof \ArrayIterator) {
throw new \RuntimeException(sprintf(
'The gallery %s cannot be reordered, $galleryItems should implement %s',
$this->getId() ?? '',
\ArrayIterator::class
));
}
$iterator->uasort(static function (GalleryItemInterface $a, GalleryItemInterface $b): int {
return $a->getPosition() <=> $b->getPosition();
});
$this->setGalleryItems(new ArrayCollection(iterator_to_array($iterator)));
}
}
| 1 | 12,846 | Not sure about that, an array is iterable right? But if I pass array, that clear method wont work | sonata-project-SonataMediaBundle | php |
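The review note on this record asks whether passing a plain array breaks the `clear()` call. A small stand-alone sketch (the `ItemBag` class and its names are invented for illustration) showing that `clear()` operates on the existing Collection property, not on the argument, so any iterable — array or Collection — is accepted:
use Doctrine\Common\Collections\ArrayCollection;

final class ItemBag
{
    private ArrayCollection $items;

    public function __construct()
    {
        $this->items = new ArrayCollection();
    }

    public function setItems(iterable $items): void
    {
        $this->items->clear();            // clears the property, not the argument
        foreach ($items as $item) {
            $this->items->add($item);
        }
    }
}

// Both calls are valid: arrays and Collections are iterable.
$bag = new ItemBag();
$bag->setItems(['a', 'b']);
$bag->setItems(new ArrayCollection(['c']));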
@@ -6,9 +6,14 @@ import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "sync"
"testing"
- "github.com/spiffe/sri/pkg/server/ca"
+ "github.com/spiffe/go-spiffe"
+ iface "github.com/spiffe/sri/pkg/common/plugin"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
| 1 | package main
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"testing"
"github.com/spiffe/sri/pkg/server/ca"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
upca "github.com/spiffe/sri/plugin/server/upstreamca-memory/pkg"
)
func TestMemory_Configure(t *testing.T) {
t.SkipNow()
}
func TestMemory_GetPluginInfo(t *testing.T) {
m := createDefault(t)
res, err := m.GetPluginInfo()
require.NoError(t, err)
assert.NotNil(t, res)
}
func TestMemory_GenerateCsr(t *testing.T) {
m := createDefault(t)
csr, err := m.GenerateCsr()
require.NoError(t, err)
assert.NotEmpty(t, csr)
}
func TestMemory_FetchCertificate(t *testing.T) {
m := createDefault(t)
cert, err := m.FetchCertificate()
require.NoError(t, err)
assert.Empty(t, cert)
}
func TestMemory_bootstrap(t *testing.T) {
m := createDefault(t)
ca, err := upca.NewWithDefault()
require.NoError(t, err)
csr, err := m.GenerateCsr()
require.NoError(t, err)
cresp, err := ca.SubmitCSR(csr)
require.NoError(t, err)
err = m.LoadCertificate(cresp.Cert)
require.NoError(t, err)
lcert, err := m.FetchCertificate()
require.NoError(t, err)
assert.Equal(t, cresp.Cert, lcert)
wcsr := createWorkloadCSR(t)
wcert, err := m.SignCsr(wcsr)
require.NoError(t, err)
assert.NotEmpty(t, wcert)
}
func createDefault(t *testing.T) ca.ControlPlaneCa {
m, err := NewWithDefault()
require.NoError(t, err)
return m
}
func createWorkloadCSR(t *testing.T) []byte {
keysz := 1024
key, err := rsa.GenerateKey(rand.Reader, keysz)
require.NoError(t, err)
template := x509.CertificateRequest{
Subject: pkix.Name{
Country: []string{"US"},
Organization: []string{"SPIFFE"},
CommonName: "workload",
},
SignatureAlgorithm: x509.SHA256WithRSA,
}
csr, err := x509.CreateCertificateRequest(rand.Reader, &template, key)
require.NoError(t, err)
return pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE REQUEST",
Bytes: csr,
})
}
| 1 | 8,277 | make sure the config changes stuck. are there any invalid config values? maybe write tests around empty `trust_domain`, negative/missing `ttl`, invalid `key_size`, etc... | spiffe-spire | go |
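The review note on this record asks for tests that invalid configuration values are rejected (empty `trust_domain`, negative or missing `ttl`, bad `key_size`). A rough table-driven skeleton for such cases; how the plugin's Configure call is actually invoked depends on the sri plugin API, so that step is left as a comment rather than guessed at:
func TestMemory_ConfigureRejectsInvalidValues(t *testing.T) {
	cases := []struct {
		name string
		hcl  string
	}{
		{name: "empty trust_domain", hcl: `trust_domain = "" ttl = 3600 key_size = 2048`},
		{name: "negative ttl", hcl: `trust_domain = "example.org" ttl = -1 key_size = 2048`},
		{name: "missing ttl", hcl: `trust_domain = "example.org" key_size = 2048`},
		{name: "invalid key_size", hcl: `trust_domain = "example.org" ttl = 3600 key_size = 1`},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			m := createDefault(t)
			// Invoke the plugin's Configure with tc.hcl here and require an
			// error; the exact request/response types come from the sri
			// plugin interface and are intentionally not assumed here.
			_ = m
		})
	}
}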
@@ -5,7 +5,7 @@ var WebDriver = require('selenium-webdriver');
module.exports = function(grunt) {
/**
- * Keep injecting scripts until window.mochaResults is set
+ * Keep injecting scripts until `window.__mochaResult__` is set
*/
function collectTestResults(driver) {
// inject a script that waits half a second | 1 | /*global window */
'use strict';
var WebDriver = require('selenium-webdriver');
module.exports = function(grunt) {
/**
* Keep injecting scripts until window.mochaResults is set
*/
function collectTestResults(driver) {
// inject a script that waits half a second
return driver
.executeAsyncScript(function() {
var callback = arguments[arguments.length - 1];
setTimeout(function() {
// return the mocha results (or undefined if not finished)
callback(window.mochaResults);
}, 500);
})
.then(function(result) {
// If there are no results, listen a little longer
if (!result) {
return collectTestResults(driver);
// if there are, return them
} else {
return Promise.resolve(result);
}
});
}
/**
* Test each URL
*/
function runTestUrls(driver, isMobile, urls, errors) {
var url = urls.shift();
errors = errors || [];
return (
driver
.get(url)
// Get results
.then(function() {
let driverBrowser = driver
.getCapabilities()
.then(capabilities => capabilities.get('browserName'));
return Promise.all([driverBrowser, collectTestResults(driver)]);
})
// And process them
.then(function([browser, result]) {
grunt.log.writeln(url + ' [' + browser + ']');
// Remember the errors
(result.reports || []).forEach(function(err) {
grunt.log.error(err.message);
err.url = url;
err.browser = browser;
errors.push(err);
});
// Log the result of the page tests
grunt.log[result.failures ? 'error' : 'ok'](
'passes: ' +
result.passes +
', ' +
'failures: ' +
result.failures +
', ' +
'duration: ' +
result.duration / 1000 +
's'
);
grunt.log.writeln();
})
.then(function() {
// Start the next job, if any
if (urls.length > 0) {
return runTestUrls(driver, isMobile, urls, errors);
} else {
driver.quit();
return Promise.resolve(errors);
}
})
);
}
/*
* Build web driver depends whether REMOTE_SELENIUM_URL is set
*/
function buildWebDriver(browser) {
var webdriver, capabilities;
var mobileBrowser = browser.split('-mobile');
if (mobileBrowser.length > 1) {
browser = mobileBrowser[0];
capabilities = {
browserName: mobileBrowser[0],
chromeOptions: {
mobileEmulation: {
deviceMetrics: {
width: 320,
height: 568,
pixelRatio: 2
}
}
}
};
}
if (process.env.REMOTE_SELENIUM_URL) {
webdriver = new WebDriver.Builder()
.forBrowser(browser)
.withCapabilities(capabilities)
.usingServer(process.env.REMOTE_SELENIUM_URL)
.build();
} else {
webdriver = new WebDriver.Builder()
.withCapabilities(capabilities)
.forBrowser(browser)
.build();
}
return {
driver: webdriver,
isMobile: mobileBrowser.length > 1
};
}
/**
* Run all tests in a browser using webdriver
*/
grunt.registerMultiTask(
'test-webdriver',
'Task for launching Webdriver with options and running tests against options URLs',
function() {
var driver;
var isMobile = false;
var done = this.async();
var options = this.options({
browser: 'firefox'
});
// yes, really, and this isn't documented anywhere either.
options.browser =
options.browser === 'edge' ? 'MicrosoftEdge' : options.browser;
if (
(process.platform === 'win32' && options.browser === 'safari') ||
(process.platform === 'darwin' &&
['ie', 'MicrosoftEdge'].indexOf(options.browser) !== -1) ||
((process.platform === 'linux' || process.env.REMOTE_SELENIUM_URL) &&
['ie', 'MicrosoftEdge', 'safari'].indexOf(options.browser) !== -1)
) {
grunt.log.writeln();
grunt.log.writeln(
'Skipped ' +
options.browser +
' as it is not supported on this platform'
);
return done();
}
// try to load the browser
try {
var webDriver = buildWebDriver(options.browser);
driver = webDriver.driver;
isMobile = webDriver.isMobile;
// If load fails, warn user and move to the next task
} catch (err) {
grunt.log.writeln();
grunt.log.error(err.message);
grunt.log.writeln('Aborted testing using ' + options.browser);
return done();
}
// Give driver timeout options for scripts
driver
.manage()
.timeouts()
.setScriptTimeout(!isMobile ? 60000 * 5 : 60000 * 10);
// allow to wait for page load implicitly
driver
.manage()
.timeouts()
.implicitlyWait(50000);
// Test all pages
runTestUrls(driver, isMobile, options.urls)
.then(function(testErrors) {
// log each error and abort
testErrors.forEach(function(err) {
grunt.log.writeln();
grunt.log.error('URL: ' + err.url);
grunt.log.error('Browser: ' + err.browser);
grunt.log.error('Describe: ' + err.titles.join(' > '));
grunt.log.error('it ' + err.name);
grunt.log.error(err.stack);
grunt.log.writeln();
});
// Return the success to Grunt
done(testErrors.length === 0);
// catch any potential problems
})
.catch(function(err) {
grunt.log.error(err);
done(false);
});
}
);
};
| 1 | 14,575 | Why are you changing this? | dequelabs-axe-core | js |
@@ -29,6 +29,10 @@ module Ncr
validates :expense_type, inclusion: {in: EXPENSE_TYPES}, presence: true
validates :vendor, presence: true
validates :building_number, presence: true
+ validates :rwa_number, format: {
+ with: /[a-zA-Z][0-9]{7}/,
+ message: "one letter followed by 7 numbers"
+ }, allow_blank: true
# TODO validates :proposal, presence: true
def set_defaults | 1 | module Ncr
# Make sure all table names use 'ncr_XXX'
def self.table_name_prefix
'ncr_'
end
DATA = YAML.load_file("#{Rails.root}/config/data/ncr.yaml")
EXPENSE_TYPES = %w(BA61 BA80)
BUILDING_NUMBERS = DATA['BUILDING_NUMBERS']
OFFICES = DATA['OFFICES']
class WorkOrder < ActiveRecord::Base
include ObservableModel
# TODO include ProposalDelegate
has_one :proposal, as: :client_data
# TODO remove the dependence
has_one :cart, through: :proposal
after_initialize :set_defaults
# @TODO: use integer number of cents to avoid floating point issues
validates :amount, numericality: {
greater_than_or_equal_to: 0,
less_than_or_equal_to: 3000
}
validates :expense_type, inclusion: {in: EXPENSE_TYPES}, presence: true
validates :vendor, presence: true
validates :building_number, presence: true
# TODO validates :proposal, presence: true
def set_defaults
self.not_to_exceed ||= false
self.emergency ||= false
end
# @todo - this is an awkward dance due to the lingering Cart model. Remove
# that dependence
def init_and_save_cart(approver_email, requester)
cart = Cart.create(
proposal_attributes: {flow: 'linear', client_data: self,
requester: requester}
)
self.add_approvals(approver_email)
Dispatcher.deliver_new_proposal_emails(proposal)
cart
end
# A requester can change his/her approving official
def update_approver(approver_email)
first_approval = self.proposal.approvals.first
if first_approval.user_email_address != approver_email
first_approval.destroy
replacement = self.proposal.add_approver(approver_email)
replacement.move_to_top
Dispatcher.email_approver(replacement)
end
end
def add_approvals(approver_email)
emails = [approver_email] + self.system_approvers
if self.emergency
emails.each {|email| self.proposal.add_observer(email) }
# skip state machine
self.proposal.update_attribute(:status, 'approved')
else
emails.each {|email| self.proposal.add_approver(email) }
end
end
# Ignore values in certain fields if they aren't relevant. May want to
# split these into different models
def self.relevant_fields(expense_type)
fields = [:description, :amount, :expense_type, :vendor, :not_to_exceed,
:building_number, :office]
case expense_type
when "BA61"
fields + [:emergency]
when "BA80"
fields + [:rwa_number, :code]
end
end
def relevant_fields
Ncr::WorkOrder.relevant_fields(self.expense_type)
end
# Methods for Client Data interface
def fields_for_display
attributes = self.relevant_fields
attributes.map{|key| [WorkOrder.human_attribute_name(key), self[key]]}
end
def client
"ncr"
end
def public_identifier
"FY" + self.fiscal_year.to_s.rjust(2, "0") + "-#{self.id}"
end
def total_price
self.amount || 0.0
end
# may be replaced with paper-trail or similar at some point
def version
self.updated_at.to_i
end
def name
self.project_title
end
protected
def fiscal_year
year = self.created_at.year
if self.created_at.month >= 10
year += 1
end
year % 100 # convert to two-digit
end
def system_approvers
if self.expense_type == 'BA61'
[
ENV["NCR_BA61_TIER1_BUDGET_MAILBOX"] || '[email protected]',
ENV["NCR_BA61_TIER2_BUDGET_MAILBOX"] || '[email protected]'
]
else
[ENV["NCR_BA80_BUDGET_MAILBOX"] || '[email protected]']
end
end
end
end
| 1 | 13,041 | @phirefly Can we look at a list of RWAs, or ask someone to double-check that this format is correct? Otherwise :shipit: | 18F-C2 | rb |
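The review note on this record asks for a double-check of the RWA number format. A quick sanity sketch with made-up sample values; note it uses an anchored pattern, since the unanchored /[a-zA-Z][0-9]{7}/ from the diff would also match inside longer strings:
pattern = /\A[a-zA-Z][0-9]{7}\z/

%w[B8012345 b1234567].all? { |rwa| rwa =~ pattern }          # truthy for every sample
%w[12345678 B123456 B12345678].any? { |rwa| rwa =~ pattern } # => false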
@@ -72,6 +72,7 @@ public class Spark3BinPackStrategy extends BinPackStrategy {
.format("iceberg")
.option(SparkWriteOptions.REWRITTEN_FILE_SCAN_TASK_SET_ID, groupID)
.option(SparkWriteOptions.TARGET_FILE_SIZE_BYTES, writeMaxFileSize())
+ .option(SparkWriteOptions.DISTRIBUTION_MODE, "none")
.mode("append")
.save(table.name());
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.actions;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.Table;
import org.apache.iceberg.actions.BinPackStrategy;
import org.apache.iceberg.spark.FileRewriteCoordinator;
import org.apache.iceberg.spark.FileScanTaskSetManager;
import org.apache.iceberg.spark.SparkReadOptions;
import org.apache.iceberg.spark.SparkWriteOptions;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.internal.SQLConf;
public class Spark3BinPackStrategy extends BinPackStrategy {
private final Table table;
private final SparkSession spark;
private final FileScanTaskSetManager manager = FileScanTaskSetManager.get();
private final FileRewriteCoordinator rewriteCoordinator = FileRewriteCoordinator.get();
public Spark3BinPackStrategy(Table table, SparkSession spark) {
this.table = table;
this.spark = spark;
}
@Override
public Table table() {
return table;
}
@Override
public Set<DataFile> rewriteFiles(List<FileScanTask> filesToRewrite) {
String groupID = UUID.randomUUID().toString();
try {
manager.stageTasks(table, groupID, filesToRewrite);
// Disable Adaptive Query Execution as this may change the output partitioning of our write
SparkSession cloneSession = spark.cloneSession();
cloneSession.conf().set(SQLConf.ADAPTIVE_EXECUTION_ENABLED().key(), false);
Dataset<Row> scanDF = cloneSession.read().format("iceberg")
.option(SparkReadOptions.FILE_SCAN_TASK_SET_ID, groupID)
.option(SparkReadOptions.SPLIT_SIZE, splitSize(inputFileSize(filesToRewrite)))
.option(SparkReadOptions.FILE_OPEN_COST, "0")
.load(table.name());
// write the packed data into new files where each split becomes a new file
scanDF.write()
.format("iceberg")
.option(SparkWriteOptions.REWRITTEN_FILE_SCAN_TASK_SET_ID, groupID)
.option(SparkWriteOptions.TARGET_FILE_SIZE_BYTES, writeMaxFileSize())
.mode("append")
.save(table.name());
return rewriteCoordinator.fetchNewDataFiles(table, groupID);
} finally {
manager.removeTasks(table, groupID);
rewriteCoordinator.clearRewrite(table, groupID);
}
}
}
| 1 | 45,576 | Still request a local sort for bin-packing based on the defined table sort order. | apache-iceberg | java |
@@ -151,13 +151,16 @@ def request_model(rank, itr, lmbda, alpha):
@click.option("--days", type=int, default=7, help="Request recommendations to be generated on history of given number of days")
@click.option("--top", type=int, default=20, help="Calculate given number of top artist.")
@click.option("--similar", type=int, default=20, help="Calculate given number of similar artist.")
-def request_candidate_sets(days, top, similar):
[email protected]("--user-id", "users", callback=parse_list, default=[], multiple=True,
+ help="Generate candidate set for given users. Generate for all active users by default.")
+def request_candidate_sets(days, top, similar, users):
""" Send the cluster a request to generate candidate sets.
"""
params = {
'recommendation_generation_window': days,
"top_artist_limit": top,
"similar_artist_limit": similar,
+ "users": users
}
send_request_to_spark_cluster(_prepare_query_message('cf_recording.recommendations.candidate_sets', params=params))
| 1 | import sys
import click
import listenbrainz.utils as utils
import os
import pika
import ujson
from flask import current_app
from listenbrainz.webserver import create_app
QUERIES_JSON_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'request_queries.json')
cli = click.Group()
class InvalidSparkRequestError(Exception):
pass
def _get_possible_queries():
""" Return the dict describing all possible queries that can
be sent to Spark. Listed in listenbrainz/spark/request_queries.json
"""
with open(QUERIES_JSON_PATH) as f:
return ujson.load(f)
def _prepare_query_message(query, params=None):
""" Prepare the JSON message that needs to be sent to the
spark cluster based on the query and the parameters the
query needs
Args:
query (str): the name of the query, should be in request_queries.json
params (dict): the parameters the query needs, should contain all the params
in the corresponding request_queries.json to be valid
Raises:
InvalidSparkRequestError if the query isn't in the list or if the parameters
don't match up
"""
if params is None:
params = {}
possible_queries = _get_possible_queries()
if query not in possible_queries:
raise InvalidSparkRequestError(query)
message = {'query': possible_queries[query]['name']}
required_params = set(possible_queries[query]['params'])
given_params = set(params.keys())
if required_params != given_params:
raise InvalidSparkRequestError
if params:
message['params'] = {}
for key, value in params.items():
message['params'][key] = value
return ujson.dumps(message)
def send_request_to_spark_cluster(message):
with create_app().app_context():
rabbitmq_connection = utils.connect_to_rabbitmq(
username=current_app.config['RABBITMQ_USERNAME'],
password=current_app.config['RABBITMQ_PASSWORD'],
host=current_app.config['RABBITMQ_HOST'],
port=current_app.config['RABBITMQ_PORT'],
virtual_host=current_app.config['RABBITMQ_VHOST'],
error_logger=current_app.logger,
)
try:
channel = rabbitmq_connection.channel()
channel.exchange_declare(exchange=current_app.config['SPARK_REQUEST_EXCHANGE'], exchange_type='fanout')
channel.basic_publish(
exchange=current_app.config['SPARK_REQUEST_EXCHANGE'],
routing_key='',
body=message,
properties=pika.BasicProperties(delivery_mode=2,),
)
except Exception:
# this is a relatively non critical part of LB for now, so just log the error and
# move ahead
current_app.logger.error('Could not send message to spark cluster: %s', ujson.dumps(message), exc_info=True)
@cli.command(name="request_user_stats")
@click.option("--type", 'type_', type=click.Choice(['entity', 'listening_activity']),
help="Type of statistics to calculate", required=True)
@click.option("--range", 'range_', type=click.Choice(['week', 'month', 'year', 'all_time']),
help="Time range of statistics to calculate", required=True)
@click.option("--entity", type=click.Choice(['artists', 'releases', 'recordings']),
help="Entity for which statistics should be calculated")
def request_user_stats(type_, range_, entity):
""" Send a user stats request to the spark cluster
"""
params = {}
if type_ == 'entity' and entity:
params['entity'] = entity
try:
send_request_to_spark_cluster(_prepare_query_message(
'stats.user.{type}.{range}'.format(range=range_, type=type_), params=params))
except InvalidSparkRequestError:
click.echo("Incorrect arguments provided")
@cli.command(name="request_import_full")
def request_import_new_full_dump():
""" Send the cluster a request to import a new full data dump
"""
send_request_to_spark_cluster(_prepare_query_message('import.dump.full'))
@cli.command(name="request_dataframes")
@click.option("--days", type=int, default=180, help="Request model to be trained on data of given number of days")
def request_dataframes(days):
""" Send the cluster a request to create dataframes.
"""
params = {
'train_model_window': days,
}
send_request_to_spark_cluster(_prepare_query_message('cf_recording.recommendations.create_dataframes', params=params))
def parse_list(ctx, args):
return list(args)
@cli.command(name='request_model')
@click.option("--rank", callback=parse_list, default=[5, 10], type=int, multiple=True, help="Number of hidden features")
@click.option("--itr", callback=parse_list, default=[5, 10], type=int, multiple=True, help="Number of iterations to run.")
@click.option("--lmbda", callback=parse_list, default=[0.1, 10.0], type=float, multiple=True, help="Controls over fitting.")
@click.option("--alpha", default=3.0, type=float, help="Baseline level of confidence weighting applied.")
def request_model(rank, itr, lmbda, alpha):
""" Send the cluster a request to train the model.
For more details refer to 'https://spark.apache.org/docs/2.1.0/mllib-collaborative-filtering.html'
"""
params = {
'ranks': rank,
'lambdas': lmbda,
'iterations': itr,
'alpha': alpha,
}
send_request_to_spark_cluster(_prepare_query_message('cf_recording.recommendations.train_model', params=params))
@cli.command(name='request_candidate_sets')
@click.option("--days", type=int, default=7, help="Request recommendations to be generated on history of given number of days")
@click.option("--top", type=int, default=20, help="Calculate given number of top artist.")
@click.option("--similar", type=int, default=20, help="Calculate given number of similar artist.")
def request_candidate_sets(days, top, similar):
""" Send the cluster a request to generate candidate sets.
"""
params = {
'recommendation_generation_window': days,
"top_artist_limit": top,
"similar_artist_limit": similar,
}
send_request_to_spark_cluster(_prepare_query_message('cf_recording.recommendations.candidate_sets', params=params))
@cli.command(name='request_recommendations')
@click.option("--top", type=int, default=200, help="Generate given number of top artist recommendations")
@click.option("--similar", type=int, default=200, help="Generate given number of similar artist recommendations")
@click.option("--user-name", 'users', callback=parse_list, default=[], multiple=True,
help="Generate recommendations for given users. Generate recommendations for all users by default.")
def request_recommendations(top, similar, users):
""" Send the cluster a request to generate recommendations.
"""
params = {
'recommendation_top_artist_limit': top,
'recommendation_similar_artist_limit': similar,
'users': users
}
send_request_to_spark_cluster(_prepare_query_message('cf_recording.recommendations.recommend', params=params))
@cli.command(name='request_import_mapping')
def request_import_mapping():
""" Send the spark cluster a request to import msid mbid mapping.
"""
send_request_to_spark_cluster(_prepare_query_message('import.mapping'))
@cli.command(name='request_import_artist_relation')
def request_import_artist_relation():
""" Send the spark cluster a request to import artist relation.
"""
send_request_to_spark_cluster(_prepare_query_message('import.artist_relation'))
| 1 | 16,671 | As with the other PR, user name is better. | metabrainz-listenbrainz-server | py |
@@ -125,6 +125,12 @@ func (s *Service) Pay(ctx context.Context, peer swarm.Address, amount uint64) er
if err != nil {
return err
}
+
+ balance, err := s.chequebook.AvailableBalance(ctx)
+ if err != nil {
+ return err
+ }
+ s.metrics.AvailableBalance.Set(float64(balance.Int64()))
s.metrics.TotalSent.Add(float64(amount))
return nil
} | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package swap
import (
"context"
"errors"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/settlement"
"github.com/ethersphere/bee/pkg/settlement/swap/chequebook"
"github.com/ethersphere/bee/pkg/settlement/swap/swapprotocol"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
)
var (
// ErrWrongChequebook is the error if a peer uses a different chequebook from before.
ErrWrongChequebook = errors.New("wrong chequebook")
// ErrWrongBeneficiary is the error if a peer uses a different beneficiary than expected.
ErrWrongBeneficiary = errors.New("wrong beneficiary")
// ErrUnknownBeneficary is the error if a peer has never announced a beneficiary.
ErrUnknownBeneficary = errors.New("unknown beneficiary for peer")
)
type ApiInterface interface {
// LastSentCheque returns the last sent cheque for the peer
LastSentCheque(peer swarm.Address) (*chequebook.SignedCheque, error)
// LastSentCheques returns the list of last sent cheques for all peers
LastSentCheques() (map[string]*chequebook.SignedCheque, error)
// LastReceivedCheque returns the last received cheque for the peer
LastReceivedCheque(peer swarm.Address) (*chequebook.SignedCheque, error)
// LastReceivedCheques returns the list of last received cheques for all peers
LastReceivedCheques() (map[string]*chequebook.SignedCheque, error)
// CashCheque sends a cashing transaction for the last cheque of the peer
CashCheque(ctx context.Context, peer swarm.Address) (common.Hash, error)
// CashoutStatus gets the status of the latest cashout transaction for the peers chequebook
CashoutStatus(ctx context.Context, peer swarm.Address) (*chequebook.CashoutStatus, error)
}
// Service is the implementation of the swap settlement layer.
type Service struct {
proto swapprotocol.Interface
logger logging.Logger
store storage.StateStorer
notifyPaymentFunc settlement.NotifyPaymentFunc
metrics metrics
chequebook chequebook.Service
chequeStore chequebook.ChequeStore
cashout chequebook.CashoutService
p2pService p2p.Service
addressbook Addressbook
networkID uint64
}
// New creates a new swap Service.
func New(proto swapprotocol.Interface, logger logging.Logger, store storage.StateStorer, chequebook chequebook.Service, chequeStore chequebook.ChequeStore, addressbook Addressbook, networkID uint64, cashout chequebook.CashoutService, p2pService p2p.Service) *Service {
return &Service{
proto: proto,
logger: logger,
store: store,
metrics: newMetrics(),
chequebook: chequebook,
chequeStore: chequeStore,
addressbook: addressbook,
networkID: networkID,
cashout: cashout,
p2pService: p2pService,
}
}
// ReceiveCheque is called by the swap protocol if a cheque is received.
func (s *Service) ReceiveCheque(ctx context.Context, peer swarm.Address, cheque *chequebook.SignedCheque) (err error) {
// check this is the same chequebook for this peer as previously
expectedChequebook, known, err := s.addressbook.Chequebook(peer)
if err != nil {
return err
}
if known && expectedChequebook != cheque.Chequebook {
return ErrWrongChequebook
}
amount, err := s.chequeStore.ReceiveCheque(ctx, cheque)
if err != nil {
s.metrics.ChequesRejected.Inc()
return fmt.Errorf("rejecting cheque: %w", err)
}
if !known {
err = s.addressbook.PutChequebook(peer, cheque.Chequebook)
if err != nil {
return err
}
}
s.metrics.TotalReceived.Add(float64(amount.Uint64()))
return s.notifyPaymentFunc(peer, amount.Uint64())
}
// Pay initiates a payment to the given peer
func (s *Service) Pay(ctx context.Context, peer swarm.Address, amount uint64) error {
beneficiary, known, err := s.addressbook.Beneficiary(peer)
if err != nil {
return err
}
if !known {
s.logger.Warningf("disconnecting non-swap peer %v", peer)
err = s.p2pService.Disconnect(peer)
if err != nil {
return err
}
return ErrUnknownBeneficary
}
err = s.chequebook.Issue(ctx, beneficiary, big.NewInt(int64(amount)), func(signedCheque *chequebook.SignedCheque) error {
return s.proto.EmitCheque(ctx, peer, signedCheque)
})
if err != nil {
return err
}
s.metrics.TotalSent.Add(float64(amount))
return nil
}
// SetNotifyPaymentFunc sets the NotifyPaymentFunc to notify
func (s *Service) SetNotifyPaymentFunc(notifyPaymentFunc settlement.NotifyPaymentFunc) {
s.notifyPaymentFunc = notifyPaymentFunc
}
// TotalSent returns the total amount sent to a peer
func (s *Service) TotalSent(peer swarm.Address) (totalSent uint64, err error) {
beneficiary, known, err := s.addressbook.Beneficiary(peer)
if err != nil {
return 0, err
}
if !known {
return 0, settlement.ErrPeerNoSettlements
}
cheque, err := s.chequebook.LastCheque(beneficiary)
if err != nil {
if err == chequebook.ErrNoCheque {
return 0, settlement.ErrPeerNoSettlements
}
return 0, err
}
return cheque.CumulativePayout.Uint64(), nil
}
// TotalReceived returns the total amount received from a peer
func (s *Service) TotalReceived(peer swarm.Address) (totalReceived uint64, err error) {
chequebookAddress, known, err := s.addressbook.Chequebook(peer)
if err != nil {
return 0, err
}
if !known {
return 0, settlement.ErrPeerNoSettlements
}
cheque, err := s.chequeStore.LastCheque(chequebookAddress)
if err != nil {
if err == chequebook.ErrNoCheque {
return 0, settlement.ErrPeerNoSettlements
}
return 0, err
}
return cheque.CumulativePayout.Uint64(), nil
}
// SettlementsSent returns sent settlements for each individual known peer
func (s *Service) SettlementsSent() (map[string]uint64, error) {
result := make(map[string]uint64)
cheques, err := s.chequebook.LastCheques()
if err != nil {
return nil, err
}
for beneficiary, cheque := range cheques {
peer, known, err := s.addressbook.BeneficiaryPeer(beneficiary)
if err != nil {
return nil, err
}
if !known {
continue
}
result[peer.String()] = cheque.CumulativePayout.Uint64()
}
return result, nil
}
// SettlementsReceived returns received settlements for each individual known peer.
func (s *Service) SettlementsReceived() (map[string]uint64, error) {
result := make(map[string]uint64)
cheques, err := s.chequeStore.LastCheques()
if err != nil {
return nil, err
}
for chequebook, cheque := range cheques {
peer, known, err := s.addressbook.ChequebookPeer(chequebook)
if err != nil {
return nil, err
}
if !known {
continue
}
result[peer.String()] = cheque.CumulativePayout.Uint64()
}
return result, err
}
// Handshake is called by the swap protocol when a handshake is received.
func (s *Service) Handshake(peer swarm.Address, beneficiary common.Address) error {
// check that the overlay address was derived from the beneficiary (implying they have the same private key)
// while this is not strictly necessary for correct functionality we need to ensure no two peers use the same beneficiary
// as long as we enforce this we might not need the handshake message if the p2p layer exposed the overlay public key
expectedOverlay := crypto.NewOverlayFromEthereumAddress(beneficiary[:], s.networkID)
if !expectedOverlay.Equal(peer) {
return ErrWrongBeneficiary
}
storedBeneficiary, known, err := s.addressbook.Beneficiary(peer)
if err != nil {
return err
}
if !known {
s.logger.Tracef("initial swap handshake peer: %v beneficiary: %x", peer, beneficiary)
return s.addressbook.PutBeneficiary(peer, beneficiary)
}
if storedBeneficiary != beneficiary {
return ErrWrongBeneficiary
}
return nil
}
// LastSentCheque returns the last sent cheque for the peer
func (s *Service) LastSentCheque(peer swarm.Address) (*chequebook.SignedCheque, error) {
common, known, err := s.addressbook.Beneficiary(peer)
if err != nil {
return nil, err
}
if !known {
return nil, chequebook.ErrNoCheque
}
return s.chequebook.LastCheque(common)
}
// LastReceivedCheque returns the last received cheque for the peer
func (s *Service) LastReceivedCheque(peer swarm.Address) (*chequebook.SignedCheque, error) {
common, known, err := s.addressbook.Chequebook(peer)
if err != nil {
return nil, err
}
if !known {
return nil, chequebook.ErrNoCheque
}
return s.chequeStore.LastCheque(common)
}
// LastSentCheques returns the list of last sent cheques for all peers
func (s *Service) LastSentCheques() (map[string]*chequebook.SignedCheque, error) {
lastcheques, err := s.chequebook.LastCheques()
if err != nil {
return nil, err
}
resultmap := make(map[string]*chequebook.SignedCheque, len(lastcheques))
for i, j := range lastcheques {
addr, known, err := s.addressbook.BeneficiaryPeer(i)
if err == nil && known {
resultmap[addr.String()] = j
}
}
return resultmap, nil
}
// LastReceivedCheques returns the list of last received cheques for all peers
func (s *Service) LastReceivedCheques() (map[string]*chequebook.SignedCheque, error) {
lastcheques, err := s.chequeStore.LastCheques()
if err != nil {
return nil, err
}
resultmap := make(map[string]*chequebook.SignedCheque, len(lastcheques))
for i, j := range lastcheques {
addr, known, err := s.addressbook.ChequebookPeer(i)
if err == nil && known {
resultmap[addr.String()] = j
}
}
return resultmap, nil
}
// CashCheque sends a cashing transaction for the last cheque of the peer
func (s *Service) CashCheque(ctx context.Context, peer swarm.Address) (common.Hash, error) {
chequebookAddress, known, err := s.addressbook.Chequebook(peer)
if err != nil {
return common.Hash{}, err
}
if !known {
return common.Hash{}, chequebook.ErrNoCheque
}
return s.cashout.CashCheque(ctx, chequebookAddress, s.chequebook.Address())
}
// CashoutStatus gets the status of the latest cashout transaction for the peers chequebook
func (s *Service) CashoutStatus(ctx context.Context, peer swarm.Address) (*chequebook.CashoutStatus, error) {
chequebookAddress, known, err := s.addressbook.Chequebook(peer)
if err != nil {
return nil, err
}
if !known {
return nil, chequebook.ErrNoCheque
}
return s.cashout.CashoutStatus(ctx, chequebookAddress)
}
| 1 | 13,715 | A peer's accounting lock is held during `Pay`. We should avoid adding additional blockchain calls here if possible. | ethersphere-bee | go
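A minimal sketch of what the review comment above could lead to, written as an addition to the swap package shown in this row: the available-balance gauge is refreshed on its own timer instead of inside `Pay`, so no extra chequebook contract call is made while a peer's accounting lock is held. The helper name updateAvailableBalanceMetric and the interval parameter are assumptions for illustration, and the sketch presumes the AvailableBalance gauge introduced by the patch exists on the metrics struct.
package swap

import (
	"context"
	"time"
)

// updateAvailableBalanceMetric refreshes the available-balance gauge on a
// fixed interval rather than on every payment, so the blockchain-backed
// AvailableBalance call never runs while a peer's accounting lock is held.
// It is meant to be started once in a goroutine during node bootstrap.
func (s *Service) updateAvailableBalanceMetric(ctx context.Context, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			balance, err := s.chequebook.AvailableBalance(ctx)
			if err != nil {
				s.logger.Warningf("swap: refreshing available balance metric: %v", err)
				continue
			}
			s.metrics.AvailableBalance.Set(float64(balance.Int64()))
		}
	}
}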
@@ -66,6 +66,12 @@ module.exports = function(realmConstructor) {
setConstructorOnPrototype(realmConstructor.Sync.User);
setConstructorOnPrototype(realmConstructor.Sync.Session);
+ } else {
+ Object.defineProperty(realmConstructor, 'Sync', {
+ get: function () {
+ throw new Error("Sync is not enabled. Note that the developer edition of the Node.JS SDK for Realm does not include sync on Linux.");
+ }
+ })
}
// TODO: Remove this now useless object. | 1 | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 2016 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
'use strict';
let getOwnPropertyDescriptors = Object.getOwnPropertyDescriptors || function(obj) {
return Object.getOwnPropertyNames(obj).reduce(function (descriptors, name) {
descriptors[name] = Object.getOwnPropertyDescriptor(obj, name);
return descriptors;
}, {});
};
function setConstructorOnPrototype(klass) {
if (klass.prototype.constructor !== klass) {
Object.defineProperty(klass.prototype, 'constructor', { value: klass, configurable: true, writable: true });
}
}
module.exports = function(realmConstructor) {
// Add the specified Array methods to the Collection prototype.
Object.defineProperties(realmConstructor.Collection.prototype, require('./collection-methods'));
setConstructorOnPrototype(realmConstructor.Collection);
setConstructorOnPrototype(realmConstructor.List);
setConstructorOnPrototype(realmConstructor.Results);
setConstructorOnPrototype(realmConstructor.Object);
// Add sync methods
if (realmConstructor.Sync) {
let userMethods = require('./user-methods');
Object.defineProperties(realmConstructor.Sync.User, getOwnPropertyDescriptors(userMethods.static));
Object.defineProperties(realmConstructor.Sync.User.prototype, getOwnPropertyDescriptors(userMethods.instance));
Object.defineProperty(realmConstructor.Sync.User, '_realmConstructor', { value: realmConstructor });
realmConstructor.Sync.AuthError = require('./errors').AuthError;
if (realmConstructor.Sync.cleanup) {
// FIXME: DOES THIS WORK ON BOTH NODE AND REACT NATIVE?
process.on('exit', realmConstructor.Sync.cleanup);
process.on('SIGINT', function () {
realmConstructor.Sync.cleanup();
process.exit(2);
});
process.on('uncaughtException', function(e) {
realmConstructor.Sync.cleanup();
/* eslint-disable no-console */
console.log(e.stack);
process.exit(99);
});
}
setConstructorOnPrototype(realmConstructor.Sync.User);
setConstructorOnPrototype(realmConstructor.Sync.Session);
}
// TODO: Remove this now useless object.
var types = Object.freeze({
'BOOL': 'bool',
'INT': 'int',
'FLOAT': 'float',
'DOUBLE': 'double',
'STRING': 'string',
'DATE': 'date',
'DATA': 'data',
'OBJECT': 'object',
'LIST': 'list',
});
Object.defineProperty(realmConstructor, 'Types', {
get: function() {
if (typeof console != 'undefined') {
/* global console */
/* eslint-disable no-console */
var stack = new Error().stack.split("\n").slice(2).join("\n");
var msg = '`Realm.Types` is deprecated! Please specify the type name as lowercase string instead!\n'+stack;
if (console.warn != undefined) {
console.warn(msg);
}
else {
console.log(msg);
}
/* eslint-enable no-console */
}
return types;
},
configurable: true
});
| 1 | 15,939 | Maybe the wording could be improved. Is this "not enabled" or is it "not available"? Not sure about that. | realm-realm-js | js
@@ -120,7 +120,11 @@ func (it *TaskReconciler) Reconcile(ctx context.Context, request reconcile.Reque
// update the status about conditional tasks
if len(pods) > 0 && (pods[0].Status.Phase == corev1.PodFailed || pods[0].Status.Phase == corev1.PodSucceeded) {
- if !conditionalBranchesEvaluated(node) {
+ evaluated, err := it.conditionalBranchesEvaluated(ctx, node)
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+ if !evaluated {
it.eventRecorder.Event(&node, recorder.TaskPodPodCompleted{PodName: pods[0].Name})
// task pod is terminated
updateError := retry.RetryOnConflict(retry.DefaultRetry, func() error { | 1 | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package controllers
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/controllers/utils/recorder"
"github.com/chaos-mesh/chaos-mesh/pkg/workflow/task"
"github.com/chaos-mesh/chaos-mesh/pkg/workflow/task/collector"
)
type TaskReconciler struct {
*ChildNodesFetcher
kubeClient client.Client
restConfig *rest.Config
eventRecorder recorder.ChaosRecorder
logger logr.Logger
}
func NewTaskReconciler(kubeClient client.Client, restConfig *rest.Config, eventRecorder recorder.ChaosRecorder, logger logr.Logger) *TaskReconciler {
return &TaskReconciler{
ChildNodesFetcher: NewChildNodesFetcher(kubeClient, logger),
kubeClient: kubeClient,
restConfig: restConfig,
eventRecorder: eventRecorder,
logger: logger,
}
}
func (it *TaskReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
startTime := time.Now()
defer func() {
it.logger.V(4).Info("Finished syncing for task node",
"node", request.NamespacedName,
"duration", time.Since(startTime),
)
}()
node := v1alpha1.WorkflowNode{}
err := it.kubeClient.Get(ctx, request.NamespacedName, &node)
if err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
// only resolve task nodes
if node.Spec.Type != v1alpha1.TypeTask {
return reconcile.Result{}, nil
}
it.logger.V(4).Info("resolve task node", "node", request)
pods, err := it.FetchPodControlledByThisWorkflowNode(ctx, node)
if err != nil {
return reconcile.Result{}, err
}
if len(pods) == 0 {
if workflowName, ok := node.Labels[v1alpha1.LabelWorkflow]; ok {
parentWorkflow := v1alpha1.Workflow{}
err := it.kubeClient.Get(ctx, types.NamespacedName{
Namespace: node.Namespace,
Name: workflowName,
}, &parentWorkflow)
if err != nil {
return reconcile.Result{}, err
}
spawnedPod, err := it.SpawnTaskPod(ctx, &node, &parentWorkflow)
if err != nil {
it.logger.Error(err, "failed to spawn pod for Task Node", "node", request)
it.eventRecorder.Event(&node, recorder.TaskPodSpawnFailed{})
return reconcile.Result{}, err
}
it.eventRecorder.Event(&node, recorder.TaskPodSpawned{PodName: spawnedPod.Name})
} else {
return reconcile.Result{}, errors.Errorf("node %s/%s does not contains label %s", node.Namespace, node.Name, v1alpha1.LabelWorkflow)
}
}
if len(pods) > 1 {
var podNames []string
for _, pod := range pods {
podNames = append(podNames, fmt.Sprintf("%s/%s", pod.Namespace, pod.Name))
}
it.logger.Info("unexpected more than 1 pod created by task node, it will pick random one",
"node", request,
"pods", podNames,
"picked", fmt.Sprintf("%s/%s", pods[0].Namespace, pods[0].Name),
)
}
// update the status about conditional tasks
if len(pods) > 0 && (pods[0].Status.Phase == corev1.PodFailed || pods[0].Status.Phase == corev1.PodSucceeded) {
if !conditionalBranchesEvaluated(node) {
it.eventRecorder.Event(&node, recorder.TaskPodPodCompleted{PodName: pods[0].Name})
// task pod is terminated
updateError := retry.RetryOnConflict(retry.DefaultRetry, func() error {
nodeNeedUpdate := v1alpha1.WorkflowNode{}
err := it.kubeClient.Get(ctx, request.NamespacedName, &nodeNeedUpdate)
if err != nil {
return err
}
if nodeNeedUpdate.Status.ConditionalBranchesStatus == nil {
nodeNeedUpdate.Status.ConditionalBranchesStatus = &v1alpha1.ConditionalBranchesStatus{}
}
// TODO: update related condition
defaultCollector := collector.DefaultCollector(it.kubeClient, it.restConfig, pods[0].Namespace, pods[0].Name, nodeNeedUpdate.Spec.Task.Container.Name)
env, err := defaultCollector.CollectContext(ctx)
if err != nil {
it.logger.Error(err, "failed to fetch env from task",
"task", fmt.Sprintf("%s/%s", nodeNeedUpdate.Namespace, nodeNeedUpdate.Name),
)
return err
}
if env != nil {
jsonString, err := json.Marshal(env)
if err != nil {
it.logger.Error(err, "failed to convert env to json",
"task", fmt.Sprintf("%s/%s", nodeNeedUpdate.Namespace, nodeNeedUpdate.Name),
"env", env)
} else {
nodeNeedUpdate.Status.ConditionalBranchesStatus.Context = []string{string(jsonString)}
}
}
evaluator := task.NewEvaluator(it.logger, it.kubeClient)
evaluateConditionBranches, err := evaluator.EvaluateConditionBranches(nodeNeedUpdate.Spec.ConditionalBranches, env)
if err != nil {
it.logger.Error(err, "failed to evaluate expression",
"task", fmt.Sprintf("%s/%s", nodeNeedUpdate.Namespace, nodeNeedUpdate.Name),
)
return err
}
nodeNeedUpdate.Status.ConditionalBranchesStatus.Branches = evaluateConditionBranches
var selectedBranches []string
for _, item := range evaluateConditionBranches {
if item.EvaluationResult == corev1.ConditionTrue {
selectedBranches = append(selectedBranches, item.Target)
}
}
it.eventRecorder.Event(&nodeNeedUpdate, recorder.ConditionalBranchesSelected{SelectedBranches: selectedBranches})
err = it.kubeClient.Status().Update(ctx, &nodeNeedUpdate)
return err
})
if client.IgnoreNotFound(updateError) != nil {
it.logger.Error(updateError, "failed to update the condition status of task",
"task", request)
}
}
} else {
// task pod is still running or not exists
updateError := retry.RetryOnConflict(retry.DefaultRetry, func() error {
nodeNeedUpdate := v1alpha1.WorkflowNode{}
err := it.kubeClient.Get(ctx, request.NamespacedName, &nodeNeedUpdate)
if err != nil {
return err
}
// TODO: update related condition
var branches []v1alpha1.ConditionalBranchStatus
if nodeNeedUpdate.Status.ConditionalBranchesStatus == nil {
nodeNeedUpdate.Status.ConditionalBranchesStatus = &v1alpha1.ConditionalBranchesStatus{}
}
for _, conditionalTask := range nodeNeedUpdate.Spec.ConditionalBranches {
branch := v1alpha1.ConditionalBranchStatus{
Target: conditionalTask.Target,
EvaluationResult: corev1.ConditionUnknown,
}
branches = append(branches, branch)
}
nodeNeedUpdate.Status.ConditionalBranchesStatus.Branches = branches
err = it.kubeClient.Status().Update(ctx, &nodeNeedUpdate)
return err
})
if client.IgnoreNotFound(updateError) != nil {
it.logger.Error(updateError, "k failed to update the condition status of task",
"task", request)
}
}
// update the status about children nodes
var evaluatedNode v1alpha1.WorkflowNode
err = it.kubeClient.Get(ctx, request.NamespacedName, &evaluatedNode)
if err != nil {
return reconcile.Result{}, err
}
if conditionalBranchesEvaluated(evaluatedNode) {
err = it.syncChildNodes(ctx, evaluatedNode)
if err != nil {
return reconcile.Result{}, err
}
// update the status of children workflow nodes
updateError := retry.RetryOnConflict(retry.DefaultRetry, func() error {
nodeNeedUpdate := v1alpha1.WorkflowNode{}
err := it.kubeClient.Get(ctx, request.NamespacedName, &nodeNeedUpdate)
if err != nil {
return err
}
var tasks []string
for _, branch := range evaluatedNode.Status.ConditionalBranchesStatus.Branches {
if branch.EvaluationResult == corev1.ConditionTrue {
tasks = append(tasks, branch.Target)
}
}
activeChildren, finishedChildren, err := it.fetchChildNodes(ctx, nodeNeedUpdate)
if err != nil {
return err
}
nodeNeedUpdate.Status.FinishedChildren = nil
for _, finishedChild := range finishedChildren {
nodeNeedUpdate.Status.FinishedChildren = append(nodeNeedUpdate.Status.FinishedChildren,
corev1.LocalObjectReference{
Name: finishedChild.Name,
})
}
nodeNeedUpdate.Status.ActiveChildren = nil
for _, activeChild := range activeChildren {
nodeNeedUpdate.Status.ActiveChildren = append(nodeNeedUpdate.Status.ActiveChildren,
corev1.LocalObjectReference{
Name: activeChild.Name,
})
}
// TODO: also check the consistent between spec in task and the spec in child node
if conditionalBranchesEvaluated(nodeNeedUpdate) && len(finishedChildren) == len(tasks) {
SetCondition(&nodeNeedUpdate.Status, v1alpha1.WorkflowNodeCondition{
Type: v1alpha1.ConditionAccomplished,
Status: corev1.ConditionTrue,
Reason: "",
})
it.eventRecorder.Event(&nodeNeedUpdate, recorder.NodeAccomplished{})
} else {
SetCondition(&nodeNeedUpdate.Status, v1alpha1.WorkflowNodeCondition{
Type: v1alpha1.ConditionAccomplished,
Status: corev1.ConditionFalse,
Reason: "",
})
}
return it.kubeClient.Status().Update(ctx, &nodeNeedUpdate)
})
return reconcile.Result{}, client.IgnoreNotFound(updateError)
}
return reconcile.Result{}, nil
}
func (it *TaskReconciler) syncChildNodes(ctx context.Context, evaluatedNode v1alpha1.WorkflowNode) error {
var tasks []string
for _, branch := range evaluatedNode.Status.ConditionalBranchesStatus.Branches {
if branch.EvaluationResult == corev1.ConditionTrue {
tasks = append(tasks, branch.Target)
}
}
if len(tasks) == 0 {
it.logger.V(4).Info("0 condition of branch in task node is True, Noop",
"node", fmt.Sprintf("%s/%s", evaluatedNode.Namespace, evaluatedNode.Name),
)
return nil
}
activeChildNodes, finishedChildNodes, err := it.fetchChildNodes(ctx, evaluatedNode)
if err != nil {
return err
}
existsChildNodes := append(activeChildNodes, finishedChildNodes...)
var taskNamesOfNodes []string
for _, childNode := range existsChildNodes {
taskNamesOfNodes = append(taskNamesOfNodes, getTaskNameFromGeneratedName(childNode.GetName()))
}
// TODO: check the specific of task and workflow nodes
// the definition of tasks changed, remove all the existed nodes
if len(setDifference(taskNamesOfNodes, tasks)) > 0 ||
len(setDifference(tasks, taskNamesOfNodes)) > 0 {
var nodesToCleanup []string
for _, item := range existsChildNodes {
nodesToCleanup = append(nodesToCleanup, item.Name)
}
it.eventRecorder.Event(&evaluatedNode, recorder.RerunBySpecChanged{CleanedChildrenNode: nodesToCleanup})
for _, childNode := range existsChildNodes {
// best effort deletion
err := it.kubeClient.Delete(ctx, &childNode)
if err != nil {
it.logger.Error(err, "failed to delete outdated child node",
"node", fmt.Sprintf("%s/%s", evaluatedNode.Namespace, evaluatedNode.Name),
"child node", fmt.Sprintf("%s/%s", childNode.Namespace, childNode.Name),
)
}
}
} else {
// exactly same, NOOP
return nil
}
parentWorkflow := v1alpha1.Workflow{}
err = it.kubeClient.Get(ctx, types.NamespacedName{
Namespace: evaluatedNode.Namespace,
Name: evaluatedNode.Spec.WorkflowName,
}, &parentWorkflow)
if err != nil {
it.logger.Error(err, "failed to fetch parent workflow",
"node", fmt.Sprintf("%s/%s", evaluatedNode.Namespace, evaluatedNode.Name),
"workflow name", evaluatedNode.Spec.WorkflowName)
return err
}
childNodes, err := renderNodesByTemplates(&parentWorkflow, &evaluatedNode, tasks...)
if err != nil {
it.logger.Error(err, "failed to render children childNodes",
"node", fmt.Sprintf("%s/%s", evaluatedNode.Namespace, evaluatedNode.Name))
return err
}
// TODO: emit event
var childrenNames []string
for _, childNode := range childNodes {
err := it.kubeClient.Create(ctx, childNode)
if err != nil {
it.logger.Error(err, "failed to create child node",
"node", fmt.Sprintf("%s/%s", evaluatedNode.Namespace, evaluatedNode.Name),
"child node", childNode)
return err
}
childrenNames = append(childrenNames, childNode.Name)
}
it.eventRecorder.Event(&evaluatedNode, recorder.NodesCreated{ChildNodes: childrenNames})
it.logger.Info("task node spawn new child node",
"node", fmt.Sprintf("%s/%s", evaluatedNode.Namespace, evaluatedNode.Name),
"child node", childrenNames)
return nil
}
func (it *TaskReconciler) FetchPodControlledByThisWorkflowNode(ctx context.Context, node v1alpha1.WorkflowNode) ([]corev1.Pod, error) {
controlledByThisNode, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
MatchLabels: map[string]string{
v1alpha1.LabelControlledBy: node.Name,
},
})
if err != nil {
it.logger.Error(err, "failed to build label selector with filtering children workflow node controlled by current node",
"current node", fmt.Sprintf("%s/%s", node.Namespace, node.Name))
return nil, err
}
var childPods corev1.PodList
err = it.kubeClient.List(ctx, &childPods, &client.ListOptions{
LabelSelector: controlledByThisNode,
})
if err != nil {
return nil, err
}
return childPods.Items, nil
}
func (it *TaskReconciler) SpawnTaskPod(ctx context.Context, node *v1alpha1.WorkflowNode, workflow *v1alpha1.Workflow) (*corev1.Pod, error) {
if node.Spec.Task == nil {
return nil, errors.Errorf("node %s/%s does not contains spec of Target", node.Namespace, node.Name)
}
podSpec, err := task.SpawnPodForTask(*node.Spec.Task)
if err != nil {
return nil, err
}
taskPod := corev1.Pod{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
GenerateName: fmt.Sprintf("%s-", node.Name),
Namespace: node.Namespace,
Labels: map[string]string{
v1alpha1.LabelControlledBy: node.Name,
v1alpha1.LabelWorkflow: workflow.Name,
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: ApiVersion,
Kind: KindWorkflowNode,
Name: node.Name,
UID: node.UID,
Controller: &isController,
BlockOwnerDeletion: &blockOwnerDeletion,
},
},
Finalizers: []string{metav1.FinalizerDeleteDependents},
},
Spec: podSpec,
}
err = it.kubeClient.Create(ctx, &taskPod)
if err != nil {
return nil, err
}
return &taskPod, nil
}
func conditionalBranchesEvaluated(node v1alpha1.WorkflowNode) bool {
if node.Status.ConditionalBranchesStatus == nil {
return false
}
if len(node.Spec.ConditionalBranches) != len(node.Status.ConditionalBranchesStatus.Branches) {
return false
}
for _, branch := range node.Status.ConditionalBranchesStatus.Branches {
if branch.EvaluationResult == corev1.ConditionUnknown {
return false
}
}
return true
}
| 1 | 25,006 | I looked at the new `conditionalBranchesEvaluated` function and it looks like the part added is a duplicate of the line above? | chaos-mesh-chaos-mesh | go |
@@ -53,6 +53,9 @@ import (
// May be missing, but should be present when data is.
// - refs: The list of references to the block, encoded as a serialized
// blockRefInfo. May be missing.
+// - f: Presence of this file indicates the block has been successfully
+// flushed to the server. However, the data might still be
+// in the store.
//
// Future versions of the disk store might add more files to this
// directory; if any code is written to move blocks around, it should | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"path/filepath"
"strings"
"github.com/keybase/go-codec/codec"
"github.com/keybase/kbfs/ioutil"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/pkg/errors"
)
// blockDiskStore stores block data in flat files on disk.
//
// The directory layout looks like:
//
// dir/0100/0...01/data
// dir/0100/0...01/id
// dir/0100/0...01/ksh
// dir/0100/0...01/refs
// ...
// dir/01cc/5...55/id
// dir/01cc/5...55/refs
// ...
// dir/01dd/6...66/data
// dir/01dd/6...66/id
// dir/01dd/6...66/ksh
// ...
// dir/01ff/f...ff/data
// dir/01ff/f...ff/id
// dir/01ff/f...ff/ksh
// dir/01ff/f...ff/refs
//
// Each block has its own subdirectory with its ID truncated to 17
// bytes (34 characters) as a name. The block subdirectories are
// splayed over (# of possible hash types) * 256 subdirectories -- one
// byte for the hash type (currently only one) plus the first byte of
// the hash data -- using the first four characters of the name to
// keep the number of directories in dir itself to a manageable
// number, similar to git.
//
// Each block directory has the following files:
//
// - id: The full block ID in binary format. Always present.
// - data: The raw block data that should hash to the block ID.
// May be missing.
// - ksh: The raw data for the associated key server half.
// May be missing, but should be present when data is.
// - refs: The list of references to the block, encoded as a serialized
// blockRefInfo. May be missing.
//
// Future versions of the disk store might add more files to this
// directory; if any code is written to move blocks around, it should
// be careful to preserve any unknown files in a block directory.
//
// The maximum number of characters added to the root dir by a block
// disk store is 44:
//
// /01ff/f...(30 characters total)...ff/data
//
// blockDiskStore is not goroutine-safe, so any code that uses it must
// guarantee that only one goroutine at a time calls its functions.
type blockDiskStore struct {
codec kbfscodec.Codec
crypto cryptoPure
dir string
}
// makeBlockDiskStore returns a new blockDiskStore for the given
// directory.
func makeBlockDiskStore(codec kbfscodec.Codec, crypto cryptoPure,
dir string) *blockDiskStore {
return &blockDiskStore{
codec: codec,
crypto: crypto,
dir: dir,
}
}
// The functions below are for building various paths.
func (s *blockDiskStore) blockPath(id BlockID) string {
// Truncate to 34 characters, which corresponds to 16 random
// bytes (since the first byte is a hash type) or 128 random
// bits, which means that the expected number of blocks
// generated before getting a path collision is 2^64 (see
// https://en.wikipedia.org/wiki/Birthday_problem#Cast_as_a_collision_problem
// ).
idStr := id.String()
return filepath.Join(s.dir, idStr[:4], idStr[4:34])
}
func (s *blockDiskStore) dataPath(id BlockID) string {
return filepath.Join(s.blockPath(id), "data")
}
const idFilename = "id"
func (s *blockDiskStore) idPath(id BlockID) string {
return filepath.Join(s.blockPath(id), idFilename)
}
func (s *blockDiskStore) keyServerHalfPath(id BlockID) string {
return filepath.Join(s.blockPath(id), "ksh")
}
func (s *blockDiskStore) refsPath(id BlockID) string {
return filepath.Join(s.blockPath(id), "refs")
}
// makeDir makes the directory for the given block ID and writes the
// ID file, if necessary.
func (s *blockDiskStore) makeDir(id BlockID) error {
err := ioutil.MkdirAll(s.blockPath(id), 0700)
if err != nil {
return err
}
// TODO: Only write if the file doesn't exist.
return ioutil.WriteFile(s.idPath(id), []byte(id.String()), 0600)
}
// blockRefInfo is a wrapper around blockRefMap, in case we want to
// add more fields in the future.
type blockRefInfo struct {
Refs blockRefMap
codec.UnknownFieldSetHandler
}
// TODO: Add caching for refs
// getRefInfo returns the references for the given ID.
func (s *blockDiskStore) getRefInfo(id BlockID) (blockRefInfo, error) {
var refInfo blockRefInfo
err := kbfscodec.DeserializeFromFile(s.codec, s.refsPath(id), &refInfo)
if !ioutil.IsNotExist(err) && err != nil {
return blockRefInfo{}, err
}
if refInfo.Refs == nil {
refInfo.Refs = make(blockRefMap)
}
return refInfo, nil
}
// putRefInfo stores the given references for the given ID.
func (s *blockDiskStore) putRefInfo(id BlockID, refs blockRefInfo) error {
return kbfscodec.SerializeToFile(s.codec, refs, s.refsPath(id))
}
// addRefs adds references for the given contexts to the given ID, all
// with the same status and tag.
func (s *blockDiskStore) addRefs(id BlockID, contexts []BlockContext,
status blockRefStatus, tag string) error {
refInfo, err := s.getRefInfo(id)
if err != nil {
return err
}
if len(refInfo.Refs) > 0 {
// Check existing contexts, if any.
for _, context := range contexts {
_, err := refInfo.Refs.checkExists(context)
if err != nil {
return err
}
}
}
for _, context := range contexts {
err = refInfo.Refs.put(context, status, tag)
if err != nil {
return err
}
}
return s.putRefInfo(id, refInfo)
}
// getData returns the data and server half for the given ID, if
// present.
func (s *blockDiskStore) getData(id BlockID) (
[]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {
data, err := ioutil.ReadFile(s.dataPath(id))
if ioutil.IsNotExist(err) {
return nil, kbfscrypto.BlockCryptKeyServerHalf{},
blockNonExistentError{id}
} else if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
keyServerHalfPath := s.keyServerHalfPath(id)
buf, err := ioutil.ReadFile(keyServerHalfPath)
if ioutil.IsNotExist(err) {
return nil, kbfscrypto.BlockCryptKeyServerHalf{},
blockNonExistentError{id}
} else if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
// Check integrity.
dataID, err := s.crypto.MakePermanentBlockID(data)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
if id != dataID {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, errors.Errorf(
"Block ID mismatch: expected %s, got %s", id, dataID)
}
var serverHalf kbfscrypto.BlockCryptKeyServerHalf
err = serverHalf.UnmarshalBinary(buf)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
return data, serverHalf, nil
}
// All functions below are public functions.
func (s *blockDiskStore) hasAnyRef(id BlockID) (bool, error) {
refInfo, err := s.getRefInfo(id)
if err != nil {
return false, err
}
return len(refInfo.Refs) > 0, nil
}
func (s *blockDiskStore) hasNonArchivedRef(id BlockID) (bool, error) {
refInfo, err := s.getRefInfo(id)
if err != nil {
return false, err
}
return refInfo.Refs.hasNonArchivedRef(), nil
}
func (s *blockDiskStore) hasContext(id BlockID, context BlockContext) (
bool, error) {
refInfo, err := s.getRefInfo(id)
if err != nil {
return false, err
}
return refInfo.Refs.checkExists(context)
}
func (s *blockDiskStore) hasData(id BlockID) error {
_, err := ioutil.Stat(s.dataPath(id))
return err
}
func (s *blockDiskStore) getDataSize(id BlockID) (int64, error) {
fi, err := ioutil.Stat(s.dataPath(id))
if ioutil.IsNotExist(err) {
return 0, nil
} else if err != nil {
return 0, err
}
return fi.Size(), nil
}
func (s *blockDiskStore) getDataWithContext(id BlockID, context BlockContext) (
[]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {
hasContext, err := s.hasContext(id, context)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
if !hasContext {
return nil, kbfscrypto.BlockCryptKeyServerHalf{},
blockNonExistentError{id}
}
return s.getData(id)
}
func (s *blockDiskStore) getAllRefsForTest() (map[BlockID]blockRefMap, error) {
res := make(map[BlockID]blockRefMap)
fileInfos, err := ioutil.ReadDir(s.dir)
if ioutil.IsNotExist(err) {
return res, nil
} else if err != nil {
return nil, err
}
for _, fi := range fileInfos {
name := fi.Name()
if !fi.IsDir() {
return nil, errors.Errorf("Unexpected non-dir %q", name)
}
subFileInfos, err := ioutil.ReadDir(filepath.Join(s.dir, name))
if err != nil {
return nil, err
}
for _, sfi := range subFileInfos {
subName := sfi.Name()
if !sfi.IsDir() {
return nil, errors.Errorf("Unexpected non-dir %q",
subName)
}
idPath := filepath.Join(
s.dir, name, subName, idFilename)
idBytes, err := ioutil.ReadFile(idPath)
if err != nil {
return nil, err
}
id, err := BlockIDFromString(string(idBytes))
if err != nil {
return nil, errors.WithStack(err)
}
if !strings.HasPrefix(id.String(), name+subName) {
return nil, errors.Errorf(
"%q unexpectedly not a prefix of %q",
name+subName, id.String())
}
refInfo, err := s.getRefInfo(id)
if err != nil {
return nil, err
}
if len(refInfo.Refs) > 0 {
res[id] = refInfo.Refs
}
}
}
return res, nil
}
// put puts the given data for the block, which may already exist, and
// adds a reference for the given context.
func (s *blockDiskStore) put(id BlockID, context BlockContext, buf []byte,
serverHalf kbfscrypto.BlockCryptKeyServerHalf, tag string) error {
err := validateBlockPut(s.crypto, id, context, buf)
if err != nil {
return err
}
// Check the data and retrieve the server half, if they exist.
_, existingServerHalf, err := s.getDataWithContext(id, context)
var exists bool
switch err.(type) {
case blockNonExistentError:
exists = false
case nil:
exists = true
default:
return err
}
if exists {
// If the entry already exists, everything should be
// the same, except for possibly additional
// references.
// We checked that both buf and the existing data hash
// to id, so no need to check that they're both equal.
if existingServerHalf != serverHalf {
return errors.Errorf(
"key server half mismatch: expected %s, got %s",
existingServerHalf, serverHalf)
}
} else {
err = s.makeDir(id)
if err != nil {
return err
}
err = ioutil.WriteFile(s.dataPath(id), buf, 0600)
if err != nil {
return err
}
// TODO: Add integrity-checking for key server half?
data, err := serverHalf.MarshalBinary()
if err != nil {
return err
}
err = ioutil.WriteFile(s.keyServerHalfPath(id), data, 0600)
if err != nil {
return err
}
}
err = s.addRefs(id, []BlockContext{context}, liveBlockRef, tag)
if err != nil {
return err
}
return nil
}
func (s *blockDiskStore) addReference(
id BlockID, context BlockContext, tag string) error {
err := s.makeDir(id)
if err != nil {
return err
}
return s.addRefs(id, []BlockContext{context}, liveBlockRef, tag)
}
func (s *blockDiskStore) archiveReferences(
contexts map[BlockID][]BlockContext, tag string) error {
for id, idContexts := range contexts {
err := s.makeDir(id)
if err != nil {
return err
}
err = s.addRefs(id, idContexts, archivedBlockRef, tag)
if err != nil {
return err
}
}
return nil
}
// removeReferences removes references for the given contexts from
// their respective IDs. If tag is non-empty, then a reference will be
// removed only if its most recent tag (passed in to addRefs) matches
// the given one.
func (s *blockDiskStore) removeReferences(
id BlockID, contexts []BlockContext, tag string) (
liveCount int, err error) {
refInfo, err := s.getRefInfo(id)
if err != nil {
return 0, err
}
if len(refInfo.Refs) == 0 {
return 0, nil
}
for _, context := range contexts {
err := refInfo.Refs.remove(context, tag)
if err != nil {
return 0, err
}
if len(refInfo.Refs) == 0 {
break
}
}
err = s.putRefInfo(id, refInfo)
if err != nil {
return 0, err
}
return len(refInfo.Refs), nil
}
// remove removes any existing data for the given ID, which must not
// have any references left.
func (s *blockDiskStore) remove(id BlockID) error {
hasAnyRef, err := s.hasAnyRef(id)
if err != nil {
return err
}
if hasAnyRef {
return errors.Errorf(
"Trying to remove data for referenced block %s", id)
}
path := s.blockPath(id)
err = ioutil.RemoveAll(path)
if err != nil {
return err
}
// Remove the parent (splayed) directory if it exists and is
// empty.
err = ioutil.Remove(filepath.Dir(path))
if ioutil.IsNotExist(err) || ioutil.IsExist(err) {
err = nil
}
return err
}
| 1 | 14,780 | can you put this flag in `blockRefInfo` instead? It would be a shame to add one more file per block, especially since we've run into inode limits. I guess `blockRefInfo` should maybe then be renamed to `blockInfo` or something. But we're stuck with the filename. | keybase-kbfs | go |
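A rough sketch of the direction the review above suggests, expressed against the blockDiskStore file in this row: the flushed state is folded into the per-block metadata already serialized to the "refs" file instead of being stored as a separate "f" file. The Flushed field and the markFlushed/isFlushed helper names are assumptions, not the actual kbfs change.
// blockRefInfo as it could look with the flag folded in; the unknown-field
// handler keeps older entries without the field readable.
type blockRefInfo struct {
	Refs    blockRefMap
	Flushed bool

	codec.UnknownFieldSetHandler
}

// markFlushed records that the block was flushed to the server by rewriting
// the refs metadata the store already maintains; no extra per-block file.
func (s *blockDiskStore) markFlushed(id BlockID) error {
	refInfo, err := s.getRefInfo(id)
	if err != nil {
		return err
	}
	refInfo.Flushed = true
	return s.putRefInfo(id, refInfo)
}

// isFlushed reports whether the block has been flushed to the server.
func (s *blockDiskStore) isFlushed(id BlockID) (bool, error) {
	refInfo, err := s.getRefInfo(id)
	if err != nil {
		return false, err
	}
	return refInfo.Flushed, nil
}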
@@ -43,5 +43,9 @@ func (c *ConsumerConfigParser) Parse(config json.RawMessage) (ip string, port in
if err != nil {
return "", 0, "", errors.Wrap(err, "parsing consumer address:port failed")
}
- return cfg.IP, cfg.Port, openvpn.ServiceType, nil
+
+ if cfg.IP == nil {
+ return "", 0, "", nil
+ }
+ return *cfg.IP, cfg.Port, openvpn.ServiceType, nil
} | 1 | /*
* Copyright (C) 2019 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package config
import (
"encoding/json"
"github.com/mysteriumnetwork/node/services"
"github.com/mysteriumnetwork/node/services/openvpn"
"github.com/pkg/errors"
)
// ConsumerConfigParser parses consumer configs
type ConsumerConfigParser struct {
}
// NewConfigParser returns a new ConsumerConfigParser
func NewConfigParser() *ConsumerConfigParser {
return &ConsumerConfigParser{}
}
// Parse parses the given configuration
func (c *ConsumerConfigParser) Parse(config json.RawMessage) (ip string, port int, serviceType services.ServiceType, err error) {
// TODO: since we are getting json.RawMessage here and not interface{} type not sure how to handle multiple services
// since NATPinger is one for all services and we get config from communication channel where service type is not know yet.
var cfg openvpn.ConsumerConfig
err = json.Unmarshal(config, &cfg)
if err != nil {
return "", 0, "", errors.Wrap(err, "parsing consumer address:port failed")
}
return cfg.IP, cfg.Port, openvpn.ServiceType, nil
}
| 1 | 14,178 | Shouldn't we keep other parameters if only IP is empty? Or maybe return an error if it's a mandatory argument? | mysteriumnetwork-node | go |
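The review above raises two options; a minimal sketch of the stricter one, assuming the IP is mandatory and that cfg.IP is a pointer as the patch implies, reworks the Parse method from the file in this row so a missing IP is reported as an error instead of silently dropping the port and service type as well.
// Parse parses the given configuration and fails loudly when the IP is absent.
func (c *ConsumerConfigParser) Parse(config json.RawMessage) (ip string, port int, serviceType services.ServiceType, err error) {
	var cfg openvpn.ConsumerConfig
	if err := json.Unmarshal(config, &cfg); err != nil {
		return "", 0, "", errors.Wrap(err, "parsing consumer address:port failed")
	}
	if cfg.IP == nil {
		return "", 0, "", errors.New("consumer config does not contain an IP address")
	}
	return *cfg.IP, cfg.Port, openvpn.ServiceType, nil
}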
@@ -118,7 +118,8 @@ class NdIndexableMappingTest(ComparisonTestCase):
data = [((0, 0.5), 'a'), ((1, 0.5), 'b')]
ndmap = MultiDimensionalMapping(data, kdims=[self.dim1, self.dim2])
redimmed = ndmap.redim(intdim='Integer')
- self.assertEqual(redimmed.kdims, [Dimension('Integer'), Dimension('floatdim')])
+ self.assertEqual(redimmed.kdims, [Dimension('Integer', type=int),
+ Dimension('floatdim', type=float)])
def test_idxmapping_add_dimension(self):
ndmap = MultiDimensionalMapping(self.init_items_1D_list, kdims=[self.dim1]) | 1 | from collections import OrderedDict
from holoviews.core import Dimension
from holoviews.core.ndmapping import MultiDimensionalMapping
from holoviews.element.comparison import ComparisonTestCase
from holoviews import HoloMap, Dataset
import numpy as np
class DimensionTest(ComparisonTestCase):
def test_dimension_init(self):
Dimension('Test dimension')
Dimension('Test dimension', cyclic=True)
Dimension('Test dimension', cyclic=True, type=int)
Dimension('Test dimension', cyclic=True, type=int, unit='Twilight zones')
def test_dimension_call(self):
dim1 = Dimension('Test dimension')
dim2 = dim1(cyclic=True)
self.assertEqual(dim2.cyclic,True)
dim3 = dim1('New test dimension', unit='scovilles')
self.assertEqual(dim3.name, 'New test dimension')
self.assertEqual(dim3.unit, 'scovilles')
def test_dimension_pprint(self):
dim = Dimension('Test dimension', cyclic=True, type=float, unit='Twilight zones')
self.assertEqual(dim.pprint_value_string(3.23451), 'Test dimension: 3.2345 Twilight zones')
self.assertEqual(dim.pprint_value_string(4.23441), 'Test dimension: 4.2344 Twilight zones')
class NdIndexableMappingTest(ComparisonTestCase):
def setUp(self):
self.init_items_1D_list = [(1, 'a'), (5, 'b')]
self.init_item_list = [((1, 2.0), 'a'), ((5, 3.0), 'b')]
self.init_item_odict = OrderedDict([((1, 2.0), 'a'), ((5, 3.0), 'b')])
self.dimension_labels = ['intdim', 'floatdim']
self.dim1 = Dimension('intdim', type=int)
self.dim2 = Dimension('floatdim', type=float)
self.time_dimension = Dimension
def test_idxmapping_init(self):
MultiDimensionalMapping()
def test_idxmapping_init_item_odict(self):
MultiDimensionalMapping(self.init_item_odict, kdims=[self.dim1, self.dim2])
def test_idxmapping_init_item_list(self):
MultiDimensionalMapping(self.init_item_list, kdims=[self.dim1, self.dim2])
def test_idxmapping_init_dimstr(self):
MultiDimensionalMapping(self.init_item_odict, kdims=self.dimension_labels)
def test_idxmapping_init_dimensions(self):
MultiDimensionalMapping(self.init_item_odict, kdims=[self.dim1, self.dim2])
def test_idxmapping_dimension_labels(self):
idxmap = MultiDimensionalMapping(self.init_item_odict, kdims=[self.dim1, 'floatdim'])
self.assertEqual([d.name for d in idxmap.kdims], self.dimension_labels)
def test_idxmapping_ndims(self):
dims = [self.dim1, self.dim2, 'strdim']
idxmap = MultiDimensionalMapping(kdims=dims)
self.assertEqual(idxmap.ndims, len(dims))
def test_idxmapping_key_len_check(self):
try:
MultiDimensionalMapping(initial_items=self.init_item_odict)
raise AssertionError('Invalid key length check failed.')
except KeyError:
pass
def test_idxmapping_nested_update(self):
data1 = [(0, 'a'), (1, 'b')]
data2 = [(2, 'c'), (3, 'd')]
data3 = [(2, 'e'), (3, 'f')]
ndmap1 = MultiDimensionalMapping(data1, kdims=[self.dim1])
ndmap2 = MultiDimensionalMapping(data2, kdims=[self.dim1])
ndmap3 = MultiDimensionalMapping(data3, kdims=[self.dim1])
ndmap_list = [(0.5, ndmap1), (1.5, ndmap2)]
nested_ndmap = MultiDimensionalMapping(ndmap_list, kdims=[self.dim2])
nested_ndmap[(0.5,)].update(dict([(0, 'c'), (1, 'd')]))
self.assertEquals(list(nested_ndmap[0.5].values()), ['c', 'd'])
nested_ndmap[1.5] = ndmap3
self.assertEquals(list(nested_ndmap[1.5].values()), ['e', 'f'])
def test_idxmapping_unsorted(self):
data = [('B', 1), ('C', 2), ('A', 3)]
ndmap = MultiDimensionalMapping(data, sort=False)
self.assertEquals(ndmap.keys(), ['B', 'C', 'A'])
def test_idxmapping_unsorted_clone(self):
data = [('B', 1), ('C', 2), ('A', 3)]
ndmap = MultiDimensionalMapping(data, sort=False).clone()
self.assertEquals(ndmap.keys(), ['B', 'C', 'A'])
def test_idxmapping_groupby_unsorted(self):
data = [(('B', 2), 1), (('C', 2), 2), (('A', 1), 3)]
grouped = MultiDimensionalMapping(data, sort=False, kdims=['X', 'Y']).groupby('Y')
self.assertEquals(grouped.keys(), [1, 2])
self.assertEquals(grouped.values()[0].keys(), ['A'])
self.assertEquals(grouped.last.keys(), ['B', 'C'])
def test_idxmapping_reindex(self):
data = [((0, 0.5), 'a'), ((1, 0.5), 'b')]
ndmap = MultiDimensionalMapping(data, kdims=[self.dim1, self.dim2])
reduced_dims = ['intdim']
reduced_ndmap = ndmap.reindex(reduced_dims)
self.assertEqual([d.name for d in reduced_ndmap.kdims], reduced_dims)
def test_idxmapping_redim(self):
data = [((0, 0.5), 'a'), ((1, 0.5), 'b')]
ndmap = MultiDimensionalMapping(data, kdims=[self.dim1, self.dim2])
redimmed = ndmap.redim(intdim='Integer')
self.assertEqual(redimmed.kdims, [Dimension('Integer'), Dimension('floatdim')])
def test_idxmapping_add_dimension(self):
ndmap = MultiDimensionalMapping(self.init_items_1D_list, kdims=[self.dim1])
ndmap2d = ndmap.add_dimension(self.dim2, 0, 0.5)
self.assertEqual(list(ndmap2d.keys()), [(0.5, 1), (0.5, 5)])
self.assertEqual(ndmap2d.kdims, [self.dim2, self.dim1])
def test_idxmapping_apply_key_type(self):
data = dict([(0.5, 'a'), (1.5, 'b')])
ndmap = MultiDimensionalMapping(data, kdims=[self.dim1])
self.assertEqual(list(ndmap.keys()), [0, 1])
def test_setitem_nested_1(self):
nested1 = MultiDimensionalMapping([('B', 1)])
ndmap = MultiDimensionalMapping([('A', nested1)])
nested2 = MultiDimensionalMapping([('B', 2)])
ndmap['A'] = nested2
self.assertEqual(ndmap['A'], nested2)
def test_setitem_nested_2(self):
nested1 = MultiDimensionalMapping([('B', 1)])
ndmap = MultiDimensionalMapping([('A', nested1)])
nested2 = MultiDimensionalMapping([('C', 2)])
nested_clone = nested1.clone()
nested_clone.update(nested2)
ndmap.update({'A': nested2})
self.assertEqual(ndmap['A'].data, nested_clone.data)
class HoloMapTest(ComparisonTestCase):
def setUp(self):
self.xs = range(11)
self.y_ints = [i*2 for i in range(11)]
self.ys = np.linspace(0, 1, 11)
self.columns = Dataset(np.column_stack([self.xs, self.y_ints]),
kdims=['x'], vdims=['y'])
def test_holomap_redim(self):
hmap = HoloMap({i: Dataset({'x':self.xs, 'y': self.ys * i},
kdims=['x'], vdims=['y'])
for i in range(10)}, kdims=['z'])
redimmed = hmap.redim(x='Time')
self.assertEqual(redimmed.dimensions('all', True),
['z', 'Time', 'y'])
def test_holomap_redim_nested(self):
hmap = HoloMap({i: Dataset({'x':self.xs, 'y': self.ys * i},
kdims=['x'], vdims=['y'])
for i in range(10)}, kdims=['z'])
redimmed = hmap.redim(x='Time', z='Magnitude')
self.assertEqual(redimmed.dimensions('all', True),
['Magnitude', 'Time', 'y'])
def test_columns_collapse_heterogeneous(self):
collapsed = HoloMap({i: Dataset({'x':self.xs, 'y': self.ys * i},
kdims=['x'], vdims=['y'])
for i in range(10)}, kdims=['z']).collapse('z', np.mean)
expected = Dataset({'x':self.xs, 'y': self.ys * 4.5}, kdims=['x'], vdims=['y'])
self.compare_dataset(collapsed, expected)
def test_columns_sample_homogeneous(self):
samples = self.columns.sample([0, 5, 10]).dimension_values('y')
self.assertEqual(samples, np.array([0, 10, 20]))
| 1 | 15,962 | Was this just wrong before? The names indicated types but type wasn't specified. I guess the tests passed as comparison worked with ``type=None``? | holoviz-holoviews | py |
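The reviewer's guess — that the old assertion passed because the comparison effectively ignored an unset type — can be illustrated with a small sketch. This is a hypothetical equality method, not holoviews' actual Dimension implementation:

class Dim:
    # Toy stand-in for a parameterized dimension with an optional type.
    def __init__(self, name, type=None):
        self.name = name
        self.type = type

    def __eq__(self, other):
        if self.name != other.name:
            return False
        # Treat an unset type (None) on either side as a wildcard.
        if self.type is None or other.type is None:
            return True
        return self.type == other.type

# Passes even though only one side carries an explicit type:
assert Dim('Integer') == Dim('Integer', type=int)
# Still passes once both sides specify the type, as the updated test expects:
assert Dim('Integer', type=int) == Dim('Integer', type=int)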
@@ -146,7 +146,8 @@ public class OrcMetrics {
if (icebergColOpt.isPresent()) {
final Types.NestedField icebergCol = icebergColOpt.get();
final int fieldId = icebergCol.fieldId();
- final MetricsMode metricsMode = effectiveMetricsConfig.columnMode(icebergCol.name());
+ final String colName = schema.findColumnName(icebergCol.fieldId());
+ final MetricsMode metricsMode = effectiveMetricsConfig.columnMode(colName);
columnSizes.put(fieldId, colStat.getBytesOnDisk());
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.orc;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.sql.Timestamp;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.FieldMetrics;
import org.apache.iceberg.Metrics;
import org.apache.iceberg.MetricsConfig;
import org.apache.iceberg.MetricsModes;
import org.apache.iceberg.MetricsModes.MetricsMode;
import org.apache.iceberg.MetricsUtil;
import org.apache.iceberg.Schema;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.expressions.Literal;
import org.apache.iceberg.hadoop.HadoopInputFile;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.mapping.NameMapping;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.types.Conversions;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.DateTimeUtil;
import org.apache.iceberg.util.UnicodeUtil;
import org.apache.orc.BooleanColumnStatistics;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.DateColumnStatistics;
import org.apache.orc.DecimalColumnStatistics;
import org.apache.orc.DoubleColumnStatistics;
import org.apache.orc.IntegerColumnStatistics;
import org.apache.orc.Reader;
import org.apache.orc.StringColumnStatistics;
import org.apache.orc.TimestampColumnStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
public class OrcMetrics {
private enum Bound {
LOWER, UPPER
}
private OrcMetrics() {
}
public static Metrics fromInputFile(InputFile file) {
return fromInputFile(file, MetricsConfig.getDefault());
}
public static Metrics fromInputFile(InputFile file, MetricsConfig metricsConfig) {
return fromInputFile(file, metricsConfig, null);
}
public static Metrics fromInputFile(InputFile file, MetricsConfig metricsConfig, NameMapping mapping) {
final Configuration config = (file instanceof HadoopInputFile) ?
((HadoopInputFile) file).getConf() : new Configuration();
return fromInputFile(file, config, metricsConfig, mapping);
}
static Metrics fromInputFile(InputFile file, Configuration config, MetricsConfig metricsConfig, NameMapping mapping) {
try (Reader orcReader = ORC.newFileReader(file, config)) {
return buildOrcMetrics(orcReader.getNumberOfRows(), orcReader.getSchema(), orcReader.getStatistics(),
Stream.empty(), metricsConfig, mapping);
} catch (IOException ioe) {
throw new RuntimeIOException(ioe, "Failed to open file: %s", file.location());
}
}
static Metrics fromWriter(Writer writer, Stream<FieldMetrics<?>> fieldMetricsStream, MetricsConfig metricsConfig) {
try {
return buildOrcMetrics(writer.getNumberOfRows(), writer.getSchema(), writer.getStatistics(),
fieldMetricsStream, metricsConfig, null);
} catch (IOException ioe) {
throw new RuntimeIOException(ioe, "Failed to get statistics from writer");
}
}
private static Metrics buildOrcMetrics(final long numOfRows, final TypeDescription orcSchema,
final ColumnStatistics[] colStats,
final Stream<FieldMetrics<?>> fieldMetricsStream,
final MetricsConfig metricsConfig,
final NameMapping mapping) {
final TypeDescription orcSchemaWithIds = (!ORCSchemaUtil.hasIds(orcSchema) && mapping != null) ?
ORCSchemaUtil.applyNameMapping(orcSchema, mapping) : orcSchema;
final Set<Integer> statsColumns = statsColumns(orcSchemaWithIds);
final MetricsConfig effectiveMetricsConfig = Optional.ofNullable(metricsConfig)
.orElseGet(MetricsConfig::getDefault);
Map<Integer, Long> columnSizes = Maps.newHashMapWithExpectedSize(colStats.length);
Map<Integer, Long> valueCounts = Maps.newHashMapWithExpectedSize(colStats.length);
Map<Integer, Long> nullCounts = Maps.newHashMapWithExpectedSize(colStats.length);
if (!ORCSchemaUtil.hasIds(orcSchemaWithIds)) {
return new Metrics(numOfRows,
columnSizes,
valueCounts,
nullCounts,
null,
null,
null);
}
final Schema schema = ORCSchemaUtil.convert(orcSchemaWithIds);
Map<Integer, ByteBuffer> lowerBounds = Maps.newHashMap();
Map<Integer, ByteBuffer> upperBounds = Maps.newHashMap();
Map<Integer, FieldMetrics<?>> fieldMetricsMap = Optional.ofNullable(fieldMetricsStream)
.map(stream -> stream.collect(Collectors.toMap(FieldMetrics::id, Function.identity())))
.orElseGet(HashMap::new);
for (int i = 0; i < colStats.length; i++) {
final ColumnStatistics colStat = colStats[i];
final TypeDescription orcCol = orcSchemaWithIds.findSubtype(i);
final Optional<Types.NestedField> icebergColOpt = ORCSchemaUtil.icebergID(orcCol)
.map(schema::findField);
if (icebergColOpt.isPresent()) {
final Types.NestedField icebergCol = icebergColOpt.get();
final int fieldId = icebergCol.fieldId();
final MetricsMode metricsMode = effectiveMetricsConfig.columnMode(icebergCol.name());
columnSizes.put(fieldId, colStat.getBytesOnDisk());
if (metricsMode == MetricsModes.None.get()) {
continue;
}
if (statsColumns.contains(fieldId)) {
// Since ORC does not track null values nor repeated ones, the value count for columns in
// containers (maps, list) may be larger than what it actually is, however these are not
// used in expressions right now. For such cases, we use the number of values
// directly stored in ORC.
if (colStat.hasNull()) {
nullCounts.put(fieldId, numOfRows - colStat.getNumberOfValues());
} else {
nullCounts.put(fieldId, 0L);
}
valueCounts.put(fieldId, colStat.getNumberOfValues() + nullCounts.get(fieldId));
if (metricsMode != MetricsModes.Counts.get()) {
Optional<ByteBuffer> orcMin = (colStat.getNumberOfValues() > 0) ?
fromOrcMin(icebergCol.type(), colStat, metricsMode, fieldMetricsMap.get(fieldId)) : Optional.empty();
orcMin.ifPresent(byteBuffer -> lowerBounds.put(icebergCol.fieldId(), byteBuffer));
Optional<ByteBuffer> orcMax = (colStat.getNumberOfValues() > 0) ?
fromOrcMax(icebergCol.type(), colStat, metricsMode, fieldMetricsMap.get(fieldId)) : Optional.empty();
orcMax.ifPresent(byteBuffer -> upperBounds.put(icebergCol.fieldId(), byteBuffer));
}
}
}
}
return new Metrics(numOfRows,
columnSizes,
valueCounts,
nullCounts,
MetricsUtil.createNanValueCounts(fieldMetricsMap.values().stream(), effectiveMetricsConfig, schema),
lowerBounds,
upperBounds);
}
private static Optional<ByteBuffer> fromOrcMin(Type type, ColumnStatistics columnStats,
MetricsMode metricsMode, FieldMetrics<?> fieldMetrics) {
Object min = null;
if (columnStats instanceof IntegerColumnStatistics) {
min = ((IntegerColumnStatistics) columnStats).getMinimum();
if (type.typeId() == Type.TypeID.INTEGER) {
min = Math.toIntExact((long) min);
}
} else if (columnStats instanceof DoubleColumnStatistics) {
// since Orc includes NaN for upper/lower bounds of floating point columns, and we don't want this behavior,
// we have tracked metrics for such columns ourselves and thus do not need to rely on Orc's column statistics.
Preconditions.checkNotNull(fieldMetrics,
"[BUG] Float or double type columns should have metrics being tracked by Iceberg Orc writers");
min = fieldMetrics.lowerBound();
} else if (columnStats instanceof StringColumnStatistics) {
min = ((StringColumnStatistics) columnStats).getMinimum();
} else if (columnStats instanceof DecimalColumnStatistics) {
min = Optional
.ofNullable(((DecimalColumnStatistics) columnStats).getMinimum())
.map(minStats -> minStats.bigDecimalValue()
.setScale(((Types.DecimalType) type).scale()))
.orElse(null);
} else if (columnStats instanceof DateColumnStatistics) {
min = (int) ((DateColumnStatistics) columnStats).getMinimumDayOfEpoch();
} else if (columnStats instanceof TimestampColumnStatistics) {
TimestampColumnStatistics tColStats = (TimestampColumnStatistics) columnStats;
Timestamp minValue = tColStats.getMinimumUTC();
min = Optional.ofNullable(minValue)
.map(v -> DateTimeUtil.microsFromInstant(v.toInstant()))
.orElse(null);
} else if (columnStats instanceof BooleanColumnStatistics) {
BooleanColumnStatistics booleanStats = (BooleanColumnStatistics) columnStats;
min = booleanStats.getFalseCount() <= 0;
}
return Optional.ofNullable(Conversions.toByteBuffer(type, truncateIfNeeded(Bound.LOWER, type, min, metricsMode)));
}
private static Optional<ByteBuffer> fromOrcMax(Type type, ColumnStatistics columnStats,
MetricsMode metricsMode, FieldMetrics<?> fieldMetrics) {
Object max = null;
if (columnStats instanceof IntegerColumnStatistics) {
max = ((IntegerColumnStatistics) columnStats).getMaximum();
if (type.typeId() == Type.TypeID.INTEGER) {
max = Math.toIntExact((long) max);
}
} else if (columnStats instanceof DoubleColumnStatistics) {
// since Orc includes NaN for upper/lower bounds of floating point columns, and we don't want this behavior,
// we have tracked metrics for such columns ourselves and thus do not need to rely on Orc's column statistics.
Preconditions.checkNotNull(fieldMetrics,
"[BUG] Float or double type columns should have metrics being tracked by Iceberg Orc writers");
max = fieldMetrics.upperBound();
} else if (columnStats instanceof StringColumnStatistics) {
max = ((StringColumnStatistics) columnStats).getMaximum();
} else if (columnStats instanceof DecimalColumnStatistics) {
max = Optional
.ofNullable(((DecimalColumnStatistics) columnStats).getMaximum())
.map(maxStats -> maxStats.bigDecimalValue()
.setScale(((Types.DecimalType) type).scale()))
.orElse(null);
} else if (columnStats instanceof DateColumnStatistics) {
max = (int) ((DateColumnStatistics) columnStats).getMaximumDayOfEpoch();
} else if (columnStats instanceof TimestampColumnStatistics) {
TimestampColumnStatistics tColStats = (TimestampColumnStatistics) columnStats;
Timestamp maxValue = tColStats.getMaximumUTC();
max = Optional.ofNullable(maxValue)
.map(v -> DateTimeUtil.microsFromInstant(v.toInstant()))
.orElse(null);
} else if (columnStats instanceof BooleanColumnStatistics) {
BooleanColumnStatistics booleanStats = (BooleanColumnStatistics) columnStats;
max = booleanStats.getTrueCount() > 0;
}
return Optional.ofNullable(Conversions.toByteBuffer(type, truncateIfNeeded(Bound.UPPER, type, max, metricsMode)));
}
private static Object truncateIfNeeded(Bound bound, Type type, Object value, MetricsMode metricsMode) {
// Out of the two types which could be truncated, string or binary, ORC only supports string bounds.
// Therefore, truncation will be applied if needed only on string type.
if (!(metricsMode instanceof MetricsModes.Truncate) || type.typeId() != Type.TypeID.STRING || value == null) {
return value;
}
CharSequence charSequence = (CharSequence) value;
MetricsModes.Truncate truncateMode = (MetricsModes.Truncate) metricsMode;
int truncateLength = truncateMode.length();
switch (bound) {
case UPPER:
return Optional.ofNullable(UnicodeUtil.truncateStringMax(Literal.of(charSequence), truncateLength))
.map(Literal::value).orElse(charSequence);
case LOWER:
return UnicodeUtil.truncateStringMin(Literal.of(charSequence), truncateLength).value();
default:
throw new RuntimeException("No other bound is defined.");
}
}
private static Set<Integer> statsColumns(TypeDescription schema) {
return OrcSchemaVisitor.visit(schema, new StatsColumnsVisitor());
}
private static class StatsColumnsVisitor extends OrcSchemaVisitor<Set<Integer>> {
@Override
public Set<Integer> record(TypeDescription record, List<String> names, List<Set<Integer>> fields) {
ImmutableSet.Builder<Integer> result = ImmutableSet.builder();
fields.stream().filter(Objects::nonNull).forEach(result::addAll);
record.getChildren().stream().map(ORCSchemaUtil::icebergID).filter(Optional::isPresent)
.map(Optional::get).forEach(result::add);
return result.build();
}
}
}
| 1 | 40,782 | icebergCol.name() is the unqualified column name | apache-iceberg | java |
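The comment points at the difference between a nested field's own (unqualified) name and its fully qualified column name, which is what metrics configuration is keyed by. A small sketch of that distinction (plain Python for illustration, not the Iceberg API):

def qualified_names(fields, prefix=""):
    # Map field id -> fully qualified column name, e.g. "location.lat".
    names = {}
    for f in fields:
        full = prefix + f["name"]
        names[f["id"]] = full
        for child in f.get("fields", []):
            names.update(qualified_names([child], prefix=full + "."))
    return names

schema = [
    {"id": 1, "name": "id"},
    {"id": 2, "name": "location", "fields": [
        {"id": 3, "name": "lat"},
        {"id": 4, "name": "long"},
    ]},
]

# The leaf field 3 is named "lat", but its qualified column name is "location.lat".
print(qualified_names(schema))  # {1: 'id', 2: 'location', 3: 'location.lat', 4: 'location.long'}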
@@ -45,7 +45,7 @@ class MetricAlarm(object):
def __init__(self, connection=None, name=None, metric=None,
namespace=None, statistic=None, comparison=None, threshold=None,
period=None, evaluation_periods=None, unit=None, description='',
- dimensions=[]):
+ dimensions=[], alarm_actions=[], insufficient_data_actions=[], ok_actions=[]):
"""
Creates a new Alarm.
| 1 | # Copyright (c) 2010 Reza Lotun http://reza.lotun.name
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from datetime import datetime
from boto.resultset import ResultSet
from boto.ec2.cloudwatch.listelement import ListElement
try:
import simplejson as json
except ImportError:
import json
class MetricAlarm(object):
OK = 'OK'
ALARM = 'ALARM'
INSUFFICIENT_DATA = 'INSUFFICIENT_DATA'
_cmp_map = {
'>=' : 'GreaterThanOrEqualToThreshold',
'>' : 'GreaterThanThreshold',
'<' : 'LessThanThreshold',
'<=' : 'LessThanOrEqualToThreshold',
}
_rev_cmp_map = dict((v, k) for (k, v) in _cmp_map.iteritems())
def __init__(self, connection=None, name=None, metric=None,
namespace=None, statistic=None, comparison=None, threshold=None,
period=None, evaluation_periods=None, unit=None, description='',
dimensions=[]):
"""
Creates a new Alarm.
:type name: str
:param name: Name of alarm.
:type metric: str
:param metric: Name of alarm's associated metric.
:type namespace: str
:param namespace: The namespace for the alarm's metric.
:type statistic: str
:param statistic: The statistic to apply to the alarm's associated metric. Can
be one of 'SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum'
:type comparison: str
:param comparison: Comparison used to compare statistic with threshold. Can be
one of '>=', '>', '<', '<='
:type threshold: float
:param threshold: The value against which the specified statistic is compared.
:type period: int
:param period: The period in seconds over which the specified statistic is applied.
:type evaluation_periods: int
:param evaluation_periods: The number of periods over which data is compared to
the specified threshold
:type unit: str
:param unit: Allowed Values are: Seconds, Microseconds, Milliseconds, Bytes,
Kilobytes, Megabytes, Gigabytes, Terabytes, Bits, Kilobits, Megabits,
Gigabits, Terabits, Percent, Count, Bytes/Second, Kilobytes/Second,
Megabytes/Second, Gigabytes/Second, Terabytes/Second, Bits/Second,
Kilobits/Second, Megabits/Second, Gigabits/Second, Terabits/Second,
Count/Second, None
:type description: str
:param description: Description of MetricAlarm
:type dimensions: list of dicts
:param dimensions: Dimensions of alarm, such as:
[{'InstanceId':'i-0123456,i-0123457'}]
"""
self.name = name
self.connection = connection
self.metric = metric
self.namespace = namespace
self.statistic = statistic
self.threshold = float(threshold) if threshold is not None else None
self.comparison = self._cmp_map.get(comparison)
self.period = int(period) if period is not None else None
self.evaluation_periods = int(evaluation_periods) if evaluation_periods is not None else None
self.actions_enabled = None
self.alarm_arn = None
self.last_updated = None
self.description = description
self.dimensions = dimensions
self.insufficient_data_actions = []
self.ok_actions = []
self.state_reason = None
self.state_value = None
self.unit = unit
alarm_action = []
self.alarm_actions = ListElement(alarm_action)
def __repr__(self):
return 'MetricAlarm:%s[%s(%s) %s %s]' % (self.name, self.metric, self.statistic, self.comparison, self.threshold)
def startElement(self, name, attrs, connection):
if name == 'AlarmActions':
return self.alarm_actions
else:
pass
def endElement(self, name, value, connection):
if name == 'ActionsEnabled':
self.actions_enabled = value
elif name == 'AlarmArn':
self.alarm_arn = value
elif name == 'AlarmConfigurationUpdatedTimestamp':
self.last_updated = value
elif name == 'AlarmDescription':
self.description = value
elif name == 'AlarmName':
self.name = value
elif name == 'ComparisonOperator':
setattr(self, 'comparison', self._rev_cmp_map[value])
elif name == 'EvaluationPeriods':
self.evaluation_periods = int(value)
elif name == 'MetricName':
self.metric = value
elif name == 'Namespace':
self.namespace = value
elif name == 'Period':
self.period = int(value)
elif name == 'StateReason':
self.state_reason = value
elif name == 'StateValue':
self.state_value = value
elif name == 'Statistic':
self.statistic = value
elif name == 'Threshold':
self.threshold = float(value)
elif name == 'Unit':
self.unit = value
else:
setattr(self, name, value)
def set_state(self, value, reason, data=None):
""" Temporarily sets the state of an alarm.
:type value: str
:param value: OK | ALARM | INSUFFICIENT_DATA
:type reason: str
:param reason: Reason alarm set (human readable).
:type data: str
:param data: Reason data (will be jsonified).
"""
return self.connection.set_alarm_state(self.name, reason, value, data)
def update(self):
return self.connection.update_alarm(self)
def enable_actions(self):
return self.connection.enable_alarm_actions([self.name])
def disable_actions(self):
return self.connection.disable_alarm_actions([self.name])
def describe_history(self, start_date=None, end_date=None, max_records=None, history_item_type=None, next_token=None):
return self.connection.describe_alarm_history(self.name, start_date, end_date,
max_records, history_item_type, next_token)
def add_alarm_action(self, sns_topic=None):
"""
Adds an alarm action, represented as an SNS topic, to this alarm.
What to do when the alarm is triggered.
:type sns_topic: str
:param sns_topic: SNS topic to which notification should be
sent if the alarm goes to state ALARM.
"""
if not sns_topic:
return # Raise exception instead?
self.actions_enabled = 'true'
self.alarm_actions.append(sns_topic)
class AlarmHistoryItem(object):
def __init__(self, connection=None):
self.connection = connection
def __repr__(self):
return 'AlarmHistory:%s[%s at %s]' % (self.name, self.summary, self.timestamp)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'AlarmName':
self.name = value
elif name == 'HistoryData':
self.data = json.loads(value)
elif name == 'HistoryItemType':
self.item_type = value
elif name == 'HistorySummary':
self.summary = value
elif name == 'Timestamp':
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
| 1 | 7,943 | It's generally a bad idea to use mutable types like lists as default values for parameters. Lots of strange, difficult to debug side effects can occur. I see that there was already one example of this prior to this commit which probably explains why it seemed innocuous to add more but I'm going to rework this before committing to master. | boto-boto | py |
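To illustrate why the reviewer flags this: a Python default argument is evaluated once, so a mutable default like dimensions=[] is shared between every call that relies on it. A generic sketch of the pitfall and the conventional fix (not boto code):

def add_action(action, actions=[]):
    # The same list object is reused across calls that omit `actions`.
    actions.append(action)
    return actions

print(add_action("scale-up"))    # ['scale-up']
print(add_action("scale-down"))  # ['scale-up', 'scale-down']  <- leaked state

def add_action_safe(action, actions=None):
    # Conventional fix: default to None and build a fresh list per call.
    if actions is None:
        actions = []
    actions.append(action)
    return actions

print(add_action_safe("scale-up"))    # ['scale-up']
print(add_action_safe("scale-down"))  # ['scale-down']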
@@ -10,6 +10,7 @@ module.exports = {
'prettier',
'prettier/@typescript-eslint',
'plugin:prettier/recommended',
+ 'eslint-config-prettier'
],
globals: {
Atomics: 'readonly', | 1 | module.exports = {
env: {
browser: true,
es6: true,
'jest/globals': true,
},
extends: [
'airbnb',
'plugin:@typescript-eslint/recommended',
'prettier',
'prettier/@typescript-eslint',
'plugin:prettier/recommended',
],
globals: {
Atomics: 'readonly',
SharedArrayBuffer: 'readonly',
},
parser: '@typescript-eslint/parser',
parserOptions: {
project: './tsconfig.json',
tsconfigRootDir: './',
// TODO: we need this because of an issue with @typescript-eslint/parser: https://github.com/typescript-eslint/typescript-eslint/issues/864
createDefaultProgram: true,
},
settings: {
'import/resolver': {
node: {
extensions: ['.js', '.jsx', '.ts', '.tsx'],
},
},
},
plugins: ['react', '@typescript-eslint', 'prettier', 'jest'],
rules: {
'@typescript-eslint/member-delimiter-style': 'off',
'@typescript-eslint/explicit-function-return-type': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-unused-vars': 'off',
'@typescript-eslint/unified-signatures': 'error',
'@typescript-eslint/no-inferrable-types': ['error', { ignoreParameters: true }],
'react/jsx-filename-extension': ['error', { extensions: ['.tsx'] }],
'react/jsx-one-expression-per-line': 'off',
'react/jsx-wrap-multilines': 'off',
'react/jsx-props-no-spreading': 'off',
'arrow-body-style': ['warn', 'as-needed'],
'no-param-reassign': ['error', { props: false }],
'import/prefer-default-export': 'off',
'no-console': 'off',
'eol-last': ['error', 'always'],
'no-debugger': 'error',
'no-nested-ternary': 'off',
'import/no-unresolved': 'off',
'import/extensions': ['error', 'never'],
curly: ['error', 'all'],
},
}
 | 1 | 14,727 | We run prettier as an eslint plugin, so this is harmful | HospitalRun-hospitalrun-frontend | js
@@ -39,7 +39,7 @@ using namespace eprosima::fastrtps::types;
HelloWorldPublisher::HelloWorldPublisher()
: mp_participant(nullptr)
, mp_publisher(nullptr)
- , m_DynType(nullptr)
+ , m_DynType(DynamicType_ptr(nullptr))
{
}
| 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file HelloWorldPublisher.cpp
*
*/
#include "HelloWorldPublisher.h"
#include <fastrtps/attributes/ParticipantAttributes.h>
#include <fastrtps/attributes/PublisherAttributes.h>
#include <fastrtps/publisher/Publisher.h>
#include <fastrtps/Domain.h>
#include <fastrtps/types/DynamicTypeBuilderFactory.h>
#include <fastrtps/types/DynamicDataFactory.h>
#include <fastrtps/types/DynamicTypeBuilder.h>
#include <fastrtps/types/DynamicTypeBuilderPtr.h>
#include <fastrtps/types/TypeDescriptor.h>
#include <fastrtps/types/MemberDescriptor.h>
#include <fastrtps/types/DynamicType.h>
#include <thread>
using namespace eprosima::fastrtps;
using namespace eprosima::fastrtps::rtps;
using namespace eprosima::fastrtps::types;
HelloWorldPublisher::HelloWorldPublisher()
: mp_participant(nullptr)
, mp_publisher(nullptr)
, m_DynType(nullptr)
{
}
bool HelloWorldPublisher::init()
{
// Create basic builders
DynamicTypeBuilder_ptr struct_type_builder(DynamicTypeBuilderFactory::get_instance()->create_struct_builder());
// Add members to the struct.
struct_type_builder->add_member(0, "index", DynamicTypeBuilderFactory::get_instance()->create_uint32_type());
struct_type_builder->add_member(1, "message", DynamicTypeBuilderFactory::get_instance()->create_string_type());
struct_type_builder->set_name("HelloWorld");
DynamicType_ptr dynType = struct_type_builder->build();
m_DynType.SetDynamicType(dynType);
m_DynHello = DynamicDataFactory::get_instance()->create_data(dynType);
m_DynHello->set_uint32_value(0, 0);
m_DynHello->set_string_value("HelloWorld", 1);
ParticipantAttributes PParam;
PParam.rtps.builtin.domainId = 0;
PParam.rtps.setName("DynHelloWorld_pub");
mp_participant = Domain::createParticipant(PParam, (ParticipantListener*)&m_part_list);
if(mp_participant==nullptr)
return false;
//REGISTER THE TYPE
Domain::registerDynamicType(mp_participant, &m_DynType);
//CREATE THE PUBLISHER
PublisherAttributes Wparam;
Wparam.topic.topicKind = NO_KEY;
Wparam.topic.topicDataType = "HelloWorld";
Wparam.topic.topicName = "HelloWorldTopic";
//Wparam.topic.topicDiscoveryKind = NO_CHECK; // Do it compatible with other HelloWorlds
mp_publisher = Domain::createPublisher(mp_participant,Wparam,(PublisherListener*)&m_listener);
if(mp_publisher == nullptr)
return false;
return true;
}
HelloWorldPublisher::~HelloWorldPublisher()
{
Domain::removeParticipant(mp_participant);
DynamicDataFactory::get_instance()->delete_data(m_DynHello);
Domain::stopAll();
}
void HelloWorldPublisher::PubListener::onPublicationMatched(Publisher* /*pub*/,MatchingInfo& info)
{
if(info.status == MATCHED_MATCHING)
{
n_matched++;
firstConnected = true;
std::cout << "Publisher matched"<<std::endl;
}
else
{
n_matched--;
std::cout << "Publisher unmatched"<<std::endl;
}
}
void HelloWorldPublisher::PartListener::onParticipantDiscovery(Participant*, ParticipantDiscoveryInfo&& info)
{
if(info.status == ParticipantDiscoveryInfo::DISCOVERED_PARTICIPANT)
{
std::cout << "Participant " << info.info.m_participantName << " discovered" << std::endl;
}
else if (info.status == ParticipantDiscoveryInfo::REMOVED_PARTICIPANT)
{
std::cout << "Participant " << info.info.m_participantName << " removed" << std::endl;
}
else if (info.status == ParticipantDiscoveryInfo::DROPPED_PARTICIPANT)
{
std::cout << "Participant " << info.info.m_participantName << " dropped" << std::endl;
}
}
void HelloWorldPublisher::runThread(uint32_t samples, uint32_t sleep)
{
uint32_t i = 0;
while(!stop && (i < samples || samples == 0))
{
if(publish(samples != 0))
{
std::string message;
m_DynHello->get_string_value(message, 1);
uint32_t index;
m_DynHello->get_uint32_value(index, 0);
std::cout << "Message: " << message << " with index: " << index << " SENT" << std::endl;
++i;
}
std::this_thread::sleep_for(std::chrono::milliseconds(sleep));
}
}
void HelloWorldPublisher::run(uint32_t samples, uint32_t sleep)
{
stop = false;
std::thread thread(&HelloWorldPublisher::runThread, this, samples, sleep);
if (samples == 0)
{
std::cout << "Publisher running. Please press enter to stop the Publisher at any time." << std::endl;
std::cin.ignore();
stop = true;
}
else
{
std::cout << "Publisher running " << samples << " samples." << std::endl;
}
thread.join();
}
bool HelloWorldPublisher::publish(bool waitForListener)
{
if(m_listener.firstConnected || !waitForListener || m_listener.n_matched>0)
{
uint32_t index;
m_DynHello->get_uint32_value(index, 0);
m_DynHello->set_uint32_value(index+1, 0);
mp_publisher->write((void*)m_DynHello);
return true;
}
return false;
}
| 1 | 16,024 | Check if the TypeDescriptor and MemberDescriptor includes are necessary | eProsima-Fast-DDS | cpp |
@@ -4,10 +4,11 @@ import (
gocontext "context"
"encoding/json"
"fmt"
- "k8s.io/apimachinery/pkg/api/errors"
"os"
"syscall"
+ "k8s.io/apimachinery/pkg/api/errors"
+
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" | 1 | package leaderelection
import (
gocontext "context"
"encoding/json"
"fmt"
"k8s.io/apimachinery/pkg/api/errors"
"os"
"syscall"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/kubernetes"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/klog"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"github.com/kubeedge/beehive/pkg/core"
beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/utils"
config "github.com/kubeedge/kubeedge/pkg/apis/componentconfig/cloudcore/v1alpha1"
)
func Run(cfg *config.CloudCoreConfig, readyzAdaptor *ReadyzAdaptor) {
// To help debugging, immediately log config for LeaderElection
klog.Infof("Config for LeaderElection : %v", *cfg.LeaderElection)
// Init Context for leaderElection
beehiveContext.InitContext(beehiveContext.MsgCtxTypeChannel)
coreBroadcaster := record.NewBroadcaster()
cli, err := utils.KubeClient()
if err != nil {
klog.Warningf("Create kube client for leaderElection failed with error: %s", err)
return
}
if err = CreateNamespaceIfNeeded(cli, "kubeedge"); err != nil {
klog.Warningf("Create Namespace kubeedge failed with error: %s", err)
return
}
coreRecorder := coreBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "CloudCore"})
leaderElectionConfig, err := makeLeaderElectionConfig(*cfg.LeaderElection, cli, coreRecorder)
if err != nil {
klog.Errorf("couldn't create leaderElectorConfig: %v", err)
return
}
leaderElectionConfig.Callbacks = leaderelection.LeaderCallbacks{
OnStartedLeading: func(ctx gocontext.Context) {
// Start all modules,
core.StartModules()
// Patch PodReadinessGate if program run in pod
err := TryToPatchPodReadinessGate()
if err != nil {
// Terminate the program gracefully
klog.Errorf("Error patching pod readinessGate: %v", err)
TriggerGracefulShutdown()
}
},
OnStoppedLeading: func() {
// TODO: is it necessary to terminate the program gracefully?
//klog.Fatalf("leaderelection lost, rudely terminate program")
klog.Errorf("leaderelection lost, gracefully terminate program")
// Trigger core.GracefulShutdown()
TriggerGracefulShutdown()
},
}
leaderElector, err := leaderelection.NewLeaderElector(*leaderElectionConfig)
// Set readyzAdaptor manually
readyzAdaptor.SetLeaderElection(leaderElector)
if err != nil {
klog.Errorf("couldn't create leader elector: %v", err)
return
}
// Start leaderElection until becoming leader, terminate program if leader lost or context.cancel
go leaderElector.Run(beehiveContext.GetContext())
// Monitor system signals and shut down gracefully; this should run in the main goroutine
core.GracefulShutdown()
return
}
// makeLeaderElectionConfig builds a leader election configuration. It will
// create a new resource lock associated with the configuration.
func makeLeaderElectionConfig(config componentbaseconfig.LeaderElectionConfiguration, client clientset.Interface, recorder record.EventRecorder) (*leaderelection.LeaderElectionConfig, error) {
hostname, err := os.Hostname()
if err != nil {
return nil, fmt.Errorf("unable to get hostname: %v", err)
}
// add a uniquifier so that two processes on the same host don't accidentally both become active
id := hostname + "_" + string(uuid.NewUUID())
rl, err := resourcelock.New(config.ResourceLock,
config.ResourceNamespace,
config.ResourceName,
client.CoreV1(),
client.CoordinationV1(),
resourcelock.ResourceLockConfig{
Identity: id,
EventRecorder: recorder,
})
if err != nil {
return nil, fmt.Errorf("couldn't create resource lock: %v", err)
}
return &leaderelection.LeaderElectionConfig{
Lock: rl,
LeaseDuration: config.LeaseDuration.Duration,
RenewDeadline: config.RenewDeadline.Duration,
RetryPeriod: config.RetryPeriod.Duration,
WatchDog: nil,
Name: "cloudcore",
}, nil
}
// Try to patch PodReadinessGate if program runs in pod
func TryToPatchPodReadinessGate() error {
podname, isInPod := os.LookupEnv("CLOUDCORE_POD_NAME")
if isInPod == true {
namespace := os.Getenv("CLOUDCORE_POD_NAMESPACE")
klog.Infof("CloudCore is running in pod %v/%v, try to patch PodReadinessGate", namespace, podname)
//TODO: use specific clients
cli, err := utils.KubeClient()
if err != nil {
return fmt.Errorf("create kube client for patching podReadinessGate failed with error: %v", err)
}
//Create patchBytes
getPod, err := cli.CoreV1().Pods(namespace).Get(podname, metav1.GetOptions{})
originalJSON, err := json.Marshal(getPod)
if err != nil {
return fmt.Errorf("failed to marshal modified pod %q into JSON: %v", podname, err)
}
//Todo: Read PodReadinessGate from CloudCore configuration or env
condition := corev1.PodCondition{Type: "kubeedge.io/CloudCoreIsLeader", Status: corev1.ConditionTrue}
podutil.UpdatePodCondition(&getPod.Status, &condition)
newJSON, err := json.Marshal(getPod)
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSON, newJSON, corev1.Pod{})
if err != nil {
return fmt.Errorf("failed to create two way merge patch: %v", err)
}
var maxRetries = 3
var isPatchSuccess = false
for i := 1; i <= maxRetries; i++ {
_, err = cli.CoreV1().Pods(namespace).Patch(podname, types.StrategicMergePatchType, patchBytes, "status")
if err == nil {
isPatchSuccess = true
klog.Infof("Successfully patching podReadinessGate: kubeedge.io/CloudCoreIsLeader to pod %q through apiserver", podname)
break
}
if errors.IsConflict(err) {
// If the patch failure is due to update conflict, the necessary retransmission is performed
if i >= maxRetries {
klog.Errorf("updateMaxRetries(%d) has reached, failed to patching podReadinessGate: kubeedge.io/CloudCoreIsLeader because of update conflict", maxRetries)
}
continue
}
break
}
if !isPatchSuccess {
return err
}
} else {
klog.Infoln("CloudCore is not running in pod")
}
return nil
}
// Trigger core.GracefulShutdown()
func TriggerGracefulShutdown() {
if beehiveContext.GetContext().Err() != nil {
klog.Errorln("Program is in gracefully shutdown")
return
}
klog.Errorln("Trigger graceful shutdown!")
p, err := os.FindProcess(syscall.Getpid())
if err != nil {
klog.Errorf("Failed to find self process: %v", err)
}
err = p.Signal(os.Interrupt)
if err != nil {
klog.Errorf("Failed to trigger graceful shutdown: %v", err)
}
}
func CreateNamespaceIfNeeded(cli *kubernetes.Clientset, ns string) error {
c := cli.CoreV1()
if _, err := c.Namespaces().Get(ns, metav1.GetOptions{}); err == nil {
// the namespace already exists
return nil
}
newNs := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: ns,
Namespace: "",
},
}
_, err := c.Namespaces().Create(newNs)
if err != nil && errors.IsAlreadyExists(err) {
err = nil
}
return err
}
| 1 | 16,755 | delete the empty line here. | kubeedge-kubeedge | go |
@@ -302,6 +302,16 @@ describe Subscription do
end
end
+ describe "#reactivate" do
+ it "unsets scheduled_for_deactivation_on" do
+ subscription = create(:subscription, scheduled_for_deactivation_on: 1.day.from_now)
+ subscription.reactivate
+ subscription.reload
+
+ expect(subscription.scheduled_for_deactivation_on).to be_nil
+ end
+ end
+
def setup_cutomer
stripe_customer = double(
"StripeCustomer", | 1 | require "rails_helper"
describe Subscription do
it { should have_one(:team).dependent(:destroy) }
it { should belong_to(:plan) }
it { should belong_to(:user) }
it { should delegate(:stripe_customer_id).to(:user) }
it { should validate_presence_of(:plan_id) }
it { should validate_presence_of(:plan_type) }
it { should validate_presence_of(:user_id) }
describe ".next_payment_in_2_days" do
it "only includes subscription that will be billed in 2 days" do
billed_today = create(:subscription, next_payment_on: Date.current)
billed_tomorrow = create(:subscription, next_payment_on: Date.current + 1.day)
billed_2_days_from_now = create(:subscription, next_payment_on: Date.current + 2.days)
billed_3_days_from_now = create(:subscription, next_payment_on: Date.current + 3.days)
expect(Subscription.next_payment_in_2_days).to eq [billed_2_days_from_now]
end
end
describe "#active?" do
it "returns true if deactivated_on is nil" do
subscription = Subscription.new(deactivated_on: nil)
expect(subscription).to be_active
end
it "returns false if deactivated_on is not nil" do
subscription = Subscription.new(deactivated_on: Time.zone.today)
expect(subscription).not_to be_active
end
end
describe "#scheduled_for_deactivation?" do
it "returns false if scheduled_for_deactivation_on is nil" do
subscription = Subscription.new(scheduled_for_deactivation_on: nil)
expect(subscription).not_to be_scheduled_for_deactivation
end
it "returns true if scheduled_for_deactivation_on is not nil" do
subscription = Subscription.new(
scheduled_for_deactivation_on: Time.zone.today
)
expect(subscription).to be_scheduled_for_deactivation
end
end
describe "#deactivate" do
it "updates the subscription record by setting deactivated_on to today" do
subscription = create(:active_subscription, :purchased)
subscription.deactivate
subscription.reload
expect(subscription.deactivated_on).to eq Time.zone.today
end
it "unfulfills itself" do
fulfillment = spy("fulfillment")
subscription = create(:active_subscription)
allow(SubscriptionFulfillment).to receive(:new).
with(subscription.user, subscription.plan).
and_return(fulfillment)
subscription.deactivate
expect(fulfillment).to have_received(:remove)
end
it "unfulfills for a user who changed their plan" do
original_plan = create(:plan)
new_plan = create(:plan)
subscription = create(:active_subscription, plan: new_plan)
create(
:checkout,
user: subscription.user,
plan: original_plan
)
expect { subscription.deactivate }.not_to raise_error
end
end
describe "#change_plan" do
it "updates upcase and stripe plans" do
subscription = build(:subscription)
allow(subscription).to receive(:write_plan)
allow(subscription).to receive(:change_stripe_plan)
sku = "plan_sku"
subscription.change_plan(sku: sku)
expect(subscription).to have_received(:write_plan).with(sku: sku)
expect(subscription).to have_received(:change_stripe_plan).with(sku: sku)
end
end
describe "#change_stripe_plan" do
it "updates the plan in Stripe" do
different_plan = create(:plan, sku: "different")
stripe_customer = setup_cutomer
subscription = create(:active_subscription)
subscription.change_stripe_plan(sku: different_plan.sku)
expect(stripe_customer.subscriptions.first.plan).to eq different_plan.sku
end
end
describe "#write_plan" do
context "when subscription is for an individual" do
it "notifies Analytics for current user" do
analytics_updater = spy("AnalyticsUpdater")
allow(Analytics).to receive(:new).and_return(analytics_updater)
subscription = create(:subscription, team: nil)
subscription.write_plan(sku: create(:plan).sku)
expect(Analytics).to have_received(:new).with(subscription.user)
expect(analytics_updater).to have_received(:track_updated)
end
end
context "when subscription is for a team" do
it "notifies Analytics for all users in the team" do
analytics_updater = spy("AnalyticsUpdater")
allow(Analytics).to receive(:new).and_return(analytics_updater)
users = create_list(:user, 2)
team = create(:team, users: users)
subscription = create(:team_subscription, team: team, user: users.first)
subscription.write_plan(sku: create(:plan).sku)
expect(Analytics).to have_received(:new).with(users.first)
expect(Analytics).to have_received(:new).with(users.second)
expect(analytics_updater).to have_received(:track_updated).twice
end
end
it "changes the subscription plan to the given plan" do
different_plan = create(:plan, sku: "different")
subscription = create(:active_subscription)
subscription.write_plan(sku: different_plan.sku)
expect(subscription.plan).to eq different_plan
end
it "fulfills features gained by the new plan" do
subscription = create(:active_subscription)
feature_fulfillment = stub_feature_fulfillment
subscription.write_plan(sku: build_stubbed(:plan).sku)
expect(feature_fulfillment).to have_received(:fulfill_gained_features)
end
it "unfulfills features lost by the old plan" do
subscription = create(:active_subscription)
feature_fulfillment = stub_feature_fulfillment
subscription.write_plan(sku: build_stubbed(:plan).sku)
expect(feature_fulfillment).to have_received(:unfulfill_lost_features)
end
def stub_feature_fulfillment
fulfillment = spy("FeatureFulfillment")
allow(FeatureFulfillment).to receive(:new).and_return(fulfillment)
fulfillment
end
end
describe "#change_quantity" do
it "updates the plan in Stripe" do
stripe_customer = setup_cutomer
subscription = create(:active_subscription)
subscription.change_quantity(4)
expect(stripe_customer.subscriptions.first.quantity).to eq 4
end
end
describe "#plan_name" do
it "delegates to plan" do
plan = build_stubbed(:plan, name: "Individual")
subscription = build_stubbed(:subscription, plan: plan)
expect(subscription.plan_name).to eq "Individual"
end
end
describe ".canceled_in_last_30_days" do
it "returns nothing when none have been canceled within 30 days" do
create(:subscription, deactivated_on: 60.days.ago)
expect(Subscription.canceled_in_last_30_days).to be_empty
end
it "returns the subscriptions canceled within 30 days" do
subscription = create(:subscription, deactivated_on: 7.days.ago)
expect(Subscription.canceled_in_last_30_days).to eq [subscription]
end
end
describe ".active_as_of" do
it "returns nothing when no subscriptions canceled" do
expect(Subscription.active_as_of(Time.zone.now)).to be_empty
end
it "returns nothing when subscription canceled before the given date" do
create(:subscription, deactivated_on: 9.days.ago)
expect(Subscription.active_as_of(8.days.ago)).to be_empty
end
it "returns the subscriptions canceled after the given date" do
subscription = create(:subscription, deactivated_on: 7.days.ago)
expect(Subscription.active_as_of(8.days.ago)).to eq [subscription]
end
it "returns the subscriptions not canceled" do
subscription = create(:subscription)
expect(Subscription.active_as_of(8.days.ago)).to eq [subscription]
end
end
describe ".created_before" do
it "returns nothing when the are no subscriptions" do
expect(Subscription.created_before(Time.zone.now)).to be_empty
end
it "returns nothing when nothing has been created before the given date" do
create(:subscription, created_at: 1.day.ago)
expect(Subscription.created_before(2.days.ago)).to be_empty
end
it "returns the subscriptions created before the given date" do
subscription = create(:subscription, created_at: 2.days.ago)
expect(Subscription.created_before(1.day.ago)).to eq [subscription]
end
end
describe "#team?" do
it "returns true with a team" do
subscription = create(:team).subscription
expect(subscription).to be_team
end
it "returns false without a team" do
subscription = build_stubbed(:subscription)
expect(subscription).to_not be_team
end
end
describe "#last_charge" do
it "returns the last charge for the customer" do
charge = double("Stripe::Charge")
allow(Stripe::Charge).to receive(:all).and_return([charge])
subscription = build_stubbed(:subscription)
expect(subscription.last_charge).to eq charge
expect(Stripe::Charge).to have_received(:all)
.with(count: 1, customer: subscription.stripe_customer_id)
end
end
describe "owner?" do
context "when the given user is the owner" do
it "returns true" do
user = User.new
subscription = build_stubbed(:subscription, user: user)
expect(subscription.owner?(user)).to eq true
end
end
context "when the given user is not the owner" do
it "returns false" do
user = User.new
subscription = build_stubbed(:subscription)
expect(subscription.owner?(user)).to eq false
end
end
end
describe "next_payment_amount_in_dollars" do
it "returns the next payment amount in dollars" do
subscription = build(:subscription, next_payment_amount: 1000)
expect(subscription.next_payment_amount_in_dollars).to eq 10
end
end
def setup_cutomer
stripe_customer = double(
"StripeCustomer",
subscriptions: [FakeSubscription.new]
)
allow(Stripe::Customer).to receive(:retrieve).and_return(stripe_customer)
stripe_customer
end
end
| 1 | 16,874 | Line is too long. [89/80] | thoughtbot-upcase | rb |
@@ -200,7 +200,7 @@ public class JsonHttpRemoteConfig {
}
}
- private UrlMapper getUrlMapper(String method) {
+ protected UrlMapper getUrlMapper(String method) {
if ("DELETE".equals(method)) {
return deleteMapper;
} else if ("GET".equals(method)) { | 1 | /*
Copyright 2012 Selenium committers
Copyright 2012 Software Freedom Conservancy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.openqa.selenium.remote.server;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.remote.SessionNotFoundException;
import org.openqa.selenium.remote.server.handler.AcceptAlert;
import org.openqa.selenium.remote.server.handler.AddConfig;
import org.openqa.selenium.remote.server.handler.AddCookie;
import org.openqa.selenium.remote.server.handler.CaptureScreenshot;
import org.openqa.selenium.remote.server.handler.ChangeUrl;
import org.openqa.selenium.remote.server.handler.ClearElement;
import org.openqa.selenium.remote.server.handler.ClickElement;
import org.openqa.selenium.remote.server.handler.CloseWindow;
import org.openqa.selenium.remote.server.handler.ConfigureTimeout;
import org.openqa.selenium.remote.server.handler.DeleteCookie;
import org.openqa.selenium.remote.server.handler.DeleteNamedCookie;
import org.openqa.selenium.remote.server.handler.DeleteSession;
import org.openqa.selenium.remote.server.handler.DescribeElement;
import org.openqa.selenium.remote.server.handler.DismissAlert;
import org.openqa.selenium.remote.server.handler.ElementEquality;
import org.openqa.selenium.remote.server.handler.ExecuteAsyncScript;
import org.openqa.selenium.remote.server.handler.ExecuteScript;
import org.openqa.selenium.remote.server.handler.FindActiveElement;
import org.openqa.selenium.remote.server.handler.FindChildElement;
import org.openqa.selenium.remote.server.handler.FindChildElements;
import org.openqa.selenium.remote.server.handler.FindElement;
import org.openqa.selenium.remote.server.handler.FindElements;
import org.openqa.selenium.remote.server.handler.GetAlertText;
import org.openqa.selenium.remote.server.handler.GetAllCookies;
import org.openqa.selenium.remote.server.handler.GetAllSessions;
import org.openqa.selenium.remote.server.handler.GetAllWindowHandles;
import org.openqa.selenium.remote.server.handler.GetAvailableLogTypesHandler;
import org.openqa.selenium.remote.server.handler.GetCssProperty;
import org.openqa.selenium.remote.server.handler.GetCurrentUrl;
import org.openqa.selenium.remote.server.handler.GetCurrentWindowHandle;
import org.openqa.selenium.remote.server.handler.GetElementAttribute;
import org.openqa.selenium.remote.server.handler.GetElementDisplayed;
import org.openqa.selenium.remote.server.handler.GetElementEnabled;
import org.openqa.selenium.remote.server.handler.GetElementLocation;
import org.openqa.selenium.remote.server.handler.GetElementLocationInView;
import org.openqa.selenium.remote.server.handler.GetElementSelected;
import org.openqa.selenium.remote.server.handler.GetElementSize;
import org.openqa.selenium.remote.server.handler.GetElementText;
import org.openqa.selenium.remote.server.handler.GetElementValue;
import org.openqa.selenium.remote.server.handler.GetPageSource;
import org.openqa.selenium.remote.server.handler.GetScreenOrientation;
import org.openqa.selenium.remote.server.handler.GetSessionLogsHandler;
import org.openqa.selenium.remote.server.handler.GetSessionCapabilities;
import org.openqa.selenium.remote.server.handler.GetTagName;
import org.openqa.selenium.remote.server.handler.GetTitle;
import org.openqa.selenium.remote.server.handler.GetWindowPosition;
import org.openqa.selenium.remote.server.handler.GetWindowSize;
import org.openqa.selenium.remote.server.handler.GoBack;
import org.openqa.selenium.remote.server.handler.GoForward;
import org.openqa.selenium.remote.server.handler.ImeActivateEngine;
import org.openqa.selenium.remote.server.handler.ImeDeactivate;
import org.openqa.selenium.remote.server.handler.ImeGetActiveEngine;
import org.openqa.selenium.remote.server.handler.ImeGetAvailableEngines;
import org.openqa.selenium.remote.server.handler.ImeIsActivated;
import org.openqa.selenium.remote.server.handler.ImplicitlyWait;
import org.openqa.selenium.remote.server.handler.GetLogHandler;
import org.openqa.selenium.remote.server.handler.MaximizeWindow;
import org.openqa.selenium.remote.server.handler.NewSession;
import org.openqa.selenium.remote.server.handler.RefreshPage;
import org.openqa.selenium.remote.server.handler.Rotate;
import org.openqa.selenium.remote.server.handler.SendKeys;
import org.openqa.selenium.remote.server.handler.SetAlertText;
import org.openqa.selenium.remote.server.handler.SetScriptTimeout;
import org.openqa.selenium.remote.server.handler.SetWindowPosition;
import org.openqa.selenium.remote.server.handler.SetWindowSize;
import org.openqa.selenium.remote.server.handler.Status;
import org.openqa.selenium.remote.server.handler.SubmitElement;
import org.openqa.selenium.remote.server.handler.SwitchToFrame;
import org.openqa.selenium.remote.server.handler.SwitchToWindow;
import org.openqa.selenium.remote.server.handler.UploadFile;
import org.openqa.selenium.remote.server.handler.html5.ClearLocalStorage;
import org.openqa.selenium.remote.server.handler.html5.ClearSessionStorage;
import org.openqa.selenium.remote.server.handler.html5.ExecuteSQL;
import org.openqa.selenium.remote.server.handler.html5.GetAppCacheStatus;
import org.openqa.selenium.remote.server.handler.html5.GetLocalStorageItem;
import org.openqa.selenium.remote.server.handler.html5.GetLocalStorageKeys;
import org.openqa.selenium.remote.server.handler.html5.GetLocalStorageSize;
import org.openqa.selenium.remote.server.handler.html5.GetLocationContext;
import org.openqa.selenium.remote.server.handler.html5.GetSessionStorageItem;
import org.openqa.selenium.remote.server.handler.html5.GetSessionStorageKeys;
import org.openqa.selenium.remote.server.handler.html5.GetSessionStorageSize;
import org.openqa.selenium.remote.server.handler.html5.IsBrowserOnline;
import org.openqa.selenium.remote.server.handler.html5.RemoveLocalStorageItem;
import org.openqa.selenium.remote.server.handler.html5.RemoveSessionStorageItem;
import org.openqa.selenium.remote.server.handler.html5.SetBrowserConnection;
import org.openqa.selenium.remote.server.handler.html5.SetLocalStorageItem;
import org.openqa.selenium.remote.server.handler.html5.SetLocationContext;
import org.openqa.selenium.remote.server.handler.html5.SetSessionStorageItem;
import org.openqa.selenium.remote.server.handler.interactions.ClickInSession;
import org.openqa.selenium.remote.server.handler.interactions.DoubleClickInSession;
import org.openqa.selenium.remote.server.handler.interactions.MouseDown;
import org.openqa.selenium.remote.server.handler.interactions.MouseMoveToLocation;
import org.openqa.selenium.remote.server.handler.interactions.MouseUp;
import org.openqa.selenium.remote.server.handler.interactions.SendKeyToActiveElement;
import org.openqa.selenium.remote.server.handler.interactions.touch.DoubleTapOnElement;
import org.openqa.selenium.remote.server.handler.interactions.touch.Down;
import org.openqa.selenium.remote.server.handler.interactions.touch.Flick;
import org.openqa.selenium.remote.server.handler.interactions.touch.LongPressOnElement;
import org.openqa.selenium.remote.server.handler.interactions.touch.Move;
import org.openqa.selenium.remote.server.handler.interactions.touch.Scroll;
import org.openqa.selenium.remote.server.handler.interactions.touch.SingleTapOnElement;
import org.openqa.selenium.remote.server.handler.interactions.touch.Up;
import org.openqa.selenium.remote.server.renderer.EmptyResult;
import org.openqa.selenium.remote.server.renderer.ForwardResult;
import org.openqa.selenium.remote.server.renderer.JsonErrorExceptionResult;
import org.openqa.selenium.remote.server.renderer.JsonResult;
import org.openqa.selenium.remote.server.renderer.RedirectResult;
import org.openqa.selenium.remote.server.renderer.ResourceCopyResult;
import org.openqa.selenium.remote.server.resource.StaticResource;
import org.openqa.selenium.remote.server.rest.RestishHandler;
import org.openqa.selenium.remote.server.rest.Result;
import org.openqa.selenium.remote.server.rest.ResultConfig;
import org.openqa.selenium.remote.server.rest.ResultType;
import org.openqa.selenium.remote.server.rest.UrlMapper;
import org.openqa.selenium.remote.server.xdrpc.CrossDomainRpcRenderer;
import java.util.EnumSet;
import java.util.logging.Logger;
import static org.openqa.selenium.remote.server.HttpStatusCodes.INTERNAL_SERVER_ERROR;
import static org.openqa.selenium.remote.server.HttpStatusCodes.NOT_FOUND;
public class JsonHttpRemoteConfig {
private static final String EXCEPTION = ":exception";
private static final String RESPONSE = ":response";
private UrlMapper getMapper;
private UrlMapper postMapper;
private UrlMapper deleteMapper;
private final Logger log;
public JsonHttpRemoteConfig(DriverSessions sessions, Logger log) {
this.log = log;
setUpMappings(sessions, log);
}
public void addGlobalHandler(ResultType type, Result result) {
getMapper.addGlobalHandler(type, result);
postMapper.addGlobalHandler(type, result);
deleteMapper.addGlobalHandler(type, result);
}
public ResultConfig addNewGetMapping(String path, Class<? extends RestishHandler> implementationClass) {
return getMapper.bind(path, implementationClass);
}
public ResultConfig addNewPostMapping(String path, Class<? extends RestishHandler> implementationClass) {
return postMapper.bind(path, implementationClass);
}
public ResultConfig addNewDeleteMapping(String path,
Class<? extends RestishHandler> implementationClass) {
return deleteMapper.bind(path, implementationClass);
}
public void handleRequest(HttpRequest request, HttpResponse response)
throws WebDriverException {
try {
UrlMapper mapper = getUrlMapper(request.getMethod());
if (mapper == null) {
response.setStatus(INTERNAL_SERVER_ERROR);
response.end();
return;
}
ResultConfig config = mapper.getConfig(request.getPath());
if (config == null) {
response.setStatus(NOT_FOUND);
response.end();
} else {
config.handle(request.getPath(), request, response);
}
} catch (SessionNotFoundException e){
response.setStatus(NOT_FOUND);
response.end();
} catch (Exception e) {
log.warning("Fatal, unhandled exception: " + request.getPath() + ": " + e);
throw new WebDriverException(e);
}
}
private UrlMapper getUrlMapper(String method) {
if ("DELETE".equals(method)) {
return deleteMapper;
} else if ("GET".equals(method)) {
return getMapper;
} else if ("POST".equals(method)) {
return postMapper;
} else {
throw new IllegalArgumentException("Unknown method: " + method);
}
}
private void setUpMappings(DriverSessions driverSessions, Logger logger) {
final EmptyResult emptyResponse = new EmptyResult();
final JsonResult jsonResponse = new JsonResult(RESPONSE);
getMapper = new UrlMapper(driverSessions, logger);
postMapper = new UrlMapper(driverSessions, logger);
deleteMapper = new UrlMapper(driverSessions, logger);
Result jsonErrorResult = new Result(MimeType.EMPTY,
new JsonErrorExceptionResult(EXCEPTION, RESPONSE));
addGlobalHandler(ResultType.EXCEPTION, jsonErrorResult);
addGlobalHandler(ResultType.ERROR, jsonErrorResult);
Result xdrpcResult = new Result(MimeType.CROSS_DOMAIN_RPC,
new CrossDomainRpcRenderer(RESPONSE, EXCEPTION), true);
for (ResultType resultType : EnumSet.allOf(ResultType.class)) {
addGlobalHandler(resultType, xdrpcResult);
}
// When requesting the command root from a JSON-client, just return the server
// status. For everyone else, redirect to the web front end.
getMapper.bind("/", Status.class)
.on(ResultType.SUCCESS, new RedirectResult("/static/resource/hub.html"))
.on(ResultType.SUCCESS, jsonResponse, "application/json");
getMapper.bind("/static/resource/:file", StaticResource.class)
.on(ResultType.SUCCESS, new ResourceCopyResult(RESPONSE))
// Nope, JSON clients don't get access to static resources.
.on(ResultType.SUCCESS, emptyResponse, "application/json");
postMapper.bind("/config/drivers", AddConfig.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/status", Status.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/sessions", GetAllSessions.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session", NewSession.class)
.on(ResultType.SUCCESS, new RedirectResult("/session/:sessionId"));
getMapper.bind("/session/:sessionId", GetSessionCapabilities.class)
.on(ResultType.SUCCESS, new ForwardResult("/WEB-INF/views/sessionCapabilities.jsp"))
.on(ResultType.SUCCESS, jsonResponse, "application/json");
deleteMapper.bind("/session/:sessionId", DeleteSession.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/session/:sessionId/window_handle", GetCurrentWindowHandle.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/window_handles", GetAllWindowHandles.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/dismiss_alert", DismissAlert.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/accept_alert", AcceptAlert.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/session/:sessionId/alert_text", GetAlertText.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/alert_text", SetAlertText.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/url", ChangeUrl.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/session/:sessionId/url", GetCurrentUrl.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/forward", GoForward.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/back", GoBack.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/refresh", RefreshPage.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/execute", ExecuteScript.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/execute_async", ExecuteAsyncScript.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/source", GetPageSource.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/screenshot", CaptureScreenshot.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/title", GetTitle.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/element", FindElement.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/element/:id", DescribeElement.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/elements", FindElements.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/element/active", FindActiveElement.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/element/:id/element", FindChildElement.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/element/:id/elements", FindChildElements.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/element/:id/click", ClickElement.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/session/:sessionId/element/:id/text", GetElementText.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/element/:id/submit", SubmitElement.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/file", UploadFile.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/element/:id/value", SendKeys.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/session/:sessionId/element/:id/value", GetElementValue.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/element/:id/name", GetTagName.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/element/:id/clear", ClearElement.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/session/:sessionId/element/:id/selected", GetElementSelected.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/element/:id/enabled", GetElementEnabled.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/element/:id/displayed", GetElementDisplayed.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/element/:id/location", GetElementLocation.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/element/:id/location_in_view",
GetElementLocationInView.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/element/:id/size", GetElementSize.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/element/:id/css/:propertyName", GetCssProperty.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/element/:id/attribute/:name", GetElementAttribute.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/element/:id/equals/:other", ElementEquality.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/cookie", GetAllCookies.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/cookie", AddCookie.class)
.on(ResultType.SUCCESS, emptyResponse);
deleteMapper.bind("/session/:sessionId/cookie", DeleteCookie.class)
.on(ResultType.SUCCESS, emptyResponse);
deleteMapper.bind("/session/:sessionId/cookie/:name", DeleteNamedCookie.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/frame", SwitchToFrame.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/window", SwitchToWindow.class)
.on(ResultType.SUCCESS, emptyResponse);
deleteMapper.bind("/session/:sessionId/window", CloseWindow.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/session/:sessionId/window/:windowHandle/size", GetWindowSize.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/window/:windowHandle/size", SetWindowSize.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/session/:sessionId/window/:windowHandle/position", GetWindowPosition.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/window/:windowHandle/position", SetWindowPosition.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/window/:windowHandle/maximize", MaximizeWindow.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/timeouts", ConfigureTimeout.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/timeouts/implicit_wait", ImplicitlyWait.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/timeouts/async_script", SetScriptTimeout.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/execute_sql", ExecuteSQL.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/location", GetLocationContext.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/location", SetLocationContext.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/session/:sessionId/application_cache/status", GetAppCacheStatus.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/browser_connection", SetBrowserConnection.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/session/:sessionId/browser_connection", IsBrowserOnline.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/local_storage/key/:key", GetLocalStorageItem.class)
.on(ResultType.SUCCESS, jsonResponse);
deleteMapper.bind("/session/:sessionId/local_storage/key/:key", RemoveLocalStorageItem.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/local_storage", GetLocalStorageKeys.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/local_storage", SetLocalStorageItem.class)
.on(ResultType.SUCCESS, emptyResponse);
deleteMapper.bind("/session/:sessionId/local_storage", ClearLocalStorage.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/session/:sessionId/local_storage/size", GetLocalStorageSize.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/session_storage/key/:key", GetSessionStorageItem.class)
.on(ResultType.SUCCESS, jsonResponse);
deleteMapper.bind("/session/:sessionId/session_storage/key/:key", RemoveSessionStorageItem.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/session_storage", GetSessionStorageKeys.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/session_storage", SetSessionStorageItem.class)
.on(ResultType.SUCCESS, emptyResponse);
deleteMapper.bind("/session/:sessionId/session_storage", ClearSessionStorage.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/session/:sessionId/session_storage/size", GetSessionStorageSize.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/orientation", GetScreenOrientation.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/orientation", Rotate.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/moveto", MouseMoveToLocation.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/click", ClickInSession.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/doubleclick", DoubleClickInSession.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/buttondown", MouseDown.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/buttonup", MouseUp.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/keys", SendKeyToActiveElement.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/session/:sessionId/ime/available_engines", ImeGetAvailableEngines.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/ime/active_engine", ImeGetActiveEngine.class)
.on(ResultType.SUCCESS, jsonResponse);
getMapper.bind("/session/:sessionId/ime/activated", ImeIsActivated.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/ime/deactivate", ImeDeactivate.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/ime/activate", ImeActivateEngine.class)
.on(ResultType.SUCCESS, jsonResponse);
// Advanced Touch API
postMapper.bind("/session/:sessionId/touch/click", SingleTapOnElement.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/touch/down", Down.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/touch/up", Up.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/touch/move", Move.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/touch/scroll", Scroll.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/touch/doubleclick", DoubleTapOnElement.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/touch/longclick", LongPressOnElement.class)
.on(ResultType.SUCCESS, emptyResponse);
postMapper.bind("/session/:sessionId/touch/flick", Flick.class)
.on(ResultType.SUCCESS, emptyResponse);
getMapper.bind("/session/:sessionId/log/types", GetAvailableLogTypesHandler.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/session/:sessionId/log", GetLogHandler.class)
.on(ResultType.SUCCESS, jsonResponse);
postMapper.bind("/logs", GetSessionLogsHandler.class)
.on(ResultType.SUCCESS, jsonResponse);
}
}
| 1 | 10,333 | You don't need to expose this method to do what you want. There are already public addNewGetMapping, addNewPostMapping, and addNewDeleteMapping methods. | SeleniumHQ-selenium | java |
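The review message above points at the existing public mapping methods as the intended extension point rather than exposing anything new. As a rough, hypothetical sketch only (the wrapper class, the "/my/status" path, and the package locations of JsonHttpRemoteConfig and DriverSessions are assumptions, not part of the row above), an extra GET route could be wired through that public API like this:

import java.util.logging.Logger;

import org.openqa.selenium.remote.server.DriverSessions;
import org.openqa.selenium.remote.server.JsonHttpRemoteConfig;
import org.openqa.selenium.remote.server.handler.Status;
import org.openqa.selenium.remote.server.renderer.JsonResult;
import org.openqa.selenium.remote.server.rest.ResultType;

public class ExtraMappingSketch {

  // Binds one additional GET route through the public addNewGetMapping API and
  // attaches a JSON renderer to it, mirroring how setUpMappings wires its own routes.
  // Status is reused only because it is a RestishHandler already shown above;
  // "/my/status" is an invented example path.
  public static JsonHttpRemoteConfig withExtraRoute(DriverSessions sessions, Logger log) {
    JsonHttpRemoteConfig config = new JsonHttpRemoteConfig(sessions, log);
    config.addNewGetMapping("/my/status", Status.class)
        .on(ResultType.SUCCESS, new JsonResult(":response"));
    return config;
  }
}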
@@ -782,7 +782,7 @@ namespace Microsoft.AspNetCore.Server.Kestrel.FunctionalTests
}
[Fact]
- public async Task HeadResponseCanContainContentLengthHeaderButBodyNotWritten()
+ public async Task HeadResponseBodyNotWritten()
{
using (var server = new TestServer(async httpContext =>
{ | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Net.Sockets;
using System.Text;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Http.Features;
using Microsoft.AspNetCore.Server.Kestrel.Internal.Http;
using Microsoft.AspNetCore.Server.Kestrel.Internal.Infrastructure;
using Microsoft.AspNetCore.Testing;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Internal;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Primitives;
using Moq;
using Xunit;
namespace Microsoft.AspNetCore.Server.Kestrel.FunctionalTests
{
public class ResponseTests
{
[Fact]
public async Task LargeDownload()
{
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.UseUrls("http://127.0.0.1:0/")
.Configure(app =>
{
app.Run(async context =>
{
var bytes = new byte[1024];
for (int i = 0; i < bytes.Length; i++)
{
bytes[i] = (byte)i;
}
context.Response.ContentLength = bytes.Length * 1024;
for (int i = 0; i < 1024; i++)
{
await context.Response.Body.WriteAsync(bytes, 0, bytes.Length);
}
});
});
using (var host = hostBuilder.Build())
{
host.Start();
using (var client = new HttpClient())
{
var response = await client.GetAsync($"http://localhost:{host.GetPort()}/");
response.EnsureSuccessStatusCode();
var responseBody = await response.Content.ReadAsStreamAsync();
// Read the full response body
var total = 0;
var bytes = new byte[1024];
var count = await responseBody.ReadAsync(bytes, 0, bytes.Length);
while (count > 0)
{
for (int i = 0; i < count; i++)
{
Assert.Equal(total % 256, bytes[i]);
total++;
}
count = await responseBody.ReadAsync(bytes, 0, bytes.Length);
}
}
}
}
[Theory, MemberData(nameof(NullHeaderData))]
public async Task IgnoreNullHeaderValues(string headerName, StringValues headerValue, string expectedValue)
{
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.UseUrls("http://127.0.0.1:0/")
.Configure(app =>
{
app.Run(async context =>
{
context.Response.Headers.Add(headerName, headerValue);
await context.Response.WriteAsync("");
});
});
using (var host = hostBuilder.Build())
{
host.Start();
using (var client = new HttpClient())
{
var response = await client.GetAsync($"http://localhost:{host.GetPort()}/");
response.EnsureSuccessStatusCode();
var headers = response.Headers;
if (expectedValue == null)
{
Assert.False(headers.Contains(headerName));
}
else
{
Assert.True(headers.Contains(headerName));
Assert.Equal(headers.GetValues(headerName).Single(), expectedValue);
}
}
}
}
[Fact]
public async Task OnCompleteCalledEvenWhenOnStartingNotCalled()
{
var onStartingCalled = false;
var onCompletedCalled = false;
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.UseUrls("http://127.0.0.1:0/")
.Configure(app =>
{
app.Run(context =>
{
context.Response.OnStarting(() => Task.Run(() => onStartingCalled = true));
context.Response.OnCompleted(() => Task.Run(() => onCompletedCalled = true));
// Prevent OnStarting call (see Frame<T>.RequestProcessingAsync()).
throw new Exception();
});
});
using (var host = hostBuilder.Build())
{
host.Start();
using (var client = new HttpClient())
{
var response = await client.GetAsync($"http://localhost:{host.GetPort()}/");
Assert.Equal(HttpStatusCode.InternalServerError, response.StatusCode);
Assert.False(onStartingCalled);
Assert.True(onCompletedCalled);
}
}
}
[Fact]
public async Task OnStartingThrowsWhenSetAfterResponseHasAlreadyStarted()
{
InvalidOperationException ex = null;
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.UseUrls("http://127.0.0.1:0/")
.Configure(app =>
{
app.Run(async context =>
{
await context.Response.WriteAsync("hello, world");
await context.Response.Body.FlushAsync();
ex = Assert.Throws<InvalidOperationException>(() => context.Response.OnStarting(_ => TaskCache.CompletedTask, null));
});
});
using (var host = hostBuilder.Build())
{
host.Start();
using (var client = new HttpClient())
{
var response = await client.GetAsync($"http://localhost:{host.GetPort()}/");
// Despite the error, the response had already started
Assert.Equal(HttpStatusCode.OK, response.StatusCode);
Assert.NotNull(ex);
}
}
}
[Fact]
public Task ResponseStatusCodeSetBeforeHttpContextDisposeAppException()
{
return ResponseStatusCodeSetBeforeHttpContextDispose(
context =>
{
throw new Exception();
},
expectedClientStatusCode: HttpStatusCode.InternalServerError,
expectedServerStatusCode: HttpStatusCode.InternalServerError);
}
[Fact]
public Task ResponseStatusCodeSetBeforeHttpContextDisposeRequestAborted()
{
return ResponseStatusCodeSetBeforeHttpContextDispose(
context =>
{
context.Abort();
return TaskCache.CompletedTask;
},
expectedClientStatusCode: null,
expectedServerStatusCode: 0);
}
[Fact]
public Task ResponseStatusCodeSetBeforeHttpContextDisposeRequestAbortedAppException()
{
return ResponseStatusCodeSetBeforeHttpContextDispose(
context =>
{
context.Abort();
throw new Exception();
},
expectedClientStatusCode: null,
expectedServerStatusCode: 0);
}
[Fact]
public Task ResponseStatusCodeSetBeforeHttpContextDisposedRequestMalformed()
{
return ResponseStatusCodeSetBeforeHttpContextDispose(
context =>
{
return TaskCache.CompletedTask;
},
expectedClientStatusCode: null,
expectedServerStatusCode: HttpStatusCode.BadRequest,
sendMalformedRequest: true);
}
[Fact]
public Task ResponseStatusCodeSetBeforeHttpContextDisposedRequestMalformedRead()
{
return ResponseStatusCodeSetBeforeHttpContextDispose(
async context =>
{
await context.Request.Body.ReadAsync(new byte[1], 0, 1);
},
expectedClientStatusCode: null,
expectedServerStatusCode: HttpStatusCode.BadRequest,
sendMalformedRequest: true);
}
[Fact]
public Task ResponseStatusCodeSetBeforeHttpContextDisposedRequestMalformedReadIgnored()
{
return ResponseStatusCodeSetBeforeHttpContextDispose(
async context =>
{
try
{
await context.Request.Body.ReadAsync(new byte[1], 0, 1);
}
catch (BadHttpRequestException)
{
}
},
expectedClientStatusCode: null,
expectedServerStatusCode: HttpStatusCode.BadRequest,
sendMalformedRequest: true);
}
private static async Task ResponseStatusCodeSetBeforeHttpContextDispose(
RequestDelegate handler,
HttpStatusCode? expectedClientStatusCode,
HttpStatusCode expectedServerStatusCode,
bool sendMalformedRequest = false)
{
var mockHttpContextFactory = new Mock<IHttpContextFactory>();
mockHttpContextFactory.Setup(f => f.Create(It.IsAny<IFeatureCollection>()))
.Returns<IFeatureCollection>(fc => new DefaultHttpContext(fc));
var disposedTcs = new TaskCompletionSource<int>();
mockHttpContextFactory.Setup(f => f.Dispose(It.IsAny<HttpContext>()))
.Callback<HttpContext>(c =>
{
disposedTcs.TrySetResult(c.Response.StatusCode);
});
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.UseUrls("http://127.0.0.1:0")
.ConfigureServices(services => services.AddSingleton<IHttpContextFactory>(mockHttpContextFactory.Object))
.Configure(app =>
{
app.Run(handler);
});
using (var host = hostBuilder.Build())
{
host.Start();
if (!sendMalformedRequest)
{
using (var client = new HttpClient())
{
try
{
var response = await client.GetAsync($"http://localhost:{host.GetPort()}/");
Assert.Equal(expectedClientStatusCode, response.StatusCode);
}
catch
{
if (expectedClientStatusCode != null)
{
throw;
}
}
}
}
else
{
using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp))
{
socket.Connect(new IPEndPoint(IPAddress.Loopback, host.GetPort()));
socket.Send(Encoding.ASCII.GetBytes(
"POST / HTTP/1.1\r\n" +
"Transfer-Encoding: chunked\r\n" +
"\r\n" +
"wrong"));
}
}
var disposedStatusCode = await disposedTcs.Task.TimeoutAfter(TimeSpan.FromSeconds(10));
Assert.Equal(expectedServerStatusCode, (HttpStatusCode)disposedStatusCode);
}
}
// https://github.com/aspnet/KestrelHttpServer/pull/1111/files#r80584475 explains the reason for this test.
[Fact]
public async Task SingleErrorResponseSentWhenAppSwallowsBadRequestException()
{
BadHttpRequestException readException = null;
using (var server = new TestServer(async httpContext =>
{
readException = await Assert.ThrowsAsync<BadHttpRequestException>(
async () => await httpContext.Request.Body.ReadAsync(new byte[1], 0, 1));
}, new TestServiceContext()))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"POST / HTTP/1.1",
"Transfer-Encoding: chunked",
"",
"gg");
await connection.ReceiveForcedEnd(
"HTTP/1.1 400 Bad Request",
"Connection: close",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
Assert.NotNull(readException);
}
[Fact]
public async Task TransferEncodingChunkedSetOnUnknownLengthHttp11Response()
{
using (var server = new TestServer(async httpContext =>
{
await httpContext.Response.WriteAsync("hello, ");
await httpContext.Response.WriteAsync("world");
}, new TestServiceContext()))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.Receive(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Transfer-Encoding: chunked",
"",
"7",
"hello, ",
"5",
"world",
"0",
"",
"");
}
}
}
[Theory]
[InlineData(204)]
[InlineData(205)]
[InlineData(304)]
public async Task TransferEncodingChunkedNotSetOnNonBodyResponse(int statusCode)
{
using (var server = new TestServer(httpContext =>
{
httpContext.Response.StatusCode = statusCode;
return TaskCache.CompletedTask;
}, new TestServiceContext()))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.Receive(
$"HTTP/1.1 {Encoding.ASCII.GetString(ReasonPhrases.ToStatusBytes(statusCode))}",
$"Date: {server.Context.DateHeaderValue}",
"",
"");
}
}
}
[Fact]
public async Task TransferEncodingNotSetOnHeadResponse()
{
using (var server = new TestServer(httpContext =>
{
return TaskCache.CompletedTask;
}, new TestServiceContext()))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"HEAD / HTTP/1.1",
"",
"");
await connection.Receive(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"",
"");
}
}
}
[Fact]
public async Task ResponseBodyNotWrittenOnHeadResponseAndLoggedOnlyOnce()
{
var mockKestrelTrace = new Mock<IKestrelTrace>();
using (var server = new TestServer(async httpContext =>
{
await httpContext.Response.WriteAsync("hello, world");
await httpContext.Response.Body.FlushAsync();
}, new TestServiceContext { Log = mockKestrelTrace.Object }))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"HEAD / HTTP/1.1",
"",
"");
await connection.Receive(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"",
"");
}
}
mockKestrelTrace.Verify(kestrelTrace =>
kestrelTrace.ConnectionHeadResponseBodyWrite(It.IsAny<string>(), "hello, world".Length), Times.Once);
}
[Fact]
public async Task WhenAppWritesMoreThanContentLengthWriteThrowsAndConnectionCloses()
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(httpContext =>
{
httpContext.Response.ContentLength = 11;
httpContext.Response.Body.Write(Encoding.ASCII.GetBytes("hello,"), 0, 6);
httpContext.Response.Body.Write(Encoding.ASCII.GetBytes(" world"), 0, 6);
return TaskCache.CompletedTask;
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 11",
"",
"hello,");
}
}
var logMessage = Assert.Single(testLogger.Messages, message => message.LogLevel == LogLevel.Error);
Assert.Equal(
$"Response Content-Length mismatch: too many bytes written (12 of 11).",
logMessage.Exception.Message);
}
[Fact]
public async Task WhenAppWritesMoreThanContentLengthWriteAsyncThrowsAndConnectionCloses()
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.ContentLength = 11;
await httpContext.Response.WriteAsync("hello,");
await httpContext.Response.WriteAsync(" world");
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 11",
"",
"hello,");
}
}
var logMessage = Assert.Single(testLogger.Messages, message => message.LogLevel == LogLevel.Error);
Assert.Equal(
$"Response Content-Length mismatch: too many bytes written (12 of 11).",
logMessage.Exception.Message);
}
[Fact]
public async Task WhenAppWritesMoreThanContentLengthAndResponseNotStarted500ResponseSentAndConnectionCloses()
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.ContentLength = 5;
await httpContext.Response.WriteAsync("hello, world");
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
$"HTTP/1.1 500 Internal Server Error",
"Connection: close",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
var logMessage = Assert.Single(testLogger.Messages, message => message.LogLevel == LogLevel.Error);
Assert.Equal(
$"Response Content-Length mismatch: too many bytes written (12 of 5).",
logMessage.Exception.Message);
}
[Fact]
public async Task WhenAppWritesLessThanContentLengthErrorLogged()
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.ContentLength = 13;
await httpContext.Response.WriteAsync("hello, world");
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 13",
"",
"hello, world");
}
}
var errorMessage = Assert.Single(testLogger.Messages, message => message.LogLevel == LogLevel.Error);
Assert.Equal(
$"Response Content-Length mismatch: too few bytes written (12 of 13).",
errorMessage.Exception.Message);
}
[Fact]
public async Task WhenAppSetsContentLengthButDoesNotWriteBody500ResponseSentAndConnectionCloses()
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(httpContext =>
{
httpContext.Response.ContentLength = 5;
return TaskCache.CompletedTask;
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
$"HTTP/1.1 500 Internal Server Error",
"Connection: close",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
var errorMessage = Assert.Single(testLogger.Messages, message => message.LogLevel == LogLevel.Error);
Assert.Equal(
$"Response Content-Length mismatch: too few bytes written (0 of 5).",
errorMessage.Exception.Message);
}
[Theory]
[InlineData(false)]
[InlineData(true)]
public async Task WhenAppSetsContentLengthToZeroAndDoesNotWriteNoErrorIsThrown(bool flushResponse)
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.ContentLength = 0;
if (flushResponse)
{
await httpContext.Response.Body.FlushAsync();
}
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.Receive(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
Assert.Equal(0, testLogger.ApplicationErrorsLogged);
}
// https://tools.ietf.org/html/rfc7230#section-3.3.3
// If a message is received with both a Transfer-Encoding and a
// Content-Length header field, the Transfer-Encoding overrides the
// Content-Length.
[Fact]
public async Task WhenAppSetsTransferEncodingAndContentLengthWritingLessIsNotAnError()
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.Headers["Transfer-Encoding"] = "chunked";
httpContext.Response.ContentLength = 13;
await httpContext.Response.WriteAsync("hello, world");
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.Receive(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 13",
"Transfer-Encoding: chunked",
"",
"hello, world");
}
}
Assert.Equal(0, testLogger.ApplicationErrorsLogged);
}
// https://tools.ietf.org/html/rfc7230#section-3.3.3
// If a message is received with both a Transfer-Encoding and a
// Content-Length header field, the Transfer-Encoding overrides the
// Content-Length.
[Fact]
public async Task WhenAppSetsTransferEncodingAndContentLengthWritingMoreIsNotAnError()
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.Headers["Transfer-Encoding"] = "chunked";
httpContext.Response.ContentLength = 11;
await httpContext.Response.WriteAsync("hello, world");
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.Receive(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 11",
"Transfer-Encoding: chunked",
"",
"hello, world");
}
}
Assert.Equal(0, testLogger.ApplicationErrorsLogged);
}
[Fact]
public async Task HeadResponseCanContainContentLengthHeader()
{
using (var server = new TestServer(httpContext =>
{
httpContext.Response.ContentLength = 42;
return TaskCache.CompletedTask;
}, new TestServiceContext()))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"HEAD / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 42",
"",
"");
}
}
}
[Fact]
public async Task HeadResponseCanContainContentLengthHeaderButBodyNotWritten()
{
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.ContentLength = 12;
await httpContext.Response.WriteAsync("hello, world");
}, new TestServiceContext()))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"HEAD / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 12",
"",
"");
}
}
}
[Fact]
public async Task AppCanWriteOwnBadRequestResponse()
{
var expectedResponse = string.Empty;
var responseWrittenTcs = new TaskCompletionSource<object>();
using (var server = new TestServer(async httpContext =>
{
try
{
await httpContext.Request.Body.ReadAsync(new byte[1], 0, 1);
}
catch (BadHttpRequestException ex)
{
expectedResponse = ex.Message;
httpContext.Response.StatusCode = 400;
httpContext.Response.ContentLength = ex.Message.Length;
await httpContext.Response.WriteAsync(ex.Message);
responseWrittenTcs.SetResult(null);
}
}, new TestServiceContext()))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"POST / HTTP/1.1",
"Transfer-Encoding: chunked",
"",
"bad");
await responseWrittenTcs.Task;
await connection.ReceiveEnd(
"HTTP/1.1 400 Bad Request",
$"Date: {server.Context.DateHeaderValue}",
$"Content-Length: {expectedResponse.Length}",
"",
expectedResponse);
}
}
}
[Theory]
[InlineData("gzip")]
[InlineData("chunked, gzip")]
[InlineData("gzip")]
[InlineData("chunked, gzip")]
public async Task ConnectionClosedWhenChunkedIsNotFinalTransferCoding(string responseTransferEncoding)
{
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.Headers["Transfer-Encoding"] = responseTransferEncoding;
await httpContext.Response.WriteAsync("hello, world");
}, new TestServiceContext()))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: {responseTransferEncoding}",
"",
"hello, world");
}
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.0",
"Connection: keep-alive",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: {responseTransferEncoding}",
"",
"hello, world");
}
}
}
[Theory]
[InlineData("gzip")]
[InlineData("chunked, gzip")]
[InlineData("gzip")]
[InlineData("chunked, gzip")]
public async Task ConnectionClosedWhenChunkedIsNotFinalTransferCodingEvenIfConnectionKeepAliveSetInResponse(string responseTransferEncoding)
{
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.Headers["Connection"] = "keep-alive";
httpContext.Response.Headers["Transfer-Encoding"] = responseTransferEncoding;
await httpContext.Response.WriteAsync("hello, world");
}, new TestServiceContext()))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: keep-alive",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: {responseTransferEncoding}",
"",
"hello, world");
}
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.0",
"Connection: keep-alive",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: keep-alive",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: {responseTransferEncoding}",
"",
"hello, world");
}
}
}
[Theory]
[InlineData("chunked")]
[InlineData("gzip, chunked")]
public async Task ConnectionKeptAliveWhenChunkedIsFinalTransferCoding(string responseTransferEncoding)
{
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.Headers["Transfer-Encoding"] = responseTransferEncoding;
// App would have to chunk manually, but here we don't care
await httpContext.Response.WriteAsync("hello, world");
}, new TestServiceContext()))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.Receive(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: {responseTransferEncoding}",
"",
"hello, world");
// Make sure connection was kept open
await connection.SendEnd(
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: {responseTransferEncoding}",
"",
"hello, world");
}
}
}
public static TheoryData<string, StringValues, string> NullHeaderData
{
get
{
var dataset = new TheoryData<string, StringValues, string>();
// Unknown headers
dataset.Add("NullString", (string)null, null);
dataset.Add("EmptyString", "", "");
dataset.Add("NullStringArray", new string[] { null }, null);
dataset.Add("EmptyStringArray", new string[] { "" }, "");
dataset.Add("MixedStringArray", new string[] { null, "" }, "");
// Known headers
dataset.Add("Location", (string)null, null);
dataset.Add("Location", "", "");
dataset.Add("Location", new string[] { null }, null);
dataset.Add("Location", new string[] { "" }, "");
dataset.Add("Location", new string[] { null, "" }, "");
return dataset;
}
}
}
}
| 1 | 10,779 | This test passes in `dev`. Why wouldn't this work? | aspnet-KestrelHttpServer | .cs |
@@ -1,6 +1,14 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
+"""
+Purpose
+
+Shows how to use the AWS SDK for Python (Boto3) with PyTest and the AWS Device Farm
+browser testing feature.
+"""
+
+# snippet-start:[python.example_code.device-farm.Scenario_BrowserTesting]
import datetime
import os
import subprocess | 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import datetime
import os
import subprocess
import boto3
import pytest
from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
def get_git_hash():
"""
Get the short Git hash of the current commit of the repository
"""
try:
return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('utf-8').strip()
    except (subprocess.CalledProcessError, OSError):
        # Not inside a Git repository, or git is unavailable; fall back to a placeholder.
        return "norepo"
class TestHelloSuite:
"""
Our test suite.
This style of test suite allows us to use setup_method and teardown_method.
"""
def save_screenshot(self, name):
self.driver.save_screenshot(os.path.join(self.screenshot_path, name))
def setup_method(self, method):
"""
Set up a test.
This makes sure that the session for an individual test is ready.
The AWS credentials are read from the default ~/.aws/credentials or from the command line
by setting the AWS_ACCESS_KEY_ID and AWS_SECRET_KEY environment variables.
The project ARN is determined by the PROJECT_ARN environment variable.
"""
devicefarm_client = boto3.client("devicefarm")
project_arn = os.environ.get("PROJECT_ARN", None)
if project_arn is None:
raise ValueError("Must set PROJECT_ARN")
# Request a driver hub URL for the Selenium client
testgrid_url_response = devicefarm_client.create_test_grid_url(
projectArn= project_arn,
expiresInSeconds=300
)
# We want a directory to save our files into. We're going to make a directory in the current directory that holds our results.
self.screenshot_path = os.path.join('.','results', get_git_hash() + '-' + (datetime.date.today().isoformat()))
if(not os.path.exists(self.screenshot_path)):
os.makedirs(self.screenshot_path,exist_ok=True)
# We want a Firefox instance on Windows
desired_cap = DesiredCapabilities.FIREFOX
desired_cap['platform'] = 'windows'
desired_cap['BrowserVersion'] = 'latest'
# Configure the webdriver with the appropriate remote endpoint.
self.driver = webdriver.Remote(testgrid_url_response['url'] , desired_cap)
#
# Auto-Tagging
#
# In order to get the Session ARN, we need to look up the session by the
# Project ARN and session ID (from the driver).
testgrid_session_arn_response = devicefarm_client.get_test_grid_session(
projectArn = project_arn,
sessionId=self.driver.session_id
)
# Save the session's ARN so we can tag the session.
self.session_arn = testgrid_session_arn_response['testGridSession']['arn']
# In order to tag it, we're going to use the resourcegroupstaggingapi client to
# add a tag to the session ARN that we just got.
tag_client = boto3.client('resourcegroupstaggingapi')
tag_client.tag_resources(ResourceARNList=[self.session_arn],
Tags={
"TestSuite":f"testsuite {method.__name__}",
"GitId": get_git_hash()
})
def teardown_method(self, method):
"""
Clean up resources used by each method.
"""
# End the selenium session so we're off the clock.
self.driver.quit()
@pytest.mark.parametrize('query,leading',
[
pytest.param('Seattle', 'Seattle (/siˈætəl/ (listen) see-AT-əl) is a seaport city on the West Coast of the United States.'),
pytest.param('Selenium', "Selenium is a chemical element with the symbol Se and atomic number 34."),
pytest.param('Amazon Locker', 'Amazon Locker is a self-service package delivery service offered by online retailer Amazon.'),
pytest.param('Kootenai Falls','Kootenai Falls is a waterfall on the Kootenay River located in Lincoln County, Montana, just off U.S. Route 2.'),
pytest.param('Dorayaki', 'Dorayaki (どら焼き, どらやき, 銅鑼焼き, ドラ焼き) is a type of Japanese confection.'),
pytest.param('Robot Face', '<|°_°|> (also known as Robot Face or Robot)')
])
def test_first_paragraph_text(self, query, leading):
"""
This test looks at the first paragraph of a page on Wikipedia, comparing it to a known leading sentence.
If the leading sentence matches, the test passes. A screenshot is taken before the final assertion is made, letting us debug if something isn't right.
"""
# Open the main page of Wikipedia
self.driver.get("https://en.wikipedia.org/wiki/Main_Page")
# Find the search box, enter a query, and press enter
search_input = self.driver.find_element(By.ID, "searchInput")
search_input.click()
search_input.send_keys(query)
search_input.send_keys(Keys.ENTER)
# Wait for the search box to go stale -- This means we've navigated fully.
WebDriverWait(self.driver, 5).until(expected_conditions.staleness_of(search_input))
# Get the leading paragraph of the article.
lead = leading.lower()
# Find the element...
lead_para = self.driver.find_element(By.XPATH, "//div[@class='mw-parser-output']//p[not(@class)]")
# ... and copy out its text.
our_text = lead_para.text.lower()
our_text = our_text[:len(lead)]
# Take a screenshot and compare the strings.
self.save_screenshot(f"leadingpara_{query}.png")
assert our_text.startswith(lead)
@pytest.mark.parametrize(
"query,expected", [
pytest.param("Automation Testing","Test Automation"),
pytest.param("DevOps","DevOps"),
pytest.param("Jackdaws Love My Big Sphinx Of Quartz", "Pangram"),
pytest.param("EarthBound","EarthBound"),
pytest.param("Covered Bridges Today", "Covered Bridges Today"),
pytest.param("Kurt Godel", "Kurt Gödel"),
pytest.param("N//ng language", "Nǁng language"),
pytest.param("Who the Fuck Is Jackson Pollock?", "Who the $&% Is Jackson Pollock?")
] )
def test_redirect_titles(self,query,expected):
"""
A test comparing pages we expect to (or not to) redirect on Wikipedia.
This test checks to see that the page ("query") redirects (or doesn't) to the "expected" page title. Several of these are common synonyms ("Jackdaws...")
while others are because of characters untypable by most keyboards ("Nǁng language")
A screenshot is taken just before the final assertion is made to aid in debugging and verification.
"""
# Open the main page of Wikipedia
self.driver.get("https://en.wikipedia.org/wiki/Main_Page")
# Find the search box, enter some text into it, and send an enter key.
search_input = self.driver.find_element(By.ID, "searchInput")
search_input.click()
search_input.send_keys(query)
search_input.send_keys(Keys.ENTER)
# wait until the page has rolled over -- once the search input handle is stale, the browser has navigated.
WebDriverWait(self.driver, 5).until(expected_conditions.staleness_of(search_input))
# Get the first heading & take a screenshot
our_text = self.driver.find_element(By.ID, "firstHeading").text.lower()
self.save_screenshot(f"redirect_{query}.png")
# did it match?
assert our_text == expected.lower()
| 1 | 21,549 | ARN -> Amazon Resource Name (ARN) | awsdocs-aws-doc-sdk-examples | rb |
@@ -153,10 +153,7 @@ namespace Microsoft.AspNetCore.Server.Kestrel.FunctionalTests
var hostBuilder = new WebHostBuilder()
.UseKestrel()
- .ConfigureServices(services =>
- {
- services.AddSingleton<ILoggerFactory>(new KestrelTestLoggerFactory(testLogger));
- })
+ .UseLoggerFactory(_ => new KestrelTestLoggerFactory(testLogger))
.Configure(ConfigureEchoAddress);
using (var host = hostBuilder.Build()) | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Net.NetworkInformation;
using System.Net.Sockets;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Hosting.Server.Features;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Http.Extensions;
using Microsoft.AspNetCore.Server.Kestrel.Core;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure;
using Microsoft.AspNetCore.Testing;
using Microsoft.AspNetCore.Testing.xunit;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using Xunit;
namespace Microsoft.AspNetCore.Server.Kestrel.FunctionalTests
{
public class AddressRegistrationTests
{
[Theory, MemberData(nameof(AddressRegistrationDataIPv4))]
public async Task RegisterAddresses_IPv4_Success(string addressInput, Func<IServerAddressesFeature, string[]> testUrls)
{
await RegisterAddresses_Success(addressInput, testUrls);
}
[ConditionalTheory, MemberData(nameof(AddressRegistrationDataIPv4Port80))]
[PortSupportedCondition(80)]
public async Task RegisterAddresses_IPv4Port80_Success(string addressInput, Func<IServerAddressesFeature, string[]> testUrls)
{
await RegisterAddresses_Success(addressInput, testUrls);
}
[ConditionalTheory, MemberData(nameof(IPEndPointRegistrationDataRandomPort))]
[IPv6SupportedCondition]
public async Task RegisterIPEndPoint_RandomPort_Success(IPEndPoint endPoint, Func<IPEndPoint, string> testUrl)
{
await RegisterIPEndPoint_Success(endPoint, testUrl);
}
[ConditionalTheory, MemberData(nameof(IPEndPointRegistrationDataPort443))]
[IPv6SupportedCondition]
[PortSupportedCondition(443)]
public async Task RegisterIPEndPoint_Port443_Success(IPEndPoint endpoint, Func<IPEndPoint, string> testUrl)
{
await RegisterIPEndPoint_Success(endpoint, testUrl);
}
[ConditionalTheory, MemberData(nameof(AddressRegistrationDataIPv6))]
[IPv6SupportedCondition]
public async Task RegisterAddresses_IPv6_Success(string addressInput, Func<IServerAddressesFeature, string[]> testUrls)
{
await RegisterAddresses_Success(addressInput, testUrls);
}
[ConditionalTheory, MemberData(nameof(AddressRegistrationDataIPv6Port80))]
[IPv6SupportedCondition]
[PortSupportedCondition(80)]
public async Task RegisterAddresses_IPv6Port80_Success(string addressInput, Func<IServerAddressesFeature, string[]> testUrls)
{
await RegisterAddresses_Success(addressInput, testUrls);
}
[ConditionalTheory, MemberData(nameof(AddressRegistrationDataIPv6ScopeId))]
[IPv6SupportedCondition]
[IPv6ScopeIdPresentCondition]
public async Task RegisterAddresses_IPv6ScopeId_Success(string addressInput, Func<IServerAddressesFeature, string[]> testUrls)
{
await RegisterAddresses_Success(addressInput, testUrls);
}
private async Task RegisterAddresses_Success(string addressInput, Func<IServerAddressesFeature, string[]> testUrls)
{
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.UseUrls(addressInput)
.Configure(ConfigureEchoAddress);
using (var host = hostBuilder.Build())
{
host.Start();
foreach (var testUrl in testUrls(host.ServerFeatures.Get<IServerAddressesFeature>()))
{
var response = await HttpClientSlim.GetStringAsync(testUrl, validateCertificate: false);
// Compare the response with Uri.ToString(), rather than testUrl directly.
// Required to handle IPv6 addresses with zone index, like "fe80::3%1"
Assert.Equal(new Uri(testUrl).ToString(), response);
}
}
}
private async Task RegisterIPEndPoint_Success(IPEndPoint endPoint, Func<IPEndPoint, string> testUrl)
{
var hostBuilder = new WebHostBuilder()
.UseKestrel(options =>
{
options.Listen(endPoint, listenOptions =>
{
if (testUrl(listenOptions.IPEndPoint).StartsWith("https"))
{
listenOptions.UseHttps(TestResources.TestCertificatePath, "testPassword");
}
});
})
.Configure(ConfigureEchoAddress);
using (var host = hostBuilder.Build())
{
host.Start();
var options = ((IOptions<KestrelServerOptions>)host.Services.GetService(typeof(IOptions<KestrelServerOptions>))).Value;
Assert.Single(options.ListenOptions);
var listenOptions = options.ListenOptions[0];
var response = await HttpClientSlim.GetStringAsync(testUrl(listenOptions.IPEndPoint), validateCertificate: false);
// Compare the response with Uri.ToString(), rather than testUrl directly.
// Required to handle IPv6 addresses with zone index, like "fe80::3%1"
Assert.Equal(new Uri(testUrl(listenOptions.IPEndPoint)).ToString(), response);
}
}
[ConditionalFact]
[PortSupportedCondition(5000)]
public Task DefaultsServerAddress_BindsToIPv4()
{
return RegisterDefaultServerAddresses_Success(new[] { "http://127.0.0.1:5000" });
}
[ConditionalFact]
[IPv6SupportedCondition]
[PortSupportedCondition(5000)]
public Task DefaultsServerAddress_BindsToIPv6()
{
return RegisterDefaultServerAddresses_Success(new[] { "http://127.0.0.1:5000", "http://[::1]:5000" });
}
private async Task RegisterDefaultServerAddresses_Success(IEnumerable<string> addresses)
{
var testLogger = new TestApplicationErrorLogger();
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.ConfigureServices(services =>
{
services.AddSingleton<ILoggerFactory>(new KestrelTestLoggerFactory(testLogger));
})
.Configure(ConfigureEchoAddress);
using (var host = hostBuilder.Build())
{
host.Start();
Assert.Equal(5000, host.GetPort());
Assert.Single(testLogger.Messages, log => log.LogLevel == LogLevel.Debug &&
string.Equals($"No listening endpoints were configured. Binding to {Constants.DefaultServerAddress} by default.",
log.Message, StringComparison.Ordinal));
foreach (var address in addresses)
{
Assert.Equal(new Uri(address).ToString(), await HttpClientSlim.GetStringAsync(address));
}
}
}
[Fact]
public void ThrowsWhenBindingToIPv4AddressInUse()
{
using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp))
{
var port = GetNextPort();
socket.Bind(new IPEndPoint(IPAddress.Loopback, port));
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.UseUrls($"http://127.0.0.1:{port}")
.Configure(ConfigureEchoAddress);
using (var host = hostBuilder.Build())
{
var exception = Assert.Throws<IOException>(() => host.Start());
Assert.Equal($"Failed to bind to address http://127.0.0.1:{port}: address already in use.", exception.Message);
}
}
}
[ConditionalFact]
[IPv6SupportedCondition]
public void ThrowsWhenBindingToIPv6AddressInUse()
{
using (var socket = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp))
{
var port = GetNextPort();
socket.Bind(new IPEndPoint(IPAddress.IPv6Loopback, port));
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.UseUrls($"http://[::1]:{port}")
.Configure(ConfigureEchoAddress);
using (var host = hostBuilder.Build())
{
var exception = Assert.Throws<IOException>(() => host.Start());
Assert.Equal($"Failed to bind to address http://[::1]:{port}: address already in use.", exception.Message);
}
}
}
[Fact]
public void ThrowsWhenBindingLocalhostToIPv4AddressInUse()
{
ThrowsWhenBindingLocalhostToAddressInUse(AddressFamily.InterNetwork, IPAddress.Loopback);
}
[ConditionalFact]
[IPv6SupportedCondition]
public void ThrowsWhenBindingLocalhostToIPv6AddressInUse()
{
ThrowsWhenBindingLocalhostToAddressInUse(AddressFamily.InterNetworkV6, IPAddress.IPv6Loopback);
}
[Fact]
public void ThrowsWhenBindingLocalhostToDynamicPort()
{
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.UseUrls("http://localhost:0")
.Configure(ConfigureEchoAddress);
using (var host = hostBuilder.Build())
{
Assert.Throws<InvalidOperationException>(() => host.Start());
}
}
private void ThrowsWhenBindingLocalhostToAddressInUse(AddressFamily addressFamily, IPAddress address)
{
using (var socket = new Socket(addressFamily, SocketType.Stream, ProtocolType.Tcp))
{
var port = GetNextPort();
socket.Bind(new IPEndPoint(address, port));
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.UseUrls($"http://localhost:{port}")
.Configure(ConfigureEchoAddress);
using (var host = hostBuilder.Build())
{
var exception = Assert.Throws<IOException>(() => host.Start());
Assert.Equal(
$"Failed to bind to address http://localhost:{port} on the {(addressFamily == AddressFamily.InterNetwork ? "IPv4" : "IPv6")} loopback interface: port already in use.",
exception.Message);
}
}
}
public static TheoryData<string, Func<IServerAddressesFeature, string[]>> AddressRegistrationDataIPv4
{
get
{
var dataset = new TheoryData<string, Func<IServerAddressesFeature, string[]>>();
// Default host and port
dataset.Add(null, _ => new[] { "http://127.0.0.1:5000/" });
dataset.Add(string.Empty, _ => new[] { "http://127.0.0.1:5000/" });
// Static ports
var port = GetNextPort();
// Loopback
dataset.Add($"http://127.0.0.1:{port}", _ => new[] { $"http://127.0.0.1:{port}/" });
// Localhost
dataset.Add($"http://localhost:{port}", _ => new[] { $"http://localhost:{port}/", $"http://127.0.0.1:{port}/" });
// Any
dataset.Add($"http://*:{port}/", _ => new[] { $"http://127.0.0.1:{port}/" });
dataset.Add($"http://+:{port}/", _ => new[] { $"http://127.0.0.1:{port}/" });
// Dynamic port and non-loopback addresses
dataset.Add("http://127.0.0.1:0/", GetTestUrls);
dataset.Add($"http://{Dns.GetHostName()}:0/", GetTestUrls);
var ipv4Addresses = GetIPAddresses()
.Where(ip => ip.AddressFamily == AddressFamily.InterNetwork);
foreach (var ip in ipv4Addresses)
{
dataset.Add($"http://{ip}:0/", GetTestUrls);
}
return dataset;
}
}
public static TheoryData<IPEndPoint, Func<IPEndPoint, string>> IPEndPointRegistrationDataRandomPort
{
get
{
var dataset = new TheoryData<IPEndPoint, Func<IPEndPoint, string>>();
// Static port
var port = GetNextPort();
// Loopback
dataset.Add(new IPEndPoint(IPAddress.Loopback, port), _ => $"http://127.0.0.1:{port}/");
dataset.Add(new IPEndPoint(IPAddress.Loopback, port), _ => $"https://127.0.0.1:{port}/");
// IPv6 loopback
dataset.Add(new IPEndPoint(IPAddress.IPv6Loopback, port), _ => FixTestUrl($"http://[::1]:{port}/"));
dataset.Add(new IPEndPoint(IPAddress.IPv6Loopback, port), _ => FixTestUrl($"https://[::1]:{port}/"));
// Any
dataset.Add(new IPEndPoint(IPAddress.Any, port), _ => $"http://127.0.0.1:{port}/");
dataset.Add(new IPEndPoint(IPAddress.Any, port), _ => $"https://127.0.0.1:{port}/");
// IPv6 Any
dataset.Add(new IPEndPoint(IPAddress.IPv6Any, port), _ => $"http://127.0.0.1:{port}/");
dataset.Add(new IPEndPoint(IPAddress.IPv6Any, port), _ => FixTestUrl($"http://[::1]:{port}/"));
dataset.Add(new IPEndPoint(IPAddress.IPv6Any, port), _ => $"https://127.0.0.1:{port}/");
dataset.Add(new IPEndPoint(IPAddress.IPv6Any, port), _ => FixTestUrl($"https://[::1]:{port}/"));
// Dynamic port
dataset.Add(new IPEndPoint(IPAddress.Loopback, 0), endPoint => $"http://127.0.0.1:{endPoint.Port}/");
dataset.Add(new IPEndPoint(IPAddress.Loopback, 0), endPoint => $"https://127.0.0.1:{endPoint.Port}/");
var ipv4Addresses = GetIPAddresses()
.Where(ip => ip.AddressFamily == AddressFamily.InterNetwork);
foreach (var ip in ipv4Addresses)
{
dataset.Add(new IPEndPoint(ip, 0), endPoint => FixTestUrl($"http://{endPoint}/"));
dataset.Add(new IPEndPoint(ip, 0), endPoint => FixTestUrl($"https://{endPoint}/"));
}
return dataset;
}
}
public static TheoryData<string, Func<IServerAddressesFeature, string[]>> AddressRegistrationDataIPv4Port80
{
get
{
var dataset = new TheoryData<string, Func<IServerAddressesFeature, string[]>>();
// Default port for HTTP (80)
dataset.Add("http://127.0.0.1", _ => new[] { "http://127.0.0.1/" });
dataset.Add("http://localhost", _ => new[] { "http://127.0.0.1/" });
dataset.Add("http://*", _ => new[] { "http://127.0.0.1/" });
return dataset;
}
}
public static TheoryData<IPEndPoint, Func<IPEndPoint, string>> IPEndPointRegistrationDataPort443
{
get
{
var dataset = new TheoryData<IPEndPoint, Func<IPEndPoint, string>>();
dataset.Add(new IPEndPoint(IPAddress.Loopback, 443), _ => "https://127.0.0.1/");
dataset.Add(new IPEndPoint(IPAddress.IPv6Loopback, 443), _ => FixTestUrl("https://[::1]/"));
dataset.Add(new IPEndPoint(IPAddress.Any, 443), _ => "https://127.0.0.1/");
dataset.Add(new IPEndPoint(IPAddress.IPv6Any, 443), _ => FixTestUrl("https://[::1]/"));
return dataset;
}
}
public static TheoryData<string, Func<IServerAddressesFeature, string[]>> AddressRegistrationDataIPv6
{
get
{
var dataset = new TheoryData<string, Func<IServerAddressesFeature, string[]>>();
// Default host and port
dataset.Add(null, _ => new[] { "http://127.0.0.1:5000/", "http://[::1]:5000/" });
dataset.Add(string.Empty, _ => new[] { "http://127.0.0.1:5000/", "http://[::1]:5000/" });
// Static ports
var port = GetNextPort();
// Loopback
dataset.Add($"http://[::1]:{port}/",
_ => new[] { $"http://[::1]:{port}/" });
// Localhost
dataset.Add($"http://localhost:{port}",
_ => new[] { $"http://localhost:{port}/", $"http://127.0.0.1:{port}/", $"http://[::1]:{port}/" });
// Any
dataset.Add($"http://*:{port}/",
_ => new[] { $"http://127.0.0.1:{port}/", $"http://[::1]:{port}/" });
dataset.Add($"http://+:{port}/",
_ => new[] { $"http://127.0.0.1:{port}/", $"http://[::1]:{port}/" });
// Explicit IPv4 and IPv6 on same port
dataset.Add($"http://127.0.0.1:{port}/;http://[::1]:{port}/",
_ => new[] { $"http://127.0.0.1:{port}/", $"http://[::1]:{port}/" });
// Dynamic port and non-loopback addresses
var ipv6Addresses = GetIPAddresses()
.Where(ip => ip.AddressFamily == AddressFamily.InterNetworkV6)
.Where(ip => ip.ScopeId == 0);
foreach (var ip in ipv6Addresses)
{
dataset.Add($"http://[{ip}]:0/", GetTestUrls);
}
return dataset;
}
}
public static TheoryData<string, Func<IServerAddressesFeature, string[]>> AddressRegistrationDataIPv6Port80
{
get
{
var dataset = new TheoryData<string, Func<IServerAddressesFeature, string[]>>();
// Default port for HTTP (80)
dataset.Add("http://[::1]", _ => new[] { "http://[::1]/" });
dataset.Add("http://localhost", _ => new[] { "http://127.0.0.1/", "http://[::1]/" });
dataset.Add("http://*", _ => new[] { "http://[::1]/" });
return dataset;
}
}
public static TheoryData<string, Func<IServerAddressesFeature, string[]>> AddressRegistrationDataIPv6ScopeId
{
get
{
var dataset = new TheoryData<string, Func<IServerAddressesFeature, string[]>>();
// Dynamic port
var ipv6Addresses = GetIPAddresses()
.Where(ip => ip.AddressFamily == AddressFamily.InterNetworkV6)
.Where(ip => ip.ScopeId != 0)
.Where(ip => CanBindAndConnectToEndpoint(new IPEndPoint(ip, 0)));
foreach (var ip in ipv6Addresses)
{
dataset.Add($"http://[{ip}]:0/", GetTestUrls);
}
// There may be no addresses with scope IDs and we need at least one data item in the
// collection, otherwise xUnit fails the test run because a theory has no data.
dataset.Add("http://[::1]:0", GetTestUrls);
return dataset;
}
}
private static IEnumerable<IPAddress> GetIPAddresses()
{
return NetworkInterface.GetAllNetworkInterfaces()
.Where(i => i.OperationalStatus == OperationalStatus.Up)
.SelectMany(i => i.GetIPProperties().UnicastAddresses)
.Select(a => a.Address);
}
private static string[] GetTestUrls(IServerAddressesFeature addressesFeature)
{
return addressesFeature.Addresses
.Select(FixTestUrl)
.ToArray();
}
private static string FixTestUrl(string url)
{
var fixedUrl = url.Replace("://+", "://localhost")
.Replace("0.0.0.0", Dns.GetHostName())
.Replace("[::]", Dns.GetHostName());
if (!fixedUrl.EndsWith("/"))
{
fixedUrl = fixedUrl + "/";
}
return fixedUrl;
}
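        // For example (illustrative): "http://+:5000" becomes "http://localhost:5000/"
        // and "http://0.0.0.0:5000" becomes "http://<machine-name>:5000/".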
private void ConfigureEchoAddress(IApplicationBuilder app)
{
app.Run(context =>
{
return context.Response.WriteAsync(context.Request.GetDisplayUrl());
});
}
private static int GetNextPort()
{
using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp))
{
// Let the OS assign the next available port. Unless we cycle through all ports
// on a test run, the OS will always increment the port number when making these calls.
// This prevents races in parallel test runs where a test is already bound to
// a given port, and a new test is able to bind to the same port due to port
// reuse being enabled by default by the OS.
socket.Bind(new IPEndPoint(IPAddress.Loopback, 0));
return ((IPEndPoint)socket.LocalEndPoint).Port;
}
}
private static bool CanBindAndConnectToEndpoint(IPEndPoint endPoint)
{
try
{
using (var serverSocket = new Socket(endPoint.AddressFamily, SocketType.Stream, ProtocolType.Tcp))
{
serverSocket.Bind(endPoint);
serverSocket.Listen(1);
var socketArgs = new SocketAsyncEventArgs
{
RemoteEndPoint = serverSocket.LocalEndPoint
};
var mre = new ManualResetEventSlim();
socketArgs.Completed += (s, e) =>
{
mre.Set();
e.ConnectSocket?.Dispose();
};
// Connect can take a couple minutes to time out.
if (Socket.ConnectAsync(SocketType.Stream, ProtocolType.Tcp, socketArgs))
{
return mre.Wait(5000) && socketArgs.SocketError == SocketError.Success;
}
else
{
socketArgs.ConnectSocket?.Dispose();
return socketArgs.SocketError == SocketError.Success;
}
}
}
catch (SocketException)
{
return false;
}
}
[AttributeUsage(AttributeTargets.Method, AllowMultiple = false)]
private class PortSupportedConditionAttribute : Attribute, ITestCondition
{
private readonly int _port;
private readonly Lazy<bool> _portSupported;
public PortSupportedConditionAttribute(int port)
{
_port = port;
_portSupported = new Lazy<bool>(CanBindToPort);
}
public bool IsMet
{
get
{
return _portSupported.Value;
}
}
public string SkipReason
{
get
{
return $"Cannot bind to port {_port} on the host.";
}
}
private bool CanBindToPort()
{
try
{
using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp))
{
socket.Bind(new IPEndPoint(IPAddress.Loopback, _port));
return true;
}
}
catch (SocketException)
{
return false;
}
}
}
}
}
| 1 | 12,628 | Why not use the overload that takes an instance? | aspnet-KestrelHttpServer | .cs |
@@ -7,16 +7,13 @@
package account
import (
- "context"
"fmt"
+ "strings"
"github.com/spf13/cobra"
"go.uber.org/zap"
- grpc "google.golang.org/grpc"
- "github.com/iotexproject/iotex-core/cli/ioctl/cmd/config"
"github.com/iotexproject/iotex-core/pkg/log"
- pb "github.com/iotexproject/iotex-core/protogen/iotexapi"
)
// TODO: use wallet config later
 | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package account
import (
"context"
"fmt"
"github.com/spf13/cobra"
"go.uber.org/zap"
grpc "google.golang.org/grpc"
"github.com/iotexproject/iotex-core/cli/ioctl/cmd/config"
"github.com/iotexproject/iotex-core/pkg/log"
pb "github.com/iotexproject/iotex-core/protogen/iotexapi"
)
// TODO: use wallet config later
var configAddress = "ioaddress"
// accountBalanceCmd represents the account balance command
var accountBalanceCmd = &cobra.Command{
Use: "balance",
Short: "Get balance of an account",
Run: func(cmd *cobra.Command, args []string) {
fmt.Println(balance(args))
},
}
func init() {
AccountCmd.AddCommand(accountBalanceCmd)
}
// balance gets balance of an IoTex Blockchian address
func balance(args []string) string {
endpoint := config.GetEndpoint()
if endpoint == config.ErrEmptyEndpoint {
log.L().Error(config.ErrEmptyEndpoint)
return "use \"ioctl config set endpoint\" to config endpoint first."
}
conn, err := grpc.Dial(endpoint, grpc.WithInsecure())
if err != nil {
log.L().Error("failed to connect to server", zap.Error(err))
return err.Error()
}
defer conn.Close()
cli := pb.NewAPIServiceClient(conn)
ctx := context.Background()
request := pb.GetAccountRequest{}
res := ""
if len(args) == 0 {
request.Address = configAddress
response, err := cli.GetAccount(ctx, &request)
if err != nil {
log.L().Error("cannot get account from "+request.Address, zap.Error(err))
return err.Error()
}
accountMeta := response.AccountMeta
res += fmt.Sprintf("%s: %s\n", request.Address, accountMeta.Balance)
} else {
for _, addr := range args {
request.Address = addr
response, err := cli.GetAccount(ctx, &request)
if err != nil {
log.L().Error("cannot get account from "+request.Address, zap.Error(err))
return err.Error()
}
accountMeta := response.AccountMeta
res += fmt.Sprintf("%s: %s\n", request.Address, accountMeta.Balance)
}
}
return res
}
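// Example session (hypothetical address and balance):
//
//	$ ioctl account balance io1exampleaddress
//	io1exampleaddress: 100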
| 1 | 15,873 | `Blockchian` is a misspelling of `Blockchain` (from `misspell`) | iotexproject-iotex-core | go |
@@ -44,7 +44,7 @@ type NvidiaGPUManager struct {
DriverVersion string `json:"DriverVersion"`
GPUIDs []string `json:"GPUIDs"`
GPUDevices []*ecs.PlatformDevice `json:"-"`
- lock sync.RWMutex `json:"-"`
+ lock sync.RWMutex
}
const (
 | 1 | // +build linux
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package gpu
import (
"encoding/json"
"io/ioutil"
"os"
"sync"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/aws-sdk-go/aws"
"github.com/cihub/seelog"
"github.com/pkg/errors"
)
// GPUManager encompasses methods to get information on GPUs and their driver
type GPUManager interface {
Initialize() error
SetGPUIDs([]string)
GetGPUIDsUnsafe() []string
SetDevices()
GetDevices() []*ecs.PlatformDevice
SetDriverVersion(string)
GetDriverVersion() string
}
// NvidiaGPUManager is used as a wrapper for NVML APIs and implements GPUManager
// interface
type NvidiaGPUManager struct {
DriverVersion string `json:"DriverVersion"`
GPUIDs []string `json:"GPUIDs"`
GPUDevices []*ecs.PlatformDevice `json:"-"`
lock sync.RWMutex `json:"-"`
}
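// Note: encoding/json never marshals unexported fields, so the json:"-" tag on
// lock above is redundant and is what trips `go vet`'s structtag check (the
// warning quoted below). A minimal illustration with made-up names:
//
//	type example struct {
//	    Exported string `json:"exported"`
//	    mu       sync.Mutex // skipped by json.Marshal even without a tag
//	}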
const (
// GPUInfoDirPath is the directory where gpus and driver info are saved
GPUInfoDirPath = "/var/lib/ecs/gpu"
// NvidiaGPUInfoFilePath is the file path where gpus and driver info are saved
NvidiaGPUInfoFilePath = GPUInfoDirPath + "/nvidia-gpu-info.json"
)
// NewNvidiaGPUManager is used to obtain NvidiaGPUManager handle
func NewNvidiaGPUManager() GPUManager {
return &NvidiaGPUManager{}
}
// Initialize sets the fields of Nvidia GPU Manager struct
func (n *NvidiaGPUManager) Initialize() error {
if GPUInfoFileExists() {
// GPU info file found
gpuJSON, err := GetGPUInfoJSON()
if err != nil {
return errors.Wrapf(err, "could not read GPU file content")
}
var nvidiaGPUInfo NvidiaGPUManager
err = json.Unmarshal(gpuJSON, &nvidiaGPUInfo)
if err != nil {
return errors.Wrapf(err, "could not unmarshal GPU file content")
}
n.SetDriverVersion(nvidiaGPUInfo.GetDriverVersion())
nvidiaGPUInfo.lock.RLock()
gpuIDs := nvidiaGPUInfo.GetGPUIDsUnsafe()
nvidiaGPUInfo.lock.RUnlock()
n.SetGPUIDs(gpuIDs)
n.SetDevices()
} else {
seelog.Error("Config for GPU support is enabled, but GPU information is not found; continuing without it")
}
return nil
}
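// For reference, the file read above holds JSON along these lines (illustrative
// values only):
//
//	{"DriverVersion":"418.87.00","GPUIDs":["GPU-0","GPU-1"]}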
var GPUInfoFileExists = CheckForGPUInfoFile
func CheckForGPUInfoFile() bool {
_, err := os.Stat(NvidiaGPUInfoFilePath)
return !os.IsNotExist(err)
}
var GetGPUInfoJSON = GetGPUInfo
func GetGPUInfo() ([]byte, error) {
gpuInfo, err := os.Open(NvidiaGPUInfoFilePath)
if err != nil {
return nil, err
}
defer gpuInfo.Close()
gpuJSON, err := ioutil.ReadAll(gpuInfo)
if err != nil {
return nil, err
}
return gpuJSON, nil
}
// SetGPUIDs sets the GPUIDs
func (n *NvidiaGPUManager) SetGPUIDs(gpuIDs []string) {
n.lock.Lock()
defer n.lock.Unlock()
n.GPUIDs = gpuIDs
}
// GetGPUIDsUnsafe returns the GPUIDs without taking the lock; callers must hold it themselves
func (n *NvidiaGPUManager) GetGPUIDsUnsafe() []string {
return n.GPUIDs
}
// SetDriverVersion is a setter for nvidia driver version
func (n *NvidiaGPUManager) SetDriverVersion(version string) {
n.lock.Lock()
defer n.lock.Unlock()
n.DriverVersion = version
}
// GetDriverVersion is a getter for nvidia driver version
func (n *NvidiaGPUManager) GetDriverVersion() string {
n.lock.RLock()
defer n.lock.RUnlock()
return n.DriverVersion
}
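// SetDevices builds the ECS PlatformDevice list from the GPU IDs currently configured on the manager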
func (n *NvidiaGPUManager) SetDevices() {
n.lock.Lock()
defer n.lock.Unlock()
gpuIDs := n.GetGPUIDsUnsafe()
devices := make([]*ecs.PlatformDevice, 0)
for _, gpuID := range gpuIDs {
devices = append(devices, &ecs.PlatformDevice{
Id: aws.String(gpuID),
Type: aws.String(ecs.PlatformDeviceTypeGpu),
})
}
n.GPUDevices = devices
}
// GetDevices returns the GPU devices as PlatformDevices
func (n *NvidiaGPUManager) GetDevices() []*ecs.PlatformDevice {
n.lock.RLock()
defer n.lock.RUnlock()
return n.GPUDevices
}
| 1 | 21,841 | `agent/gpu/nvidia_gpu_manager_unix.go:47: struct field lock has json tag but is not exported` | aws-amazon-ecs-agent | go |
@@ -11,6 +11,9 @@ from localstack.utils.aws.aws_models import LambdaFunction
from localstack.constants import LAMBDA_TEST_ROLE
+TEST_ARN = 'arn:aws:sqs:eu-west-1:000000000000:testq'
+
+
class TestLambdaAPI(unittest.TestCase):
CODE_SIZE = 50
    CODE_SHA_256 = '/u60ZpAA9bzZPVwb8d4390i5oqP1YAObUwV03CZvsWA='
 | 1 | import os
import re
import json
import unittest
import mock
import time
import datetime
from localstack.utils.common import save_file, new_tmp_dir, mkdir
from localstack.services.awslambda import lambda_api, lambda_executors
from localstack.utils.aws.aws_models import LambdaFunction
from localstack.constants import LAMBDA_TEST_ROLE
class TestLambdaAPI(unittest.TestCase):
CODE_SIZE = 50
CODE_SHA_256 = '/u60ZpAA9bzZPVwb8d4390i5oqP1YAObUwV03CZvsWA='
MEMORY_SIZE = 128
ROLE = 'arn:aws:iam::123456:role/role-name'
LAST_MODIFIED = '2019-05-25T17:00:48.260+0000'
TRACING_CONFIG = {'Mode': 'PassThrough'}
REVISION_ID = 'e54dbcf8-e3ef-44ab-9af7-8dbef510608a'
HANDLER = 'index.handler'
RUNTIME = 'node.js4.3'
TIMEOUT = 60 # Default value, hardcoded
FUNCTION_NAME = 'test1'
ALIAS_NAME = 'alias1'
ALIAS2_NAME = 'alias2'
RESOURCENOTFOUND_EXCEPTION = 'ResourceNotFoundException'
RESOURCENOTFOUND_MESSAGE = 'Function not found: %s'
ALIASEXISTS_EXCEPTION = 'ResourceConflictException'
ALIASEXISTS_MESSAGE = 'Alias already exists: %s'
ALIASNOTFOUND_EXCEPTION = 'ResourceNotFoundException'
ALIASNOTFOUND_MESSAGE = 'Alias not found: %s'
TEST_UUID = 'Test'
TAGS = {'hello': 'world', 'env': 'prod'}
def setUp(self):
lambda_api.cleanup()
self.maxDiff = None
self.app = lambda_api.app
self.app.testing = True
self.client = self.app.test_client()
def test_get_non_existent_function_returns_error(self):
with self.app.test_request_context():
result = json.loads(lambda_api.get_function('non_existent_function_name').get_data())
self.assertEqual(self.RESOURCENOTFOUND_EXCEPTION, result['__type'])
self.assertEqual(
self.RESOURCENOTFOUND_MESSAGE % lambda_api.func_arn('non_existent_function_name'),
result['message'])
def test_get_event_source_mapping(self):
with self.app.test_request_context():
lambda_api.event_source_mappings.append({'UUID': self.TEST_UUID})
result = lambda_api.get_event_source_mapping(self.TEST_UUID)
self.assertEqual(json.loads(result.get_data()).get('UUID'), self.TEST_UUID)
def test_get_event_sources(self):
with self.app.test_request_context():
lambda_api.event_source_mappings.append(
{
'UUID': self.TEST_UUID,
'EventSourceArn': 'the_arn'
})
# Match source ARN
result = lambda_api.get_event_sources(source_arn='the_arn')
self.assertEqual(len(result), 1)
self.assertEqual(result[0].get('UUID'), self.TEST_UUID)
# No partial match on source ARN
result = lambda_api.get_event_sources(source_arn='the_')
self.assertEqual(len(result), 0)
def test_get_event_sources_with_paths(self):
with self.app.test_request_context():
lambda_api.event_source_mappings.append(
{
'UUID': self.TEST_UUID,
'EventSourceArn': 'the_arn/path/subpath'
})
# Do partial match on paths
result = lambda_api.get_event_sources(source_arn='the_arn')
self.assertEqual(len(result), 1)
result = lambda_api.get_event_sources(source_arn='the_arn/path')
self.assertEqual(len(result), 1)
def test_delete_event_source_mapping(self):
with self.app.test_request_context():
lambda_api.event_source_mappings.append({'UUID': self.TEST_UUID})
result = lambda_api.delete_event_source_mapping(self.TEST_UUID)
self.assertEqual(json.loads(result.get_data()).get('UUID'), self.TEST_UUID)
self.assertEqual(0, len(lambda_api.event_source_mappings))
def test_invoke_RETURNS_415_WHEN_not_json_input(self):
with self.app.test_request_context() as context:
context.request._cached_data = '~notjsonrequest~'
response = lambda_api.invoke_function(self.FUNCTION_NAME)
self.assertEqual('415 UNSUPPORTED MEDIA TYPE', response.status)
def _request_response(self, context):
context.request._cached_data = '{}'
context.request.args = {'Qualifier': '$LATEST'}
context.request.environ['HTTP_X_AMZ_INVOCATION_TYPE'] = 'RequestResponse'
self._create_function(self.FUNCTION_NAME)
@mock.patch('localstack.services.awslambda.lambda_api.run_lambda')
def test_invoke_plain_text_response(self, mock_run_lambda):
with self.app.test_request_context() as context:
self._request_response(context)
mock_run_lambda.return_value = '~notjsonresponse~'
response = lambda_api.invoke_function(self.FUNCTION_NAME)
self.assertEqual('~notjsonresponse~', response[0])
self.assertEqual(200, response[1])
self.assertEqual({'Content-Type': 'text/plain'}, response[2])
@mock.patch('localstack.services.awslambda.lambda_api.run_lambda')
def test_invoke_empty_plain_text_response(self, mock_run_lambda):
with self.app.test_request_context() as context:
self._request_response(context)
mock_run_lambda.return_value = ''
response = lambda_api.invoke_function(self.FUNCTION_NAME)
self.assertEqual('', response[0])
self.assertEqual(200, response[1])
self.assertEqual({'Content-Type': 'text/plain'}, response[2])
@mock.patch('localstack.services.awslambda.lambda_api.run_lambda')
def test_invoke_empty_map_json_response(self, mock_run_lambda):
with self.app.test_request_context() as context:
self._request_response(context)
mock_run_lambda.return_value = '{}'
response = lambda_api.invoke_function(self.FUNCTION_NAME)
self.assertEqual(b'{}\n', response[0].response[0])
self.assertEqual(200, response[1])
            self.assertEqual({'Content-Type': 'application/json'}, response[2])
@mock.patch('localstack.services.awslambda.lambda_api.run_lambda')
def test_invoke_populated_map_json_response(self, mock_run_lambda):
with self.app.test_request_context() as context:
self._request_response(context)
mock_run_lambda.return_value = '{"bool":true,"int":1}'
response = lambda_api.invoke_function(self.FUNCTION_NAME)
self.assertEqual(b'{"bool":true,"int":1}\n', response[0].response[0])
self.assertEqual(200, response[1])
            self.assertEqual({'Content-Type': 'application/json'}, response[2])
@mock.patch('localstack.services.awslambda.lambda_api.run_lambda')
def test_invoke_empty_list_json_response(self, mock_run_lambda):
with self.app.test_request_context() as context:
self._request_response(context)
mock_run_lambda.return_value = '[]'
response = lambda_api.invoke_function(self.FUNCTION_NAME)
self.assertEqual(b'[]\n', response[0].response[0])
self.assertEqual(200, response[1])
            self.assertEqual({'Content-Type': 'application/json'}, response[2])
@mock.patch('localstack.services.awslambda.lambda_api.run_lambda')
def test_invoke_populated_list_json_response(self, mock_run_lambda):
with self.app.test_request_context() as context:
self._request_response(context)
mock_run_lambda.return_value = '[true,1,"thing"]'
response = lambda_api.invoke_function(self.FUNCTION_NAME)
self.assertEqual(b'[true,1,"thing"]\n', response[0].response[0])
self.assertEqual(200, response[1])
            self.assertEqual({'Content-Type': 'application/json'}, response[2])
@mock.patch('localstack.services.awslambda.lambda_api.run_lambda')
def test_invoke_string_json_response(self, mock_run_lambda):
with self.app.test_request_context() as context:
self._request_response(context)
mock_run_lambda.return_value = '"thing"'
response = lambda_api.invoke_function(self.FUNCTION_NAME)
self.assertEqual(b'"thing"\n', response[0].response[0])
self.assertEqual(200, response[1])
self.assertEqual({'Content-Type': 'application/json'}, response[2])
@mock.patch('localstack.services.awslambda.lambda_api.run_lambda')
def test_invoke_integer_json_response(self, mock_run_lambda):
with self.app.test_request_context() as context:
self._request_response(context)
mock_run_lambda.return_value = '1234'
response = lambda_api.invoke_function(self.FUNCTION_NAME)
self.assertEqual(b'1234\n', response[0].response[0])
self.assertEqual(200, response[1])
self.assertEqual({'Content-Type': 'application/json'}, response[2])
@mock.patch('localstack.services.awslambda.lambda_api.run_lambda')
def test_invoke_float_json_response(self, mock_run_lambda):
with self.app.test_request_context() as context:
self._request_response(context)
mock_run_lambda.return_value = '1.3'
response = lambda_api.invoke_function(self.FUNCTION_NAME)
self.assertEqual(b'1.3\n', response[0].response[0])
self.assertEqual(200, response[1])
self.assertEqual({'Content-Type': 'application/json'}, response[2])
@mock.patch('localstack.services.awslambda.lambda_api.run_lambda')
def test_invoke_boolean_json_response(self, mock_run_lambda):
with self.app.test_request_context() as context:
self._request_response(context)
mock_run_lambda.return_value = 'true'
response = lambda_api.invoke_function(self.FUNCTION_NAME)
self.assertEqual(b'true\n', response[0].response[0])
self.assertEqual(200, response[1])
self.assertEqual({'Content-Type': 'application/json'}, response[2])
@mock.patch('localstack.services.awslambda.lambda_api.run_lambda')
def test_invoke_null_json_response(self, mock_run_lambda):
with self.app.test_request_context() as context:
self._request_response(context)
mock_run_lambda.return_value = 'null'
response = lambda_api.invoke_function(self.FUNCTION_NAME)
self.assertEqual(b'null\n', response[0].response[0])
self.assertEqual(200, response[1])
self.assertEqual({'Content-Type': 'application/json'}, response[2])
def test_create_event_source_mapping(self):
self.client.post('{0}/event-source-mappings/'.format(lambda_api.PATH_ROOT),
data=json.dumps({'FunctionName': 'test-lambda-function', 'EventSourceArn': 'fake-arn'}))
listResponse = self.client.get('{0}/event-source-mappings/'.format(lambda_api.PATH_ROOT))
listResult = json.loads(listResponse.get_data())
eventSourceMappings = listResult.get('EventSourceMappings')
self.assertEqual(1, len(eventSourceMappings))
self.assertEqual('Enabled', eventSourceMappings[0]['State'])
def test_create_disabled_event_source_mapping(self):
createResponse = self.client.post('{0}/event-source-mappings/'.format(lambda_api.PATH_ROOT),
data=json.dumps({'FunctionName': 'test-lambda-function',
'EventSourceArn': 'fake-arn',
'Enabled': 'false'}))
createResult = json.loads(createResponse.get_data())
self.assertEqual('Disabled', createResult['State'])
getResponse = self.client.get('{0}/event-source-mappings/{1}'.format(lambda_api.PATH_ROOT,
createResult.get('UUID')))
getResult = json.loads(getResponse.get_data())
self.assertEqual('Disabled', getResult['State'])
def test_update_event_source_mapping(self):
createResponse = self.client.post('{0}/event-source-mappings/'.format(lambda_api.PATH_ROOT),
data=json.dumps({'FunctionName': 'test-lambda-function',
'EventSourceArn': 'fake-arn',
'Enabled': 'true'}))
createResult = json.loads(createResponse.get_data())
putResponse = self.client.put('{0}/event-source-mappings/{1}'.format(lambda_api.PATH_ROOT,
createResult.get('UUID')), data=json.dumps({'Enabled': 'false'}))
putResult = json.loads(putResponse.get_data())
self.assertEqual('Disabled', putResult['State'])
getResponse = self.client.get('{0}/event-source-mappings/{1}'.format(lambda_api.PATH_ROOT,
createResult.get('UUID')))
getResult = json.loads(getResponse.get_data())
self.assertEqual('Disabled', getResult['State'])
def test_publish_function_version(self):
with self.app.test_request_context():
self._create_function(self.FUNCTION_NAME)
result = json.loads(lambda_api.publish_version(self.FUNCTION_NAME).get_data())
result2 = json.loads(lambda_api.publish_version(self.FUNCTION_NAME).get_data())
result.pop('RevisionId', None) # we need to remove this, since this is random, so we cannot know its value
result2.pop('RevisionId', None) # we need to remove this, since this is random, so we cannot know its value
expected_result = dict()
expected_result['CodeSize'] = self.CODE_SIZE
expected_result['CodeSha256'] = self.CODE_SHA_256
expected_result['FunctionArn'] = str(lambda_api.func_arn(self.FUNCTION_NAME)) + ':1'
expected_result['FunctionName'] = str(self.FUNCTION_NAME)
expected_result['Handler'] = str(self.HANDLER)
expected_result['Runtime'] = str(self.RUNTIME)
expected_result['Timeout'] = self.TIMEOUT
expected_result['Description'] = ''
expected_result['MemorySize'] = self.MEMORY_SIZE
expected_result['Role'] = self.ROLE
expected_result['LastModified'] = self.LAST_MODIFIED
expected_result['TracingConfig'] = self.TRACING_CONFIG
expected_result['Version'] = '1'
expected_result['State'] = 'Active'
expected_result['LastUpdateStatus'] = 'Successful'
expected_result2 = dict(expected_result)
expected_result2['FunctionArn'] = str(lambda_api.func_arn(self.FUNCTION_NAME)) + ':2'
expected_result2['Version'] = '2'
self.assertDictEqual(expected_result, result)
self.assertDictEqual(expected_result2, result2)
def test_publish_non_existant_function_version_returns_error(self):
with self.app.test_request_context():
result = json.loads(lambda_api.publish_version(self.FUNCTION_NAME).get_data())
self.assertEqual(self.RESOURCENOTFOUND_EXCEPTION, result['__type'])
self.assertEqual(self.RESOURCENOTFOUND_MESSAGE % lambda_api.func_arn(self.FUNCTION_NAME),
result['message'])
def test_list_function_versions(self):
with self.app.test_request_context():
self._create_function(self.FUNCTION_NAME)
lambda_api.publish_version(self.FUNCTION_NAME)
lambda_api.publish_version(self.FUNCTION_NAME)
result = json.loads(lambda_api.list_versions(self.FUNCTION_NAME).get_data())
for version in result['Versions']:
# we need to remove this, since this is random, so we cannot know its value
version.pop('RevisionId', None)
latest_version = dict()
latest_version['CodeSize'] = self.CODE_SIZE
latest_version['CodeSha256'] = self.CODE_SHA_256
latest_version['FunctionArn'] = str(lambda_api.func_arn(self.FUNCTION_NAME)) + ':$LATEST'
latest_version['FunctionName'] = str(self.FUNCTION_NAME)
latest_version['Handler'] = str(self.HANDLER)
latest_version['Runtime'] = str(self.RUNTIME)
latest_version['Timeout'] = self.TIMEOUT
latest_version['Description'] = ''
latest_version['MemorySize'] = self.MEMORY_SIZE
latest_version['Role'] = self.ROLE
latest_version['LastModified'] = self.LAST_MODIFIED
latest_version['TracingConfig'] = self.TRACING_CONFIG
latest_version['Version'] = '$LATEST'
latest_version['State'] = 'Active'
latest_version['LastUpdateStatus'] = 'Successful'
version1 = dict(latest_version)
version1['FunctionArn'] = str(lambda_api.func_arn(self.FUNCTION_NAME)) + ':1'
version1['Version'] = '1'
version2 = dict(latest_version)
version2['FunctionArn'] = str(lambda_api.func_arn(self.FUNCTION_NAME)) + ':2'
version2['Version'] = '2'
expected_result = {'Versions': sorted([latest_version, version1, version2],
key=lambda k: str(k.get('Version')))}
self.assertDictEqual(expected_result, result)
def test_list_non_existant_function_versions_returns_error(self):
with self.app.test_request_context():
result = json.loads(lambda_api.list_versions(self.FUNCTION_NAME).get_data())
self.assertEqual(self.RESOURCENOTFOUND_EXCEPTION, result['__type'])
self.assertEqual(self.RESOURCENOTFOUND_MESSAGE % lambda_api.func_arn(self.FUNCTION_NAME),
result['message'])
def test_create_alias(self):
self._create_function(self.FUNCTION_NAME)
self.client.post('{0}/functions/{1}/versions'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME))
response = self.client.post('{0}/functions/{1}/aliases'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME),
data=json.dumps({'Name': self.ALIAS_NAME, 'FunctionVersion': '1',
'Description': ''}))
result = json.loads(response.get_data())
result.pop('RevisionId', None) # we need to remove this, since this is random, so we cannot know its value
expected_result = {'AliasArn': lambda_api.func_arn(self.FUNCTION_NAME) + ':' + self.ALIAS_NAME,
'FunctionVersion': '1', 'Description': '', 'Name': self.ALIAS_NAME}
self.assertDictEqual(expected_result, result)
def test_create_alias_on_non_existant_function_returns_error(self):
with self.app.test_request_context():
result = json.loads(lambda_api.create_alias(self.FUNCTION_NAME).get_data())
self.assertEqual(self.RESOURCENOTFOUND_EXCEPTION, result['__type'])
self.assertEqual(self.RESOURCENOTFOUND_MESSAGE % lambda_api.func_arn(self.FUNCTION_NAME),
result['message'])
def test_create_alias_returns_error_if_already_exists(self):
self._create_function(self.FUNCTION_NAME)
self.client.post('{0}/functions/{1}/versions'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME))
data = json.dumps({'Name': self.ALIAS_NAME, 'FunctionVersion': '1', 'Description': ''})
self.client.post('{0}/functions/{1}/aliases'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME), data=data)
response = self.client.post('{0}/functions/{1}/aliases'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME),
data=data)
result = json.loads(response.get_data())
alias_arn = lambda_api.func_arn(self.FUNCTION_NAME) + ':' + self.ALIAS_NAME
self.assertEqual(self.ALIASEXISTS_EXCEPTION, result['__type'])
self.assertEqual(self.ALIASEXISTS_MESSAGE % alias_arn,
result['message'])
def test_update_alias(self):
self._create_function(self.FUNCTION_NAME)
self.client.post('{0}/functions/{1}/versions'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME))
self.client.post('{0}/functions/{1}/aliases'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME),
data=json.dumps({
'Name': self.ALIAS_NAME, 'FunctionVersion': '1', 'Description': ''}))
response = self.client.put('{0}/functions/{1}/aliases/{2}'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME,
self.ALIAS_NAME),
data=json.dumps({'FunctionVersion': '$LATEST', 'Description': 'Test-Description'}))
result = json.loads(response.get_data())
result.pop('RevisionId', None) # we need to remove this, since this is random, so we cannot know its value
expected_result = {'AliasArn': lambda_api.func_arn(self.FUNCTION_NAME) + ':' + self.ALIAS_NAME,
'FunctionVersion': '$LATEST', 'Description': 'Test-Description',
'Name': self.ALIAS_NAME}
self.assertDictEqual(expected_result, result)
def test_update_alias_on_non_existant_function_returns_error(self):
with self.app.test_request_context():
result = json.loads(lambda_api.update_alias(self.FUNCTION_NAME, self.ALIAS_NAME).get_data())
self.assertEqual(self.RESOURCENOTFOUND_EXCEPTION, result['__type'])
self.assertEqual(self.RESOURCENOTFOUND_MESSAGE % lambda_api.func_arn(self.FUNCTION_NAME),
result['message'])
def test_update_alias_on_non_existant_alias_returns_error(self):
with self.app.test_request_context():
self._create_function(self.FUNCTION_NAME)
result = json.loads(lambda_api.update_alias(self.FUNCTION_NAME, self.ALIAS_NAME).get_data())
alias_arn = lambda_api.func_arn(self.FUNCTION_NAME) + ':' + self.ALIAS_NAME
self.assertEqual(self.ALIASNOTFOUND_EXCEPTION, result['__type'])
self.assertEqual(self.ALIASNOTFOUND_MESSAGE % alias_arn, result['message'])
def test_get_alias(self):
self._create_function(self.FUNCTION_NAME)
self.client.post('{0}/functions/{1}/versions'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME))
self.client.post('{0}/functions/{1}/aliases'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME),
data=json.dumps({
'Name': self.ALIAS_NAME, 'FunctionVersion': '1', 'Description': ''}))
response = self.client.get('{0}/functions/{1}/aliases/{2}'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME,
self.ALIAS_NAME))
result = json.loads(response.get_data())
result.pop('RevisionId', None) # we need to remove this, since this is random, so we cannot know its value
expected_result = {'AliasArn': lambda_api.func_arn(self.FUNCTION_NAME) + ':' + self.ALIAS_NAME,
'FunctionVersion': '1', 'Description': '',
'Name': self.ALIAS_NAME}
self.assertDictEqual(expected_result, result)
def test_get_alias_on_non_existant_function_returns_error(self):
with self.app.test_request_context():
result = json.loads(lambda_api.get_alias(self.FUNCTION_NAME, self.ALIAS_NAME).get_data())
self.assertEqual(self.RESOURCENOTFOUND_EXCEPTION, result['__type'])
self.assertEqual(self.RESOURCENOTFOUND_MESSAGE % lambda_api.func_arn(self.FUNCTION_NAME),
result['message'])
def test_get_alias_on_non_existant_alias_returns_error(self):
with self.app.test_request_context():
self._create_function(self.FUNCTION_NAME)
result = json.loads(lambda_api.get_alias(self.FUNCTION_NAME, self.ALIAS_NAME).get_data())
alias_arn = lambda_api.func_arn(self.FUNCTION_NAME) + ':' + self.ALIAS_NAME
self.assertEqual(self.ALIASNOTFOUND_EXCEPTION, result['__type'])
self.assertEqual(self.ALIASNOTFOUND_MESSAGE % alias_arn, result['message'])
def test_list_aliases(self):
self._create_function(self.FUNCTION_NAME)
self.client.post('{0}/functions/{1}/versions'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME))
self.client.post('{0}/functions/{1}/aliases'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME),
data=json.dumps({'Name': self.ALIAS2_NAME, 'FunctionVersion': '$LATEST'}))
self.client.post('{0}/functions/{1}/aliases'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME),
data=json.dumps({'Name': self.ALIAS_NAME, 'FunctionVersion': '1',
'Description': self.ALIAS_NAME}))
response = self.client.get('{0}/functions/{1}/aliases'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME))
result = json.loads(response.get_data())
for alias in result['Aliases']:
alias.pop('RevisionId', None) # we need to remove this, since this is random, so we cannot know its value
expected_result = {'Aliases': [
{
'AliasArn': lambda_api.func_arn(self.FUNCTION_NAME) + ':' + self.ALIAS_NAME,
'FunctionVersion': '1',
'Name': self.ALIAS_NAME,
'Description': self.ALIAS_NAME
},
{
'AliasArn': lambda_api.func_arn(self.FUNCTION_NAME) + ':' + self.ALIAS2_NAME,
'FunctionVersion': '$LATEST',
'Name': self.ALIAS2_NAME,
'Description': ''
}
]}
self.assertDictEqual(expected_result, result)
def test_list_non_existant_function_aliases_returns_error(self):
with self.app.test_request_context():
result = json.loads(lambda_api.list_aliases(self.FUNCTION_NAME).get_data())
self.assertEqual(self.RESOURCENOTFOUND_EXCEPTION, result['__type'])
self.assertEqual(self.RESOURCENOTFOUND_MESSAGE % lambda_api.func_arn(self.FUNCTION_NAME),
result['message'])
def test_get_container_name(self):
executor = lambda_executors.EXECUTOR_CONTAINERS_REUSE
name = executor.get_container_name('arn:aws:lambda:us-east-1:00000000:function:my_function_name')
self.assertEqual(name, 'localstack_lambda_arn_aws_lambda_us-east-1_00000000_function_my_function_name')
def test_put_concurrency(self):
with self.app.test_request_context():
self._create_function(self.FUNCTION_NAME)
# note: PutFunctionConcurrency is mounted at: /2017-10-31
# NOT lambda_api.PATH_ROOT
# https://docs.aws.amazon.com/lambda/latest/dg/API_PutFunctionConcurrency.html
concurrency_data = {'ReservedConcurrentExecutions': 10}
response = self.client.put('/2017-10-31/functions/{0}/concurrency'.format(self.FUNCTION_NAME),
data=json.dumps(concurrency_data))
result = json.loads(response.get_data())
self.assertDictEqual(concurrency_data, result)
def test_concurrency_get_function(self):
with self.app.test_request_context():
self._create_function(self.FUNCTION_NAME)
# note: PutFunctionConcurrency is mounted at: /2017-10-31
# NOT lambda_api.PATH_ROOT
# https://docs.aws.amazon.com/lambda/latest/dg/API_PutFunctionConcurrency.html
concurrency_data = {'ReservedConcurrentExecutions': 10}
self.client.put('/2017-10-31/functions/{0}/concurrency'.format(self.FUNCTION_NAME),
data=json.dumps(concurrency_data))
response = self.client.get('{0}/functions/{1}'.format(lambda_api.PATH_ROOT, self.FUNCTION_NAME))
result = json.loads(response.get_data())
self.assertTrue('Concurrency' in result)
self.assertDictEqual(concurrency_data, result['Concurrency'])
def test_list_tags(self):
with self.app.test_request_context():
self._create_function(self.FUNCTION_NAME, self.TAGS)
arn = lambda_api.func_arn(self.FUNCTION_NAME)
response = self.client.get('{0}/tags/{1}'.format(lambda_api.PATH_ROOT, arn))
result = json.loads(response.get_data())
self.assertTrue('Tags' in result)
self.assertDictEqual(self.TAGS, result['Tags'])
def test_tag_resource(self):
with self.app.test_request_context():
self._create_function(self.FUNCTION_NAME)
arn = lambda_api.func_arn(self.FUNCTION_NAME)
response = self.client.get('{0}/tags/{1}'.format(lambda_api.PATH_ROOT, arn))
result = json.loads(response.get_data())
self.assertTrue('Tags' in result)
self.assertDictEqual({}, result['Tags'])
self.client.post('{0}/tags/{1}'.format(lambda_api.PATH_ROOT, arn), data=json.dumps({'Tags': self.TAGS}))
response = self.client.get('{0}/tags/{1}'.format(lambda_api.PATH_ROOT, arn))
result = json.loads(response.get_data())
self.assertTrue('Tags' in result)
self.assertDictEqual(self.TAGS, result['Tags'])
def test_tag_non_existent_function_returns_error(self):
with self.app.test_request_context():
arn = lambda_api.func_arn('non-existent-function')
response = self.client.post(
'{0}/tags/{1}'.format(lambda_api.PATH_ROOT, arn),
data=json.dumps({'Tags': self.TAGS}))
result = json.loads(response.get_data())
self.assertEqual(self.RESOURCENOTFOUND_EXCEPTION, result['__type'])
self.assertEqual(
self.RESOURCENOTFOUND_MESSAGE % arn,
result['message'])
def test_untag_resource(self):
with self.app.test_request_context():
self._create_function(self.FUNCTION_NAME, tags=self.TAGS)
arn = lambda_api.func_arn(self.FUNCTION_NAME)
response = self.client.get('{0}/tags/{1}'.format(lambda_api.PATH_ROOT, arn))
result = json.loads(response.get_data())
self.assertTrue('Tags' in result)
self.assertDictEqual(self.TAGS, result['Tags'])
self.client.delete('{0}/tags/{1}'.format(lambda_api.PATH_ROOT, arn), query_string={'tagKeys': 'env'})
response = self.client.get('{0}/tags/{1}'.format(lambda_api.PATH_ROOT, arn))
result = json.loads(response.get_data())
self.assertTrue('Tags' in result)
self.assertDictEqual({'hello': 'world'}, result['Tags'])
def test_java_options_empty_return_empty_value(self):
lambda_executors.config.LAMBDA_JAVA_OPTS = ''
result = lambda_executors.Util.get_java_opts()
self.assertFalse(result)
def test_java_options_with_only_memory_options(self):
expected = '-Xmx512M'
result = self.prepare_java_opts(expected)
self.assertEqual(expected, result)
def test_java_options_with_memory_options_and_agentlib_option(self):
expected = '.*transport=dt_socket,server=y,suspend=y,address=[0-9]+'
result = self.prepare_java_opts('-Xmx512M -agentlib:jdwp=transport=dt_socket,server=y'
',suspend=y,address=_debug_port_')
self.assertTrue(re.match(expected, result))
self.assertTrue(lambda_executors.Util.debug_java_port is not False)
def test_java_options_with_unset_debug_port_in_middle(self):
expected = '.*transport=dt_socket,server=y,address=[0-9]+,suspend=y'
result = self.prepare_java_opts('-Xmx512M -agentlib:jdwp=transport=dt_socket,server=y'
',address=_debug_port_,suspend=y')
self.assertTrue(re.match(expected, result))
self.assertTrue(lambda_executors.Util.debug_java_port is not False)
def test_java_options_with_configured_debug_port(self):
expected = '.*transport=dt_socket,server=y,suspend=y,address=1234'
result = self.prepare_java_opts('-Xmx512M -agentlib:jdwp=transport=dt_socket,server=y'
',suspend=y,address=1234')
self.assertTrue(re.match(expected, result))
self.assertEqual('1234', lambda_executors.Util.debug_java_port)
def test_java_options_with_configured_debug_port_in_middle(self):
expected = '.*transport=dt_socket,server=y,address=1234,suspend=y'
result = self.prepare_java_opts('-Xmx512M -agentlib:jdwp=transport=dt_socket,server=y'
',address=1234,suspend=y')
self.assertTrue(re.match(expected, result))
self.assertEqual('1234', lambda_executors.Util.debug_java_port)
def prepare_java_opts(self, java_opts):
lambda_executors.config.LAMBDA_JAVA_OPTS = java_opts
result = lambda_executors.Util.get_java_opts()
return result
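    # Illustrative: a LAMBDA_JAVA_OPTS value such as
    # '-Xmx512M -agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=_debug_port_'
    # comes back with the _debug_port_ placeholder swapped for a concrete port number,
    # which the regex assertions in the tests above check for.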
def test_get_java_lib_folder_classpath(self):
jar_file = os.path.join(new_tmp_dir(), 'foo.jar')
save_file(jar_file, '')
        self.assertEqual('.:foo.jar', lambda_executors.Util.get_java_classpath(jar_file))
def test_get_java_lib_folder_classpath_no_directories(self):
base_dir = new_tmp_dir()
jar_file = os.path.join(base_dir, 'foo.jar')
save_file(jar_file, '')
lib_file = os.path.join(base_dir, 'lib', 'lib.jar')
mkdir(os.path.dirname(lib_file))
save_file(lib_file, '')
        self.assertEqual('.:lib/lib.jar:foo.jar', lambda_executors.Util.get_java_classpath(jar_file))
def test_get_java_lib_folder_classpath_archive_is_None(self):
self.assertRaises(TypeError, lambda_executors.Util.get_java_classpath, None)
@mock.patch('localstack.services.awslambda.lambda_executors.store_cloudwatch_logs')
def test_executor_store_logs_can_handle_milliseconds(self, mock_store_cloudwatch_logs):
mock_details = mock.Mock()
t_sec = time.time() # plain old epoch secs
t_ms = time.time() * 1000 # epoch ms as a long-int like AWS
# pass t_ms millisecs to _store_logs
lambda_executors._store_logs(mock_details, 'mock log output', t_ms)
# expect the computed log-stream-name to having a prefix matching the date derived from t_sec
today = datetime.datetime.utcfromtimestamp(t_sec).strftime('%Y/%m/%d')
log_stream_name = mock_store_cloudwatch_logs.call_args_list[0].args[1]
parts = log_stream_name.split('/')
date_part = '/'.join(parts[:3])
self.assertEqual(date_part, today)
def _create_function(self, function_name, tags={}):
arn = lambda_api.func_arn(function_name)
lambda_api.arn_to_lambda[arn] = LambdaFunction(arn)
lambda_api.arn_to_lambda[arn].versions = {
'$LATEST': {'CodeSize': self.CODE_SIZE, 'CodeSha256': self.CODE_SHA_256, 'RevisionId': self.REVISION_ID}
}
lambda_api.arn_to_lambda[arn].handler = self.HANDLER
lambda_api.arn_to_lambda[arn].runtime = self.RUNTIME
lambda_api.arn_to_lambda[arn].timeout = self.TIMEOUT
lambda_api.arn_to_lambda[arn].tags = tags
lambda_api.arn_to_lambda[arn].envvars = {}
lambda_api.arn_to_lambda[arn].last_modified = self.LAST_MODIFIED
lambda_api.arn_to_lambda[arn].role = self.ROLE
lambda_api.arn_to_lambda[arn].memory_size = self.MEMORY_SIZE
class TestLambdaEventInvokeConfig(unittest.TestCase):
CODE_SIZE = 50
CODE_SHA_256 = '/u60ZpAA9bzZPVwb8d4390i5oqP1YAObUwV03CZvsWA='
MEMORY_SIZE = 128
ROLE = LAMBDA_TEST_ROLE
LAST_MODIFIED = '2019-05-25T17:00:48.260+0000'
REVISION_ID = 'e54dbcf8-e3ef-44ab-9af7-8dbef510608a'
HANDLER = 'index.handler'
RUNTIME = 'node.js4.3'
TIMEOUT = 60
FUNCTION_NAME = 'test1'
RETRY_ATTEMPTS = 5
EVENT_AGE = 360
DL_QUEUE = 'arn:aws:sqs:us-east-1:000000000000:dlQueue'
LAMBDA_OBJ = LambdaFunction(lambda_api.func_arn('test1'))
def _create_function(self, function_name, tags={}):
self.LAMBDA_OBJ.versions = {
'$LATEST': {'CodeSize': self.CODE_SIZE, 'CodeSha256': self.CODE_SHA_256, 'RevisionId': self.REVISION_ID}
}
self.LAMBDA_OBJ.handler = self.HANDLER
self.LAMBDA_OBJ.runtime = self.RUNTIME
self.LAMBDA_OBJ.timeout = self.TIMEOUT
self.LAMBDA_OBJ.tags = tags
self.LAMBDA_OBJ.envvars = {}
self.LAMBDA_OBJ.last_modified = self.LAST_MODIFIED
self.LAMBDA_OBJ.role = self.ROLE
self.LAMBDA_OBJ.memory_size = self.MEMORY_SIZE
def test_put_function_event_invoke_config(self):
# creating a lambda function
self._create_function(self.FUNCTION_NAME)
# calling put_function_event_invoke_config
payload = {
'DestinationConfig': {
'OnFailure': {
'Destination': self.DL_QUEUE
}
},
'MaximumEventAgeInSeconds': self.EVENT_AGE,
'MaximumRetryAttempts': self.RETRY_ATTEMPTS
}
response = self.LAMBDA_OBJ.put_function_event_invoke_config(
payload
)
# checking if response is not None
self.assertIsNotNone(response)
# calling get_function_event_invoke_config
response = self.LAMBDA_OBJ.get_function_event_invoke_config()
# verifying set values
self.assertEqual(response['FunctionArn'], self.LAMBDA_OBJ.id)
self.assertEqual(response['MaximumRetryAttempts'], self.RETRY_ATTEMPTS)
self.assertEqual(response['MaximumEventAgeInSeconds'], self.EVENT_AGE)
self.assertEqual(response['DestinationConfig']['OnFailure']['Destination'], self.DL_QUEUE)
| 1 | 10,813 | nit: better rename to `TEST_QUEUE_ARN` or `TEST_EVENT_SOURCE_ARN` | localstack-localstack | py |
@@ -1097,9 +1097,9 @@ func (c *Operator) sync(key string) error {
return err
}
- // If no service monitor selectors are configured, the user wants to
- // manage configuration themselves.
- if p.Spec.ServiceMonitorSelector != nil {
+ // If no service monitor selectors or additional scrape configs are configured,
+ // the user wants to manage configuration themselves.
+ if p.Spec.ServiceMonitorSelector != nil || p.Spec.AdditionalScrapeConfigs != nil {
// We just always regenerate the configuration to be safe.
if err := c.createOrUpdateConfigurationSecret(p, ruleConfigMapNames); err != nil {
			return errors.Wrap(err, "creating config failed")
 | 1 | // Copyright 2016 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"bytes"
"compress/gzip"
"fmt"
"reflect"
"strings"
"time"
monitoring "github.com/coreos/prometheus-operator/pkg/apis/monitoring"
monitoringv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1"
monitoringclient "github.com/coreos/prometheus-operator/pkg/client/versioned"
"github.com/coreos/prometheus-operator/pkg/k8sutil"
"github.com/coreos/prometheus-operator/pkg/listwatch"
"github.com/coreos/prometheus-operator/pkg/operator"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/mitchellh/hashstructure"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
extensionsobj "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
resyncPeriod = 5 * time.Minute
)
// Operator manages life cycle of Prometheus deployments and
// monitoring configurations.
type Operator struct {
kclient kubernetes.Interface
mclient monitoringclient.Interface
crdclient apiextensionsclient.Interface
logger log.Logger
promInf cache.SharedIndexInformer
smonInf cache.SharedIndexInformer
pmonInf cache.SharedIndexInformer
ruleInf cache.SharedIndexInformer
cmapInf cache.SharedIndexInformer
secrInf cache.SharedIndexInformer
ssetInf cache.SharedIndexInformer
nsInf cache.SharedIndexInformer
queue workqueue.RateLimitingInterface
metrics *operator.Metrics
nodeAddressLookupErrors prometheus.Counter
nodeEndpointSyncs prometheus.Counter
nodeEndpointSyncErrors prometheus.Counter
host string
kubeletObjectName string
kubeletObjectNamespace string
kubeletSyncEnabled bool
config Config
configGenerator *configGenerator
}
type Labels struct {
LabelsString string
LabelsMap map[string]string
}
// String implements the flag.Value interface.
func (labels *Labels) String() string {
return labels.LabelsString
}
// Merge returns a new map that merges otherLabels with the operator-configured
// labels; on duplicate keys the operator-configured labels win.
func (labels *Labels) Merge(otherLabels map[string]string) map[string]string {
mergedLabels := map[string]string{}
for key, value := range otherLabels {
mergedLabels[key] = value
}
for key, value := range labels.LabelsMap {
mergedLabels[key] = value
}
return mergedLabels
}
// Set implements the flag.Value interface.
func (labels *Labels) Set(value string) error {
m := map[string]string{}
if value != "" {
		pairs := strings.Split(value, ",")
		for _, pair := range pairs {
			kv := strings.SplitN(pair, "=", 2)
			if len(kv) != 2 {
				return errors.Errorf("malformed label %q, expected key=value", pair)
			}
			m[kv[0]] = kv[1]
		}
}
(*labels).LabelsMap = m
(*labels).LabelsString = value
return nil
}
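// As an illustration (hypothetical values), Set("app=prometheus,team=infra")
// produces map[string]string{"app": "prometheus", "team": "infra"}, and Merge
// then lays these operator-configured labels over whatever labels it is given.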
// Config defines configuration parameters for the Operator.
type Config struct {
Host string
KubeletObject string
TLSInsecure bool
TLSConfig rest.TLSClientConfig
ConfigReloaderImage string
ConfigReloaderCPU string
ConfigReloaderMemory string
PrometheusConfigReloaderImage string
AlertmanagerDefaultBaseImage string
PrometheusDefaultBaseImage string
ThanosDefaultBaseImage string
Namespaces Namespaces
Labels Labels
CrdKinds monitoringv1.CrdKinds
EnableValidation bool
LocalHost string
LogLevel string
LogFormat string
ManageCRDs bool
PromSelector string
AlertManagerSelector string
ThanosRulerSelector string
}
type Namespaces struct {
// allow list/deny list for common custom resources
AllowList, DenyList []string
// allow list for prometheus/alertmanager custom resources
PrometheusAllowList, AlertmanagerAllowList, ThanosRulerAllowList []string
}
// BasicAuthCredentials represents a username password pair to be used with
// basic http authentication, see https://tools.ietf.org/html/rfc7617.
type BasicAuthCredentials struct {
username string
password string
}
// BearerToken represents a bearer token, see
// https://tools.ietf.org/html/rfc6750.
type BearerToken string
// TLSAsset represents any TLS related opaque string, e.g. CA files, client
// certificates.
type TLSAsset string
// New creates a new controller.
func New(conf Config, logger log.Logger, r prometheus.Registerer) (*Operator, error) {
cfg, err := k8sutil.NewClusterConfig(conf.Host, conf.TLSInsecure, &conf.TLSConfig)
if err != nil {
return nil, errors.Wrap(err, "instantiating cluster config failed")
}
client, err := kubernetes.NewForConfig(cfg)
if err != nil {
return nil, errors.Wrap(err, "instantiating kubernetes client failed")
}
crdclient, err := apiextensionsclient.NewForConfig(cfg)
if err != nil {
return nil, errors.Wrap(err, "instantiating apiextensions client failed")
}
mclient, err := monitoringclient.NewForConfig(cfg)
if err != nil {
return nil, errors.Wrap(err, "instantiating monitoring client failed")
}
if _, err := labels.Parse(conf.PromSelector); err != nil {
return nil, errors.Wrap(err, "can not parse prometheus selector value")
}
if _, err := labels.Parse(conf.AlertManagerSelector); err != nil {
return nil, errors.Wrap(err, "can not parse alertmanager selector value")
}
kubeletObjectName := ""
kubeletObjectNamespace := ""
kubeletSyncEnabled := false
if conf.KubeletObject != "" {
parts := strings.Split(conf.KubeletObject, "/")
if len(parts) != 2 {
return nil, fmt.Errorf("malformatted kubelet object string, must be in format \"namespace/name\"")
}
kubeletObjectNamespace = parts[0]
kubeletObjectName = parts[1]
kubeletSyncEnabled = true
}
c := &Operator{
kclient: client,
mclient: mclient,
crdclient: crdclient,
logger: logger,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "prometheus"),
host: cfg.Host,
kubeletObjectName: kubeletObjectName,
kubeletObjectNamespace: kubeletObjectNamespace,
kubeletSyncEnabled: kubeletSyncEnabled,
config: conf,
configGenerator: newConfigGenerator(logger),
metrics: operator.NewMetrics("prometheus", r),
nodeAddressLookupErrors: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_operator_node_address_lookup_errors_total",
Help: "Number of times a node IP address could not be determined",
}),
nodeEndpointSyncs: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_operator_node_syncs_total",
Help: "Number of node endpoints synchronisations",
}),
nodeEndpointSyncErrors: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_operator_node_syncs_failed_total",
Help: "Number of node endpoints synchronisation failures",
}),
}
c.metrics.MustRegister(c.nodeAddressLookupErrors, c.nodeEndpointSyncs, c.nodeEndpointSyncErrors)
c.promInf = cache.NewSharedIndexInformer(
c.metrics.NewInstrumentedListerWatcher(
listwatch.MultiNamespaceListerWatcher(c.logger, c.config.Namespaces.PrometheusAllowList, c.config.Namespaces.DenyList, func(namespace string) cache.ListerWatcher {
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.LabelSelector = c.config.PromSelector
return mclient.MonitoringV1().Prometheuses(namespace).List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = c.config.PromSelector
return mclient.MonitoringV1().Prometheuses(namespace).Watch(options)
},
}
}),
),
&monitoringv1.Prometheus{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
c.metrics.MustRegister(NewPrometheusCollector(c.promInf.GetStore()))
c.smonInf = cache.NewSharedIndexInformer(
c.metrics.NewInstrumentedListerWatcher(
listwatch.MultiNamespaceListerWatcher(c.logger, c.config.Namespaces.AllowList, c.config.Namespaces.DenyList, func(namespace string) cache.ListerWatcher {
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return mclient.MonitoringV1().ServiceMonitors(namespace).List(options)
},
WatchFunc: mclient.MonitoringV1().ServiceMonitors(namespace).Watch,
}
}),
),
&monitoringv1.ServiceMonitor{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
c.pmonInf = cache.NewSharedIndexInformer(
c.metrics.NewInstrumentedListerWatcher(
listwatch.MultiNamespaceListerWatcher(c.logger, c.config.Namespaces.AllowList, c.config.Namespaces.DenyList, func(namespace string) cache.ListerWatcher {
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return mclient.MonitoringV1().PodMonitors(namespace).List(options)
},
WatchFunc: mclient.MonitoringV1().PodMonitors(namespace).Watch,
}
}),
),
&monitoringv1.PodMonitor{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
c.ruleInf = cache.NewSharedIndexInformer(
c.metrics.NewInstrumentedListerWatcher(
listwatch.MultiNamespaceListerWatcher(c.logger, c.config.Namespaces.AllowList, c.config.Namespaces.DenyList, func(namespace string) cache.ListerWatcher {
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return mclient.MonitoringV1().PrometheusRules(namespace).List(options)
},
WatchFunc: mclient.MonitoringV1().PrometheusRules(namespace).Watch,
}
}),
),
&monitoringv1.PrometheusRule{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
c.cmapInf = cache.NewSharedIndexInformer(
c.metrics.NewInstrumentedListerWatcher(
listwatch.MultiNamespaceListerWatcher(c.logger, c.config.Namespaces.PrometheusAllowList, c.config.Namespaces.DenyList, func(namespace string) cache.ListerWatcher {
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.LabelSelector = labelPrometheusName
return c.kclient.CoreV1().ConfigMaps(namespace).List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labelPrometheusName
return c.kclient.CoreV1().ConfigMaps(namespace).Watch(options)
},
}
}),
),
&v1.ConfigMap{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
c.secrInf = cache.NewSharedIndexInformer(
c.metrics.NewInstrumentedListerWatcher(
listwatch.MultiNamespaceListerWatcher(c.logger, c.config.Namespaces.PrometheusAllowList, c.config.Namespaces.DenyList, func(namespace string) cache.ListerWatcher {
return cache.NewListWatchFromClient(c.kclient.CoreV1().RESTClient(), "secrets", namespace, fields.Everything())
}),
),
&v1.Secret{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
c.ssetInf = cache.NewSharedIndexInformer(
c.metrics.NewInstrumentedListerWatcher(
listwatch.MultiNamespaceListerWatcher(c.logger, c.config.Namespaces.PrometheusAllowList, c.config.Namespaces.DenyList, func(namespace string) cache.ListerWatcher {
return cache.NewListWatchFromClient(c.kclient.AppsV1().RESTClient(), "statefulsets", namespace, fields.Everything())
}),
),
&appsv1.StatefulSet{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
// nsResyncPeriod is used to control how often the namespace informer
// should resync. If the unprivileged ListerWatcher is used, then the
// informer must resync more often because it cannot watch for
// namespace changes.
nsResyncPeriod := 15 * time.Second
// If the only namespace is v1.NamespaceAll, then the client must be
// privileged and a regular cache.ListWatch will be used. In this case
// watching works and we do not need to resync so frequently.
if listwatch.IsAllNamespaces(c.config.Namespaces.AllowList) {
nsResyncPeriod = resyncPeriod
}
c.nsInf = cache.NewSharedIndexInformer(
c.metrics.NewInstrumentedListerWatcher(
listwatch.NewUnprivilegedNamespaceListWatchFromClient(c.logger, c.kclient.CoreV1().RESTClient(), c.config.Namespaces.AllowList, c.config.Namespaces.DenyList, fields.Everything()),
),
&v1.Namespace{}, nsResyncPeriod, cache.Indexers{},
)
return c, nil
}
// waitForCacheSync waits for the informers' caches to be synced.
func (c *Operator) waitForCacheSync(stopc <-chan struct{}) error {
ok := true
informers := []struct {
name string
informer cache.SharedIndexInformer
}{
{"Prometheus", c.promInf},
{"ServiceMonitor", c.smonInf},
{"PodMonitor", c.pmonInf},
{"PrometheusRule", c.ruleInf},
{"ConfigMap", c.cmapInf},
{"Secret", c.secrInf},
{"StatefulSet", c.ssetInf},
{"Namespace", c.nsInf},
}
for _, inf := range informers {
if !cache.WaitForCacheSync(stopc, inf.informer.HasSynced) {
level.Error(c.logger).Log("msg", fmt.Sprintf("failed to sync %s cache", inf.name))
ok = false
} else {
level.Debug(c.logger).Log("msg", fmt.Sprintf("successfully synced %s cache", inf.name))
}
}
if !ok {
return errors.New("failed to sync caches")
}
level.Info(c.logger).Log("msg", "successfully synced all caches")
return nil
}
// addHandlers adds the eventhandlers to the informers.
func (c *Operator) addHandlers() {
c.promInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.handlePrometheusAdd,
DeleteFunc: c.handlePrometheusDelete,
UpdateFunc: c.handlePrometheusUpdate,
})
c.smonInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.handleSmonAdd,
DeleteFunc: c.handleSmonDelete,
UpdateFunc: c.handleSmonUpdate,
})
c.pmonInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.handlePmonAdd,
DeleteFunc: c.handlePmonDelete,
UpdateFunc: c.handlePmonUpdate,
})
c.ruleInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.handleRuleAdd,
DeleteFunc: c.handleRuleDelete,
UpdateFunc: c.handleRuleUpdate,
})
c.cmapInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.handleConfigMapAdd,
DeleteFunc: c.handleConfigMapDelete,
UpdateFunc: c.handleConfigMapUpdate,
})
c.secrInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.handleSecretAdd,
DeleteFunc: c.handleSecretDelete,
UpdateFunc: c.handleSecretUpdate,
})
c.ssetInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.handleStatefulSetAdd,
DeleteFunc: c.handleStatefulSetDelete,
UpdateFunc: c.handleStatefulSetUpdate,
})
}
// Run the controller.
func (c *Operator) Run(stopc <-chan struct{}) error {
defer c.queue.ShutDown()
errChan := make(chan error)
go func() {
v, err := c.kclient.Discovery().ServerVersion()
if err != nil {
errChan <- errors.Wrap(err, "communicating with server failed")
return
}
level.Info(c.logger).Log("msg", "connection established", "cluster-version", v)
if c.config.ManageCRDs {
if err := c.createCRDs(); err != nil {
errChan <- errors.Wrap(err, "creating CRDs failed")
return
}
}
errChan <- nil
}()
select {
case err := <-errChan:
if err != nil {
return err
}
level.Info(c.logger).Log("msg", "CRD API endpoints ready")
case <-stopc:
return nil
}
go c.worker()
go c.promInf.Run(stopc)
go c.smonInf.Run(stopc)
go c.pmonInf.Run(stopc)
go c.ruleInf.Run(stopc)
go c.cmapInf.Run(stopc)
go c.secrInf.Run(stopc)
go c.ssetInf.Run(stopc)
go c.nsInf.Run(stopc)
if err := c.waitForCacheSync(stopc); err != nil {
return err
}
c.addHandlers()
if c.kubeletSyncEnabled {
go c.reconcileNodeEndpoints(stopc)
}
<-stopc
return nil
}
func (c *Operator) keyFunc(obj interface{}) (string, bool) {
k, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
level.Error(c.logger).Log("msg", "creating key failed", "err", err)
return k, false
}
return k, true
}
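// exampleObjectKey is an illustrative sketch added for this write-up; it is
// not part of the upstream operator. It shows the key format produced by
// keyFunc via client-go's DeletionHandlingMetaNamespaceKeyFunc, namely
// "<namespace>/<name>" for namespaced objects. The object below is made up.
func exampleObjectKey() string {
	p := &monitoringv1.Prometheus{
		ObjectMeta: metav1.ObjectMeta{Namespace: "monitoring", Name: "k8s"},
	}
	// For this object the key is "monitoring/k8s".
	key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(p)
	return key
}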
func (c *Operator) handlePrometheusAdd(obj interface{}) {
key, ok := c.keyFunc(obj)
if !ok {
return
}
level.Debug(c.logger).Log("msg", "Prometheus added", "key", key)
c.metrics.TriggerByCounter(monitoringv1.PrometheusesKind, "add").Inc()
checkPrometheusSpecDeprecation(key, obj.(*monitoringv1.Prometheus), c.logger)
c.enqueue(key)
}
func (c *Operator) handlePrometheusDelete(obj interface{}) {
key, ok := c.keyFunc(obj)
if !ok {
return
}
level.Debug(c.logger).Log("msg", "Prometheus deleted", "key", key)
c.metrics.TriggerByCounter(monitoringv1.PrometheusesKind, "delete").Inc()
c.enqueue(key)
}
func (c *Operator) handlePrometheusUpdate(old, cur interface{}) {
if old.(*monitoringv1.Prometheus).ResourceVersion == cur.(*monitoringv1.Prometheus).ResourceVersion {
return
}
key, ok := c.keyFunc(cur)
if !ok {
return
}
level.Debug(c.logger).Log("msg", "Prometheus updated", "key", key)
c.metrics.TriggerByCounter(monitoringv1.PrometheusesKind, "update").Inc()
checkPrometheusSpecDeprecation(key, cur.(*monitoringv1.Prometheus), c.logger)
c.enqueue(key)
}
func (c *Operator) reconcileNodeEndpoints(stopc <-chan struct{}) {
c.syncNodeEndpointsWithLogError()
ticker := time.NewTicker(3 * time.Minute)
defer ticker.Stop()
for {
select {
case <-stopc:
return
case <-ticker.C:
c.syncNodeEndpointsWithLogError()
}
}
}
// nodeAddress returns the provided node's address, based on the priority:
// 1. NodeInternalIP
// 2. NodeExternalIP
//
// Copied from github.com/prometheus/prometheus/discovery/kubernetes/node.go
func nodeAddress(node v1.Node) (string, map[v1.NodeAddressType][]string, error) {
m := map[v1.NodeAddressType][]string{}
for _, a := range node.Status.Addresses {
m[a.Type] = append(m[a.Type], a.Address)
}
if addresses, ok := m[v1.NodeInternalIP]; ok {
return addresses[0], m, nil
}
if addresses, ok := m[v1.NodeExternalIP]; ok {
return addresses[0], m, nil
}
return "", m, fmt.Errorf("host address unknown")
}
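// Illustrative note, not in the upstream source: given a node whose status
// lists both an internal and an external address, the internal one wins, e.g.
//
//	NodeInternalIP 10.0.0.5, NodeExternalIP 34.1.2.3  ->  "10.0.0.5"
//
// The IPs above are made up for the example.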
func getNodeAddresses(nodes *v1.NodeList) ([]v1.EndpointAddress, []error) {
addresses := make([]v1.EndpointAddress, 0)
errs := make([]error, 0)
for _, n := range nodes.Items {
address, _, err := nodeAddress(n)
if err != nil {
errs = append(errs, errors.Wrapf(err, "failed to determine hostname for node (%s)", n.Name))
continue
}
addresses = append(addresses, v1.EndpointAddress{
IP: address,
TargetRef: &v1.ObjectReference{
Kind: "Node",
Name: n.Name,
UID: n.UID,
APIVersion: n.APIVersion,
},
})
}
return addresses, errs
}
func (c *Operator) syncNodeEndpointsWithLogError() {
c.nodeEndpointSyncs.Inc()
err := c.syncNodeEndpoints()
if err != nil {
c.nodeEndpointSyncErrors.Inc()
level.Error(c.logger).Log("msg", "syncing nodes into Endpoints object failed", "err", err)
}
}
func (c *Operator) syncNodeEndpoints() error {
eps := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: c.kubeletObjectName,
Labels: c.config.Labels.Merge(map[string]string{
"k8s-app": "kubelet",
}),
},
Subsets: []v1.EndpointSubset{
{
Ports: []v1.EndpointPort{
{
Name: "https-metrics",
Port: 10250,
},
{
Name: "http-metrics",
Port: 10255,
},
{
Name: "cadvisor",
Port: 4194,
},
},
},
},
}
nodes, err := c.kclient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return errors.Wrap(err, "listing nodes failed")
}
addresses, errs := getNodeAddresses(nodes)
if len(errs) > 0 {
for _, err := range errs {
level.Warn(c.logger).Log("err", err)
}
c.nodeAddressLookupErrors.Add(float64(len(errs)))
}
eps.Subsets[0].Addresses = addresses
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: c.kubeletObjectName,
Labels: c.config.Labels.Merge(map[string]string{
"k8s-app": "kubelet",
}),
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
ClusterIP: "None",
Ports: []v1.ServicePort{
{
Name: "https-metrics",
Port: 10250,
},
{
Name: "http-metrics",
Port: 10255,
},
{
Name: "cadvisor",
Port: 4194,
},
},
},
}
err = k8sutil.CreateOrUpdateService(c.kclient.CoreV1().Services(c.kubeletObjectNamespace), svc)
if err != nil {
return errors.Wrap(err, "synchronizing kubelet service object failed")
}
err = k8sutil.CreateOrUpdateEndpoints(c.kclient.CoreV1().Endpoints(c.kubeletObjectNamespace), eps)
if err != nil {
return errors.Wrap(err, "synchronizing kubelet endpoints object failed")
}
return nil
}
// TODO: Don't enqueue just for the namespace
func (c *Operator) handleSmonAdd(obj interface{}) {
o, ok := c.getObject(obj)
if ok {
level.Debug(c.logger).Log("msg", "ServiceMonitor added")
c.metrics.TriggerByCounter(monitoringv1.ServiceMonitorsKind, "add").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
// TODO: Don't enqueue just for the namespace
func (c *Operator) handleSmonUpdate(old, cur interface{}) {
if old.(*monitoringv1.ServiceMonitor).ResourceVersion == cur.(*monitoringv1.ServiceMonitor).ResourceVersion {
return
}
o, ok := c.getObject(cur)
if ok {
level.Debug(c.logger).Log("msg", "ServiceMonitor updated")
c.metrics.TriggerByCounter(monitoringv1.ServiceMonitorsKind, "update").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
// TODO: Don't enqueue just for the namespace
func (c *Operator) handleSmonDelete(obj interface{}) {
o, ok := c.getObject(obj)
if ok {
level.Debug(c.logger).Log("msg", "ServiceMonitor delete")
c.metrics.TriggerByCounter(monitoringv1.ServiceMonitorsKind, "delete").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
// TODO: Don't enqueue just for the namespace
func (c *Operator) handlePmonAdd(obj interface{}) {
o, ok := c.getObject(obj)
if ok {
level.Debug(c.logger).Log("msg", "PodMonitor added")
c.metrics.TriggerByCounter(monitoringv1.PodMonitorsKind, "add").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
// TODO: Don't enqueue just for the namespace
func (c *Operator) handlePmonUpdate(old, cur interface{}) {
if old.(*monitoringv1.PodMonitor).ResourceVersion == cur.(*monitoringv1.PodMonitor).ResourceVersion {
return
}
o, ok := c.getObject(cur)
if ok {
level.Debug(c.logger).Log("msg", "PodMonitor updated")
c.metrics.TriggerByCounter(monitoringv1.PodMonitorsKind, "update").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
// TODO: Don't enqueue just for the namespace
func (c *Operator) handlePmonDelete(obj interface{}) {
o, ok := c.getObject(obj)
if ok {
level.Debug(c.logger).Log("msg", "PodMonitor delete")
c.metrics.TriggerByCounter(monitoringv1.PodMonitorsKind, "delete").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
// TODO: Don't enqueue just for the namespace
func (c *Operator) handleRuleAdd(obj interface{}) {
o, ok := c.getObject(obj)
if ok {
level.Debug(c.logger).Log("msg", "PrometheusRule added")
c.metrics.TriggerByCounter(monitoringv1.PrometheusRuleKind, "add").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
// TODO: Don't enqueue just for the namespace
func (c *Operator) handleRuleUpdate(old, cur interface{}) {
if old.(*monitoringv1.PrometheusRule).ResourceVersion == cur.(*monitoringv1.PrometheusRule).ResourceVersion {
return
}
o, ok := c.getObject(cur)
if ok {
level.Debug(c.logger).Log("msg", "PrometheusRule updated")
c.metrics.TriggerByCounter(monitoringv1.PrometheusRuleKind, "update").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
// TODO: Don't enqueue just for the namespace
func (c *Operator) handleRuleDelete(obj interface{}) {
o, ok := c.getObject(obj)
if ok {
level.Debug(c.logger).Log("msg", "PrometheusRule deleted")
c.metrics.TriggerByCounter(monitoringv1.PrometheusRuleKind, "delete").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
// TODO: Do we need to enqueue secrets just for the namespace or in general?
func (c *Operator) handleSecretDelete(obj interface{}) {
o, ok := c.getObject(obj)
if ok {
level.Debug(c.logger).Log("msg", "Secret deleted")
c.metrics.TriggerByCounter("Secret", "delete").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
func (c *Operator) handleSecretUpdate(old, cur interface{}) {
if old.(*v1.Secret).ResourceVersion == cur.(*v1.Secret).ResourceVersion {
return
}
o, ok := c.getObject(cur)
if ok {
level.Debug(c.logger).Log("msg", "Secret updated")
c.metrics.TriggerByCounter("Secret", "update").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
func (c *Operator) handleSecretAdd(obj interface{}) {
o, ok := c.getObject(obj)
if ok {
level.Debug(c.logger).Log("msg", "Secret added")
c.metrics.TriggerByCounter("Secret", "add").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
// TODO: Do we need to enqueue configmaps just for the namespace or in general?
func (c *Operator) handleConfigMapAdd(obj interface{}) {
o, ok := c.getObject(obj)
if ok {
level.Debug(c.logger).Log("msg", "ConfigMap added")
c.metrics.TriggerByCounter("ConfigMap", "add").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
func (c *Operator) handleConfigMapDelete(obj interface{}) {
o, ok := c.getObject(obj)
if ok {
level.Debug(c.logger).Log("msg", "ConfigMap deleted")
c.metrics.TriggerByCounter("ConfigMap", "delete").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
func (c *Operator) handleConfigMapUpdate(old, cur interface{}) {
if old.(*v1.ConfigMap).ResourceVersion == cur.(*v1.ConfigMap).ResourceVersion {
return
}
o, ok := c.getObject(cur)
if ok {
level.Debug(c.logger).Log("msg", "ConfigMap updated")
c.metrics.TriggerByCounter("ConfigMap", "update").Inc()
c.enqueueForNamespace(o.GetNamespace())
}
}
func (c *Operator) getObject(obj interface{}) (metav1.Object, bool) {
ts, ok := obj.(cache.DeletedFinalStateUnknown)
if ok {
obj = ts.Obj
}
o, err := meta.Accessor(obj)
if err != nil {
level.Error(c.logger).Log("msg", "get object failed", "err", err)
return nil, false
}
return o, true
}
// enqueue adds a key to the queue. If obj is a key already it gets added
// directly. Otherwise, the key is extracted via keyFunc.
func (c *Operator) enqueue(obj interface{}) {
if obj == nil {
return
}
key, ok := obj.(string)
if !ok {
key, ok = c.keyFunc(obj)
if !ok {
return
}
}
c.queue.Add(key)
}
// enqueueForNamespace enqueues all Prometheus object keys that belong to the
// given namespace or that select objects in the given namespace.
func (c *Operator) enqueueForNamespace(nsName string) {
nsObject, exists, err := c.nsInf.GetStore().GetByKey(nsName)
if err != nil {
level.Error(c.logger).Log(
"msg", "get namespace to enqueue Prometheus instances failed",
"err", err,
)
return
}
if !exists {
level.Error(c.logger).Log(
"msg", fmt.Sprintf("get namespace to enqueue Prometheus instances failed: namespace %q does not exist", nsName),
)
return
}
ns := nsObject.(*v1.Namespace)
err = cache.ListAll(c.promInf.GetStore(), labels.Everything(), func(obj interface{}) {
// Check for Prometheus instances in the NS.
p := obj.(*monitoringv1.Prometheus)
if p.Namespace == nsName {
c.enqueue(p)
return
}
// Check for Prometheus instances selecting ServiceMonitors in
// the NS.
smNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.ServiceMonitorNamespaceSelector)
if err != nil {
level.Error(c.logger).Log(
"msg", fmt.Sprintf("failed to convert ServiceMonitorNamespaceSelector of %q to selector", p.Name),
"err", err,
)
return
}
if smNSSelector.Matches(labels.Set(ns.Labels)) {
c.enqueue(p)
return
}
// Check for Prometheus instances selecting PrometheusRules in
// the NS.
ruleNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.RuleNamespaceSelector)
if err != nil {
level.Error(c.logger).Log(
"msg", fmt.Sprintf("failed to convert RuleNamespaceSelector of %q to selector", p.Name),
"err", err,
)
return
}
if ruleNSSelector.Matches(labels.Set(ns.Labels)) {
c.enqueue(p)
return
}
})
if err != nil {
level.Error(c.logger).Log(
"msg", "listing all Prometheus instances from cache failed",
"err", err,
)
}
}
// worker runs a worker thread that just dequeues items, processes them, and
// marks them done. It enforces that the syncHandler is never invoked
// concurrently with the same key.
func (c *Operator) worker() {
for c.processNextWorkItem() {
}
}
func (c *Operator) processNextWorkItem() bool {
key, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(key)
err := c.sync(key.(string))
if err == nil {
c.queue.Forget(key)
return true
}
c.metrics.ReconcileErrorsCounter().Inc()
utilruntime.HandleError(errors.Wrap(err, fmt.Sprintf("Sync %q failed", key)))
c.queue.AddRateLimited(key)
return true
}
func (c *Operator) prometheusForStatefulSet(sset interface{}) *monitoringv1.Prometheus {
key, ok := c.keyFunc(sset)
if !ok {
return nil
}
promKey := statefulSetKeyToPrometheusKey(key)
p, exists, err := c.promInf.GetStore().GetByKey(promKey)
if err != nil {
level.Error(c.logger).Log("msg", "Prometheus lookup failed", "err", err)
return nil
}
if !exists {
return nil
}
return p.(*monitoringv1.Prometheus)
}
func prometheusNameFromStatefulSetName(name string) string {
return strings.TrimPrefix(name, "prometheus-")
}
func statefulSetNameFromPrometheusName(name string) string {
return "prometheus-" + name
}
func statefulSetKeyToPrometheusKey(key string) string {
keyParts := strings.Split(key, "/")
return keyParts[0] + "/" + strings.TrimPrefix(keyParts[1], "prometheus-")
}
func prometheusKeyToStatefulSetKey(key string) string {
keyParts := strings.Split(key, "/")
return keyParts[0] + "/prometheus-" + keyParts[1]
}
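// Illustrative note (not part of the original file): the helpers above encode
// the naming convention that the StatefulSet backing a Prometheus object is
// called "prometheus-<name>" in the same namespace, for example:
//
//	statefulSetKeyToPrometheusKey("monitoring/prometheus-k8s") // "monitoring/k8s"
//	prometheusKeyToStatefulSetKey("monitoring/k8s")            // "monitoring/prometheus-k8s"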
func (c *Operator) handleStatefulSetDelete(obj interface{}) {
if ps := c.prometheusForStatefulSet(obj); ps != nil {
level.Debug(c.logger).Log("msg", "StatefulSet delete")
c.metrics.TriggerByCounter("StatefulSet", "delete").Inc()
c.enqueue(ps)
}
}
func (c *Operator) handleStatefulSetAdd(obj interface{}) {
if ps := c.prometheusForStatefulSet(obj); ps != nil {
level.Debug(c.logger).Log("msg", "StatefulSet added")
c.metrics.TriggerByCounter("StatefulSet", "add").Inc()
c.enqueue(ps)
}
}
func (c *Operator) handleStatefulSetUpdate(oldo, curo interface{}) {
old := oldo.(*appsv1.StatefulSet)
cur := curo.(*appsv1.StatefulSet)
level.Debug(c.logger).Log("msg", "update handler", "old", old.ResourceVersion, "cur", cur.ResourceVersion)
// Periodic resync may resend the StatefulSet without changes
// in-between. Also breaks loops created by updating the resource
// ourselves.
if old.ResourceVersion == cur.ResourceVersion {
return
}
if ps := c.prometheusForStatefulSet(cur); ps != nil {
level.Debug(c.logger).Log("msg", "StatefulSet updated")
c.metrics.TriggerByCounter("StatefulSet", "update").Inc()
c.enqueue(ps)
}
}
func (c *Operator) sync(key string) error {
obj, exists, err := c.promInf.GetIndexer().GetByKey(key)
if err != nil {
return err
}
if !exists {
// Dependent resources are cleaned up by K8s via OwnerReferences
return nil
}
p := obj.(*monitoringv1.Prometheus)
p = p.DeepCopy()
p.APIVersion = monitoringv1.SchemeGroupVersion.String()
p.Kind = monitoringv1.PrometheusesKind
if p.Spec.Paused {
return nil
}
level.Info(c.logger).Log("msg", "sync prometheus", "key", key)
ruleConfigMapNames, err := c.createOrUpdateRuleConfigMaps(p)
if err != nil {
return err
}
// If no service monitor selectors are configured, the user wants to
// manage configuration themselves.
if p.Spec.ServiceMonitorSelector != nil {
// We just always regenerate the configuration to be safe.
if err := c.createOrUpdateConfigurationSecret(p, ruleConfigMapNames); err != nil {
return errors.Wrap(err, "creating config failed")
}
}
// Create empty Secret if it doesn't exist. See comment above.
s, err := makeEmptyConfigurationSecret(p, c.config)
if err != nil {
return errors.Wrap(err, "generating empty config secret failed")
}
sClient := c.kclient.CoreV1().Secrets(p.Namespace)
_, err = sClient.Get(s.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
if _, err := c.kclient.CoreV1().Secrets(p.Namespace).Create(s); err != nil && !apierrors.IsAlreadyExists(err) {
return errors.Wrap(err, "creating empty config file failed")
}
}
if !apierrors.IsNotFound(err) && err != nil {
return err
}
if err := c.createOrUpdateTLSAssetSecret(p); err != nil {
return errors.Wrap(err, "creating tls asset secret failed")
}
// Create governing service if it doesn't exist.
svcClient := c.kclient.CoreV1().Services(p.Namespace)
if err := k8sutil.CreateOrUpdateService(svcClient, makeStatefulSetService(p, c.config)); err != nil {
return errors.Wrap(err, "synchronizing governing service failed")
}
ssetClient := c.kclient.AppsV1().StatefulSets(p.Namespace)
// Ensure we have a StatefulSet running Prometheus deployed.
obj, exists, err = c.ssetInf.GetIndexer().GetByKey(prometheusKeyToStatefulSetKey(key))
if err != nil {
return errors.Wrap(err, "retrieving statefulset failed")
}
spec := appsv1.StatefulSetSpec{}
if obj != nil {
ss := obj.(*appsv1.StatefulSet)
spec = ss.Spec
}
newSSetInputHash, err := createSSetInputHash(*p, c.config, ruleConfigMapNames, spec)
if err != nil {
return err
}
sset, err := makeStatefulSet(*p, &c.config, ruleConfigMapNames, newSSetInputHash)
if err != nil {
return errors.Wrap(err, "making statefulset failed")
}
operator.SanitizeSTS(sset)
if !exists {
level.Debug(c.logger).Log("msg", "no current Prometheus statefulset found")
level.Debug(c.logger).Log("msg", "creating Prometheus statefulset")
if _, err := ssetClient.Create(sset); err != nil {
return errors.Wrap(err, "creating statefulset failed")
}
return nil
}
oldSSetInputHash := obj.(*appsv1.StatefulSet).ObjectMeta.Annotations[sSetInputHashName]
if newSSetInputHash == oldSSetInputHash {
level.Debug(c.logger).Log("msg", "new statefulset generation inputs match current, skipping any actions")
return nil
}
level.Debug(c.logger).Log("msg", "updating current Prometheus statefulset")
_, err = ssetClient.Update(sset)
sErr, ok := err.(*apierrors.StatusError)
if ok && sErr.ErrStatus.Code == 422 && sErr.ErrStatus.Reason == metav1.StatusReasonInvalid {
c.metrics.StsDeleteCreateCounter().Inc()
level.Info(c.logger).Log("msg", "resolving illegal update of Prometheus StatefulSet", "details", sErr.ErrStatus.Details)
propagationPolicy := metav1.DeletePropagationForeground
if err := ssetClient.Delete(sset.GetName(), &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil {
return errors.Wrap(err, "failed to delete StatefulSet to avoid forbidden action")
}
return nil
}
if err != nil {
return errors.Wrap(err, "updating StatefulSet failed")
}
return nil
}
// checkPrometheusSpecDeprecation checks for deprecated fields in the Prometheus spec and logs a warning if applicable.
func checkPrometheusSpecDeprecation(key string, p *monitoringv1.Prometheus, logger log.Logger) {
deprecationWarningf := "prometheus key=%v, field %v is deprecated, '%v' field should be used instead"
if p.Spec.BaseImage != "" {
level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.baseImage", "spec.image"))
}
if p.Spec.Tag != "" {
level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.tag", "spec.image"))
}
if p.Spec.SHA != "" {
level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.sha", "spec.image"))
}
if p.Spec.Thanos != nil {
if p.Spec.BaseImage != "" {
level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.thanos.baseImage", "spec.thanos.image"))
}
if p.Spec.Tag != "" {
level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.thanos.tag", "spec.thanos.image"))
}
if p.Spec.SHA != "" {
level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.thanos.sha", "spec.thanos.image"))
}
}
}
func createSSetInputHash(p monitoringv1.Prometheus, c Config, ruleConfigMapNames []string, ss interface{}) (string, error) {
hash, err := hashstructure.Hash(struct {
P monitoringv1.Prometheus
C Config
S interface{}
R []string `hash:"set"`
}{p, c, ss, ruleConfigMapNames},
nil,
)
if err != nil {
return "", errors.Wrap(
err,
"failed to calculate combined hash of Prometheus StatefulSet, Prometheus CRD, config and"+
" rule ConfigMap names",
)
}
return fmt.Sprintf("%d", hash), nil
}
func ListOptions(name string) metav1.ListOptions {
return metav1.ListOptions{
LabelSelector: fields.SelectorFromSet(fields.Set(map[string]string{
"app": "prometheus",
"prometheus": name,
})).String(),
}
}
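// Illustrative note (not part of the original file): for a Prometheus named
// "k8s", the options above effectively select pods labelled
// "app=prometheus,prometheus=k8s" (selector key order may differ), which is
// how PrometheusStatus below locates the pods of the governing StatefulSet.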
// PrometheusStatus evaluates the current status of a Prometheus deployment with
// respect to its specified resource object. It returns the status and a list of
// pods that are not updated.
func PrometheusStatus(kclient kubernetes.Interface, p *monitoringv1.Prometheus) (*monitoringv1.PrometheusStatus, []v1.Pod, error) {
res := &monitoringv1.PrometheusStatus{Paused: p.Spec.Paused}
pods, err := kclient.CoreV1().Pods(p.Namespace).List(ListOptions(p.Name))
if err != nil {
return nil, nil, errors.Wrap(err, "retrieving pods of failed")
}
sset, err := kclient.AppsV1().StatefulSets(p.Namespace).Get(statefulSetNameFromPrometheusName(p.Name), metav1.GetOptions{})
if err != nil {
return nil, nil, errors.Wrap(err, "retrieving stateful set failed")
}
res.Replicas = int32(len(pods.Items))
var oldPods []v1.Pod
for _, pod := range pods.Items {
ready, err := k8sutil.PodRunningAndReady(pod)
if err != nil {
return nil, nil, errors.Wrap(err, "cannot determine pod ready state")
}
if ready {
res.AvailableReplicas++
// TODO(fabxc): detect other fields of the pod template
// that are mutable.
if needsUpdate(&pod, sset.Spec.Template) {
oldPods = append(oldPods, pod)
} else {
res.UpdatedReplicas++
}
continue
}
res.UnavailableReplicas++
}
return res, oldPods, nil
}
// needsUpdate checks whether the given pod conforms with the pod template spec
// for various attributes that are influenced by the Prometheus CRD settings.
func needsUpdate(pod *v1.Pod, tmpl v1.PodTemplateSpec) bool {
c1 := pod.Spec.Containers[0]
c2 := tmpl.Spec.Containers[0]
if c1.Image != c2.Image {
return true
}
if !reflect.DeepEqual(c1.Args, c2.Args) {
return true
}
return false
}
func (c *Operator) loadAdditionalScrapeConfigsSecret(additionalScrapeConfigs *v1.SecretKeySelector, s *v1.SecretList) ([]byte, error) {
if additionalScrapeConfigs != nil {
for _, secret := range s.Items {
if secret.Name == additionalScrapeConfigs.Name {
if c, ok := secret.Data[additionalScrapeConfigs.Key]; ok {
return c, nil
}
return nil, fmt.Errorf("key %v could not be found in Secret %v", additionalScrapeConfigs.Key, additionalScrapeConfigs.Name)
}
}
if additionalScrapeConfigs.Optional == nil || !*additionalScrapeConfigs.Optional {
return nil, fmt.Errorf("secret %v could not be found", additionalScrapeConfigs.Name)
}
level.Debug(c.logger).Log("msg", fmt.Sprintf("secret %v could not be found", additionalScrapeConfigs.Name))
}
return nil, nil
}
func extractCredKey(secret *v1.Secret, sel v1.SecretKeySelector, cred string) (string, error) {
if s, ok := secret.Data[sel.Key]; ok {
return string(s), nil
}
return "", fmt.Errorf("secret %s key %q in secret %q not found", cred, sel.Key, sel.Name)
}
func getCredFromSecret(c corev1client.SecretInterface, sel v1.SecretKeySelector, cred string, cacheKey string, cache map[string]*v1.Secret) (_ string, err error) {
var s *v1.Secret
var ok bool
if s, ok = cache[cacheKey]; !ok {
if s, err = c.Get(sel.Name, metav1.GetOptions{}); err != nil {
return "", fmt.Errorf("unable to fetch %s secret %q: %s", cred, sel.Name, err)
}
cache[cacheKey] = s
}
return extractCredKey(s, sel, cred)
}
func getCredFromConfigMap(c corev1client.ConfigMapInterface, sel v1.ConfigMapKeySelector, cred string, cacheKey string, cache map[string]*v1.ConfigMap) (_ string, err error) {
var s *v1.ConfigMap
var ok bool
if s, ok = cache[cacheKey]; !ok {
if s, err = c.Get(sel.Name, metav1.GetOptions{}); err != nil {
return "", fmt.Errorf("unable to fetch %s configmap %q: %s", cred, sel.Name, err)
}
cache[cacheKey] = s
}
if a, ok := s.Data[sel.Key]; ok {
return string(a), nil
}
return "", fmt.Errorf("config %s key %q in configmap %q not found", cred, sel.Key, sel.Name)
}
func loadBasicAuthSecretFromAPI(basicAuth *monitoringv1.BasicAuth, c corev1client.CoreV1Interface, ns string, cache map[string]*v1.Secret) (BasicAuthCredentials, error) {
var username string
var password string
var err error
sClient := c.Secrets(ns)
if username, err = getCredFromSecret(sClient, basicAuth.Username, "username", ns+"/"+basicAuth.Username.Name, cache); err != nil {
return BasicAuthCredentials{}, err
}
if password, err = getCredFromSecret(sClient, basicAuth.Password, "password", ns+"/"+basicAuth.Password.Name, cache); err != nil {
return BasicAuthCredentials{}, err
}
return BasicAuthCredentials{username: username, password: password}, nil
}
func loadBasicAuthSecret(basicAuth *monitoringv1.BasicAuth, s *v1.SecretList) (BasicAuthCredentials, error) {
var username string
var password string
var err error
for _, secret := range s.Items {
if secret.Name == basicAuth.Username.Name {
if username, err = extractCredKey(&secret, basicAuth.Username, "username"); err != nil {
return BasicAuthCredentials{}, err
}
}
if secret.Name == basicAuth.Password.Name {
if password, err = extractCredKey(&secret, basicAuth.Password, "password"); err != nil {
return BasicAuthCredentials{}, err
}
}
if username != "" && password != "" {
break
}
}
if username == "" && password == "" {
return BasicAuthCredentials{}, fmt.Errorf("basic auth username and password secret not found")
}
return BasicAuthCredentials{username: username, password: password}, nil
}
func gzipConfig(buf *bytes.Buffer, conf []byte) error {
w := gzip.NewWriter(buf)
defer w.Close()
if _, err := w.Write(conf); err != nil {
return err
}
return nil
}
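// gunzipConfig is a companion sketch, not part of the upstream operator: it
// shows how the gzip-compressed configuration stored in the Secret could be
// decompressed again, for example when debugging the generated config. It
// only uses packages this file already imports (bytes and compress/gzip).
func gunzipConfig(data []byte) ([]byte, error) {
	r, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	defer r.Close()
	var out bytes.Buffer
	// Buffer.ReadFrom drains the gzip reader until EOF.
	if _, err := out.ReadFrom(r); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}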
func (c *Operator) loadBasicAuthSecrets(
mons map[string]*monitoringv1.ServiceMonitor,
remoteReads []monitoringv1.RemoteReadSpec,
remoteWrites []monitoringv1.RemoteWriteSpec,
apiserverConfig *monitoringv1.APIServerConfig,
SecretsInPromNS *v1.SecretList,
) (map[string]BasicAuthCredentials, error) {
secrets := map[string]BasicAuthCredentials{}
nsSecretCache := make(map[string]*v1.Secret)
for _, mon := range mons {
for i, ep := range mon.Spec.Endpoints {
if ep.BasicAuth != nil {
credentials, err := loadBasicAuthSecretFromAPI(ep.BasicAuth, c.kclient.CoreV1(), mon.Namespace, nsSecretCache)
if err != nil {
return nil, fmt.Errorf("could not generate basicAuth for servicemonitor %s. %s", mon.Name, err)
}
secrets[fmt.Sprintf("serviceMonitor/%s/%s/%d", mon.Namespace, mon.Name, i)] = credentials
}
}
}
for i, remote := range remoteReads {
if remote.BasicAuth != nil {
credentials, err := loadBasicAuthSecret(remote.BasicAuth, SecretsInPromNS)
if err != nil {
return nil, fmt.Errorf("could not generate basicAuth for remote_read config %d. %s", i, err)
}
secrets[fmt.Sprintf("remoteRead/%d", i)] = credentials
}
}
for i, remote := range remoteWrites {
if remote.BasicAuth != nil {
credentials, err := loadBasicAuthSecret(remote.BasicAuth, SecretsInPromNS)
if err != nil {
return nil, fmt.Errorf("could not generate basicAuth for remote_write config %d. %s", i, err)
}
secrets[fmt.Sprintf("remoteWrite/%d", i)] = credentials
}
}
// load apiserver basic auth secret
if apiserverConfig != nil && apiserverConfig.BasicAuth != nil {
credentials, err := loadBasicAuthSecret(apiserverConfig.BasicAuth, SecretsInPromNS)
if err != nil {
return nil, fmt.Errorf("could not generate basicAuth for apiserver config. %s", err)
}
secrets["apiserver"] = credentials
}
return secrets, nil
}
func (c *Operator) loadBearerTokensFromSecrets(mons map[string]*monitoringv1.ServiceMonitor) (map[string]BearerToken, error) {
tokens := map[string]BearerToken{}
nsSecretCache := make(map[string]*v1.Secret)
for _, mon := range mons {
for i, ep := range mon.Spec.Endpoints {
if ep.BearerTokenSecret.Name == "" {
continue
}
sClient := c.kclient.CoreV1().Secrets(mon.Namespace)
token, err := getCredFromSecret(
sClient,
ep.BearerTokenSecret,
"bearertoken",
mon.Namespace+"/"+ep.BearerTokenSecret.Name,
nsSecretCache,
)
if err != nil {
return nil, fmt.Errorf(
"failed to extract endpoint bearertoken for servicemonitor %v from secret %v in namespace %v",
mon.Name, ep.BearerTokenSecret.Name, mon.Namespace,
)
}
tokens[fmt.Sprintf("serviceMonitor/%s/%s/%d", mon.Namespace, mon.Name, i)] = BearerToken(token)
}
}
return tokens, nil
}
func (c *Operator) loadTLSAssets(mons map[string]*monitoringv1.ServiceMonitor) (map[string]TLSAsset, error) {
assets := map[string]TLSAsset{}
nsSecretCache := make(map[string]*v1.Secret)
nsConfigMapCache := make(map[string]*v1.ConfigMap)
for _, mon := range mons {
for _, ep := range mon.Spec.Endpoints {
if ep.TLSConfig == nil {
continue
}
prefix := mon.Namespace + "/"
secretSelectors := map[string]*v1.SecretKeySelector{}
configMapSelectors := map[string]*v1.ConfigMapKeySelector{}
if ep.TLSConfig.CA != (monitoringv1.SecretOrConfigMap{}) {
switch {
case ep.TLSConfig.CA.Secret != nil:
secretSelectors[prefix+ep.TLSConfig.CA.Secret.Name+"/"+ep.TLSConfig.CA.Secret.Key] = ep.TLSConfig.CA.Secret
case ep.TLSConfig.CA.ConfigMap != nil:
configMapSelectors[prefix+ep.TLSConfig.CA.ConfigMap.Name+"/"+ep.TLSConfig.CA.ConfigMap.Key] = ep.TLSConfig.CA.ConfigMap
}
}
if ep.TLSConfig.Cert != (monitoringv1.SecretOrConfigMap{}) {
switch {
case ep.TLSConfig.Cert.Secret != nil:
secretSelectors[prefix+ep.TLSConfig.Cert.Secret.Name+"/"+ep.TLSConfig.Cert.Secret.Key] = ep.TLSConfig.Cert.Secret
case ep.TLSConfig.Cert.ConfigMap != nil:
configMapSelectors[prefix+ep.TLSConfig.Cert.ConfigMap.Name+"/"+ep.TLSConfig.Cert.ConfigMap.Key] = ep.TLSConfig.Cert.ConfigMap
}
}
if ep.TLSConfig.KeySecret != nil {
secretSelectors[prefix+ep.TLSConfig.KeySecret.Name+"/"+ep.TLSConfig.KeySecret.Key] = ep.TLSConfig.KeySecret
}
for key, selector := range secretSelectors {
sClient := c.kclient.CoreV1().Secrets(mon.Namespace)
asset, err := getCredFromSecret(
sClient,
*selector,
"tls config",
key,
nsSecretCache,
)
if err != nil {
return nil, fmt.Errorf(
"failed to extract endpoint tls asset for servicemonitor %v from secret %v and key %v in namespace %v",
mon.Name, selector.Name, selector.Key, mon.Namespace,
)
}
assets[fmt.Sprintf(
"%v_%v_%v",
mon.Namespace,
selector.Name,
selector.Key,
)] = TLSAsset(asset)
}
for key, selector := range configMapSelectors {
sClient := c.kclient.CoreV1().ConfigMaps(mon.Namespace)
asset, err := getCredFromConfigMap(
sClient,
*selector,
"tls config",
key,
nsConfigMapCache,
)
if err != nil {
return nil, fmt.Errorf(
"failed to extract endpoint tls asset for servicemonitor %v from configmap %v and key %v in namespace %v",
mon.Name, selector.Name, selector.Key, mon.Namespace,
)
}
assets[fmt.Sprintf(
"%v_%v_%v",
mon.Namespace,
selector.Name,
selector.Key,
)] = TLSAsset(asset)
}
}
}
return assets, nil
}
func (c *Operator) createOrUpdateConfigurationSecret(p *monitoringv1.Prometheus, ruleConfigMapNames []string) error {
smons, err := c.selectServiceMonitors(p)
if err != nil {
return errors.Wrap(err, "selecting ServiceMonitors failed")
}
pmons, err := c.selectPodMonitors(p)
if err != nil {
return errors.Wrap(err, "selecting PodMonitors failed")
}
sClient := c.kclient.CoreV1().Secrets(p.Namespace)
SecretsInPromNS, err := sClient.List(metav1.ListOptions{})
if err != nil {
return err
}
basicAuthSecrets, err := c.loadBasicAuthSecrets(smons, p.Spec.RemoteRead, p.Spec.RemoteWrite, p.Spec.APIServerConfig, SecretsInPromNS)
if err != nil {
return err
}
bearerTokens, err := c.loadBearerTokensFromSecrets(smons)
if err != nil {
return err
}
additionalScrapeConfigs, err := c.loadAdditionalScrapeConfigsSecret(p.Spec.AdditionalScrapeConfigs, SecretsInPromNS)
if err != nil {
return errors.Wrap(err, "loading additional scrape configs from Secret failed")
}
additionalAlertRelabelConfigs, err := c.loadAdditionalScrapeConfigsSecret(p.Spec.AdditionalAlertRelabelConfigs, SecretsInPromNS)
if err != nil {
return errors.Wrap(err, "loading additional alert relabel configs from Secret failed")
}
additionalAlertManagerConfigs, err := c.loadAdditionalScrapeConfigsSecret(p.Spec.AdditionalAlertManagerConfigs, SecretsInPromNS)
if err != nil {
return errors.Wrap(err, "loading additional alert manager configs from Secret failed")
}
// Update secret based on the most recent configuration.
conf, err := c.configGenerator.generateConfig(
p,
smons,
pmons,
basicAuthSecrets,
bearerTokens,
additionalScrapeConfigs,
additionalAlertRelabelConfigs,
additionalAlertManagerConfigs,
ruleConfigMapNames,
)
if err != nil {
return errors.Wrap(err, "generating config failed")
}
s := makeConfigSecret(p, c.config)
s.ObjectMeta.Annotations = map[string]string{
"generated": "true",
}
// Compress the config to stay under the 1MB Secret size limit (temporary measure).
var buf bytes.Buffer
if err = gzipConfig(&buf, conf); err != nil {
return errors.Wrap(err, "couldnt gzip config")
}
s.Data[configFilename] = buf.Bytes()
curSecret, err := sClient.Get(s.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
level.Debug(c.logger).Log("msg", "creating configuration")
_, err = sClient.Create(s)
return err
}
var (
generatedConf = s.Data[configFilename]
curConfig, curConfigFound = curSecret.Data[configFilename]
)
if curConfigFound {
if bytes.Equal(curConfig, generatedConf) {
level.Debug(c.logger).Log("msg", "updating Prometheus configuration secret skipped, no configuration change")
return nil
}
level.Debug(c.logger).Log("msg", "current Prometheus configuration has changed")
} else {
level.Debug(c.logger).Log("msg", "no current Prometheus configuration secret found", "currentConfigFound", curConfigFound)
}
level.Debug(c.logger).Log("msg", "updating Prometheus configuration secret")
_, err = sClient.Update(s)
return err
}
func (c *Operator) createOrUpdateTLSAssetSecret(p *monitoringv1.Prometheus) error {
boolTrue := true
sClient := c.kclient.CoreV1().Secrets(p.Namespace)
smons, err := c.selectServiceMonitors(p)
if err != nil {
return errors.Wrap(err, "selecting ServiceMonitors failed")
}
tlsAssets, err := c.loadTLSAssets(smons)
if err != nil {
return err
}
tlsAssetsSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: tlsAssetsSecretName(p.Name),
Labels: c.config.Labels.Merge(managedByOperatorLabels),
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: p.APIVersion,
BlockOwnerDeletion: &boolTrue,
Controller: &boolTrue,
Kind: p.Kind,
Name: p.Name,
UID: p.UID,
},
},
},
Data: map[string][]byte{},
}
for key, asset := range tlsAssets {
tlsAssetsSecret.Data[key] = []byte(asset)
}
_, err = sClient.Get(tlsAssetsSecret.Name, metav1.GetOptions{})
if err != nil {
if !apierrors.IsNotFound(err) {
return errors.Wrapf(
err,
"failed to check whether tls assets secret already exists for Prometheus %v in namespace %v",
p.Name,
p.Namespace,
)
}
_, err = sClient.Create(tlsAssetsSecret)
level.Debug(c.logger).Log("msg", "created tlsAssetsSecret", "secretname", tlsAssetsSecret.Name)
} else {
_, err = sClient.Update(tlsAssetsSecret)
level.Debug(c.logger).Log("msg", "updated tlsAssetsSecret", "secretname", tlsAssetsSecret.Name)
}
if err != nil {
return errors.Wrapf(err, "failed to create TLS assets secret for Prometheus %v in namespace %v", p.Name, p.Namespace)
}
return nil
}
func (c *Operator) selectServiceMonitors(p *monitoringv1.Prometheus) (map[string]*monitoringv1.ServiceMonitor, error) {
namespaces := []string{}
// Selectors (<namespace>/<name>) might overlap. Deduplicate them using the keyFunc.
res := make(map[string]*monitoringv1.ServiceMonitor)
servMonSelector, err := metav1.LabelSelectorAsSelector(p.Spec.ServiceMonitorSelector)
if err != nil {
return nil, err
}
// If 'ServiceMonitorNamespaceSelector' is nil only check own namespace.
if p.Spec.ServiceMonitorNamespaceSelector == nil {
namespaces = append(namespaces, p.Namespace)
} else {
servMonNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.ServiceMonitorNamespaceSelector)
if err != nil {
return nil, err
}
namespaces, err = c.listMatchingNamespaces(servMonNSSelector)
if err != nil {
return nil, err
}
}
level.Debug(c.logger).Log("msg", "filtering namespaces to select ServiceMonitors from", "namespaces", strings.Join(namespaces, ","), "namespace", p.Namespace, "prometheus", p.Name)
for _, ns := range namespaces {
cache.ListAllByNamespace(c.smonInf.GetIndexer(), ns, servMonSelector, func(obj interface{}) {
k, ok := c.keyFunc(obj)
if ok {
res[k] = obj.(*monitoringv1.ServiceMonitor)
}
})
}
// If denied by Prometheus spec, filter out all service monitors that access
// the file system.
if p.Spec.ArbitraryFSAccessThroughSMs.Deny {
for namespaceAndName, sm := range res {
for _, endpoint := range sm.Spec.Endpoints {
if err := testForArbitraryFSAccess(endpoint); err != nil {
delete(res, namespaceAndName)
level.Warn(c.logger).Log(
"msg", "skipping servicemonitor",
"error", err.Error(),
"servicemonitor", namespaceAndName,
"namespace", p.Namespace,
"prometheus", p.Name,
)
}
}
}
}
serviceMonitors := []string{}
for k := range res {
serviceMonitors = append(serviceMonitors, k)
}
level.Debug(c.logger).Log("msg", "selected ServiceMonitors", "servicemonitors", strings.Join(serviceMonitors, ","), "namespace", p.Namespace, "prometheus", p.Name)
return res, nil
}
func (c *Operator) selectPodMonitors(p *monitoringv1.Prometheus) (map[string]*monitoringv1.PodMonitor, error) {
namespaces := []string{}
// Selectors might overlap. Deduplicate them using the keyFunc.
res := make(map[string]*monitoringv1.PodMonitor)
podMonSelector, err := metav1.LabelSelectorAsSelector(p.Spec.PodMonitorSelector)
if err != nil {
return nil, err
}
// If 'PodMonitorNamespaceSelector' is nil only check own namespace.
if p.Spec.PodMonitorNamespaceSelector == nil {
namespaces = append(namespaces, p.Namespace)
} else {
podMonNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.PodMonitorNamespaceSelector)
if err != nil {
return nil, err
}
namespaces, err = c.listMatchingNamespaces(podMonNSSelector)
if err != nil {
return nil, err
}
}
level.Debug(c.logger).Log("msg", "filtering namespaces to select PodMonitors from", "namespaces", strings.Join(namespaces, ","), "namespace", p.Namespace, "prometheus", p.Name)
podMonitors := []string{}
for _, ns := range namespaces {
cache.ListAllByNamespace(c.pmonInf.GetIndexer(), ns, podMonSelector, func(obj interface{}) {
k, ok := c.keyFunc(obj)
if ok {
res[k] = obj.(*monitoringv1.PodMonitor)
podMonitors = append(podMonitors, k)
}
})
}
level.Debug(c.logger).Log("msg", "selected PodMonitors", "podmonitors", strings.Join(podMonitors, ","), "namespace", p.Namespace, "prometheus", p.Name)
return res, nil
}
func testForArbitraryFSAccess(e monitoringv1.Endpoint) error {
if e.BearerTokenFile != "" {
return errors.New("it accesses file system via bearer token file which Prometheus specification prohibits")
}
tlsConf := e.TLSConfig
if tlsConf == nil {
return nil
}
if err := e.TLSConfig.Validate(); err != nil {
return err
}
if tlsConf.CAFile != "" || tlsConf.CertFile != "" || tlsConf.KeyFile != "" {
return errors.New("it accesses file system via tls config which Prometheus specification prohibits")
}
return nil
}
// listMatchingNamespaces lists all the namespaces that match the provided
// selector.
func (c *Operator) listMatchingNamespaces(selector labels.Selector) ([]string, error) {
var ns []string
err := cache.ListAll(c.nsInf.GetStore(), selector, func(obj interface{}) {
ns = append(ns, obj.(*v1.Namespace).Name)
})
if err != nil {
return nil, errors.Wrap(err, "failed to list namespaces")
}
return ns, nil
}
func (c *Operator) createCRDs() error {
crds := []*extensionsobj.CustomResourceDefinition{
k8sutil.NewCustomResourceDefinition(c.config.CrdKinds.Prometheus, monitoring.GroupName, c.config.Labels.LabelsMap, c.config.EnableValidation),
k8sutil.NewCustomResourceDefinition(c.config.CrdKinds.ServiceMonitor, monitoring.GroupName, c.config.Labels.LabelsMap, c.config.EnableValidation),
k8sutil.NewCustomResourceDefinition(c.config.CrdKinds.PodMonitor, monitoring.GroupName, c.config.Labels.LabelsMap, c.config.EnableValidation),
k8sutil.NewCustomResourceDefinition(c.config.CrdKinds.PrometheusRule, monitoring.GroupName, c.config.Labels.LabelsMap, c.config.EnableValidation),
}
crdClient := c.crdclient.ApiextensionsV1beta1().CustomResourceDefinitions()
for _, crd := range crds {
oldCRD, err := crdClient.Get(crd.Name, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return errors.Wrapf(err, "getting CRD: %s", crd.Spec.Names.Kind)
}
if apierrors.IsNotFound(err) {
if _, err := crdClient.Create(crd); err != nil {
return errors.Wrapf(err, "creating CRD: %s", crd.Spec.Names.Kind)
}
level.Info(c.logger).Log("msg", "CRD created", "crd", crd.Spec.Names.Kind)
}
if err == nil {
crd.ResourceVersion = oldCRD.ResourceVersion
if _, err := crdClient.Update(crd); err != nil {
return errors.Wrapf(err, "creating CRD: %s", crd.Spec.Names.Kind)
}
level.Info(c.logger).Log("msg", "CRD updated", "crd", crd.Spec.Names.Kind)
}
}
crdListFuncs := []struct {
name string
listFunc func(opts metav1.ListOptions) (runtime.Object, error)
}{
{
monitoringv1.PrometheusesKind,
listwatch.MultiNamespaceListerWatcher(c.logger, c.config.Namespaces.PrometheusAllowList, c.config.Namespaces.DenyList, func(namespace string) cache.ListerWatcher {
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return c.mclient.MonitoringV1().Prometheuses(namespace).List(options)
},
}
}).List,
},
{
monitoringv1.ServiceMonitorsKind,
listwatch.MultiNamespaceListerWatcher(c.logger, c.config.Namespaces.AllowList, c.config.Namespaces.DenyList, func(namespace string) cache.ListerWatcher {
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return c.mclient.MonitoringV1().ServiceMonitors(namespace).List(options)
},
}
}).List,
},
{
monitoringv1.PodMonitorsKind,
listwatch.MultiNamespaceListerWatcher(c.logger, c.config.Namespaces.AllowList, c.config.Namespaces.DenyList, func(namespace string) cache.ListerWatcher {
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return c.mclient.MonitoringV1().PodMonitors(namespace).List(options)
},
}
}).List,
},
{
monitoringv1.PrometheusRuleKind,
listwatch.MultiNamespaceListerWatcher(c.logger, c.config.Namespaces.AllowList, c.config.Namespaces.DenyList, func(namespace string) cache.ListerWatcher {
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return c.mclient.MonitoringV1().PrometheusRules(namespace).List(options)
},
}
}).List,
},
}
for _, crdListFunc := range crdListFuncs {
err := k8sutil.WaitForCRDReady(crdListFunc.listFunc)
if err != nil {
return errors.Wrapf(err, "waiting for %v crd failed", crdListFunc.name)
}
}
return nil
}
| 1 | 13,476 | Would this mean that podMonitorSelectors suffer from the same issue if they are the only configuration set? | prometheus-operator-prometheus-operator | go |
@@ -110,6 +110,17 @@ func (c *fakeClient) ReportPipedMeta(ctx context.Context, req *pipedservice.Repo
return &pipedservice.ReportPipedMetaResponse{}, nil
}
+// GetEnvironment finds and returns the environment for the specified ID.
+func (c *fakeClient) GetEnvironment(ctx context.Context, req *pipedservice.GetEnvironmentRequest, opts ...grpc.CallOption) (*pipedservice.GetEnvironmentResponse, error) {
+ c.logger.Info("fake client received GetEnvironment rpc", zap.Any("request", req))
+ return &pipedservice.GetEnvironmentResponse{
+ Environment: &model.Environment{
+ Id: "dev",
+ Name: "dev",
+ },
+ }, nil
+}
+
// ListApplications returns a list of registered applications
// that should be managed by the requested piped.
// Disabled applications should not be included in the response. | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pipedclientfake
import (
"context"
"fmt"
"sync"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/pipe-cd/pipe/pkg/app/api/service/pipedservice"
"github.com/pipe-cd/pipe/pkg/model"
)
type fakeClient struct {
applications map[string]*model.Application
deployments map[string]*model.Deployment
mu sync.RWMutex
logger *zap.Logger
}
// NewClient returns a new fakeClient.
func NewClient(logger *zap.Logger) *fakeClient {
var (
projectID = "local-project"
envID = "dev"
pipedID = "local-piped"
apps = make(map[string]*model.Application, 0)
k8sAppNames = map[string]bool{
"analysis-by-http": false,
"analysis-by-log": false,
"analysis-by-metrics": false,
"analysis-with-baseline": false,
"bluegreen": false,
"canary": true,
"helm-local-chart": false,
"helm-remote-chart": false,
"helm-remote-git-chart": false,
"kustomize-local-base": false,
"kustomize-remote-base": false,
"mesh-envoy-bluegreen": false,
"mesh-envoy-canary": false,
"mesh-istio-bluegreen": false,
"mesh-istio-canary": false,
"multi-steps-canary": false,
"simple": false,
"wait-approval": false,
}
)
// Register applications for debug repository.
for name, enable := range k8sAppNames {
app := &model.Application{
Id: projectID + "/" + envID + "/" + name,
Name: name,
EnvId: envID,
PipedId: pipedID,
ProjectId: projectID,
Kind: model.ApplicationKind_KUBERNETES,
CloudProvider: "kubernetes-default",
GitPath: &model.ApplicationGitPath{
RepoId: "debug",
Path: "kubernetes/" + name,
},
Disabled: !enable,
}
apps[app.Id] = app
}
return &fakeClient{
applications: apps,
deployments: map[string]*model.Deployment{},
logger: logger.Named("fake-piped-client"),
}
}
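// Usage sketch (illustrative only, not part of the original file): a piped
// binary running against the debug/local environment could construct this
// fake client instead of a real API client. zap.NewExample() is just a
// convenient ready-made logger for the example.
//
//	client := NewClient(zap.NewExample())
//	resp, _ := client.ListApplications(context.Background(), &pipedservice.ListApplicationsRequest{})
//	_ = resp // resp.Applications contains the enabled debug applications.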
// Close closes the connection to server.
func (c *fakeClient) Close() error {
c.logger.Info("fakeClient client is closing")
return nil
}
// Ping is periodically sent by piped to report its realtime status/stats to the control-plane.
// The received stats will be pushed to the metrics collector.
func (c *fakeClient) Ping(ctx context.Context, req *pipedservice.PingRequest, opts ...grpc.CallOption) (*pipedservice.PingResponse, error) {
c.logger.Info("fake client received Ping rpc", zap.Any("request", req))
return &pipedservice.PingResponse{}, nil
}
// ReportPipedMeta is sent by piped while starting up to report its metadata
// such as configured cloud providers.
func (c *fakeClient) ReportPipedMeta(ctx context.Context, req *pipedservice.ReportPipedMetaRequest, opts ...grpc.CallOption) (*pipedservice.ReportPipedMetaResponse, error) {
c.logger.Info("fake client received ReportPipedMeta rpc", zap.Any("request", req))
return &pipedservice.ReportPipedMetaResponse{}, nil
}
// ListApplications returns a list of registered applications
// that should be managed by the requested piped.
// Disabled applications should not be included in the response.
// Piped uses this RPC to fetch and sync the application configuration into its local database.
func (c *fakeClient) ListApplications(ctx context.Context, req *pipedservice.ListApplicationsRequest, opts ...grpc.CallOption) (*pipedservice.ListApplicationsResponse, error) {
c.logger.Info("fake client received ListApplications rpc", zap.Any("request", req))
apps := make([]*model.Application, 0, len(c.applications))
for _, app := range c.applications {
if app.Disabled {
continue
}
apps = append(apps, app)
}
return &pipedservice.ListApplicationsResponse{
Applications: apps,
}, nil
}
// ReportApplicationSyncState is used to update the sync status of an application.
func (c *fakeClient) ReportApplicationSyncState(ctx context.Context, req *pipedservice.ReportApplicationSyncStateRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationSyncStateResponse, error) {
c.logger.Info("fake client received ReportApplicationSyncState rpc", zap.Any("request", req))
// This handler mutates the stored application, so take the write lock.
c.mu.Lock()
defer c.mu.Unlock()
app, ok := c.applications[req.ApplicationId]
if !ok {
return nil, status.Error(codes.NotFound, "application was not found")
}
app.SyncState = req.State
return &pipedservice.ReportApplicationSyncStateResponse{}, nil
}
// ReportApplicationMostRecentDeployment is used to update the basic information about
// the most recent deployment of a specific application.
func (c *fakeClient) ReportApplicationMostRecentDeployment(ctx context.Context, req *pipedservice.ReportApplicationMostRecentDeploymentRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationMostRecentDeploymentResponse, error) {
c.logger.Info("fake client received ReportApplicationMostRecentDeployment rpc", zap.Any("request", req))
// This handler mutates the stored application, so take the write lock.
c.mu.Lock()
defer c.mu.Unlock()
app, ok := c.applications[req.ApplicationId]
if !ok {
return nil, status.Error(codes.NotFound, "application was not found")
}
switch req.Status {
case model.DeploymentStatus_DEPLOYMENT_SUCCESS:
app.MostRecentlySuccessfulDeployment = req.Deployment
case model.DeploymentStatus_DEPLOYMENT_PENDING:
app.MostRecentlyTriggeredDeployment = req.Deployment
}
return &pipedservice.ReportApplicationMostRecentDeploymentResponse{}, nil
}
// GetApplicationMostRecentDeployment returns the most recent deployment of the given application.
func (c *fakeClient) GetApplicationMostRecentDeployment(ctx context.Context, req *pipedservice.GetApplicationMostRecentDeploymentRequest, opts ...grpc.CallOption) (*pipedservice.GetApplicationMostRecentDeploymentResponse, error) {
c.logger.Info("fake client received GetApplicationMostRecentDeployment rpc", zap.Any("request", req))
c.mu.RLock()
defer c.mu.RUnlock()
app, ok := c.applications[req.ApplicationId]
if !ok {
return nil, status.Error(codes.NotFound, "application was not found")
}
if req.Status == model.DeploymentStatus_DEPLOYMENT_SUCCESS && app.MostRecentlySuccessfulDeployment != nil {
return &pipedservice.GetApplicationMostRecentDeploymentResponse{Deployment: app.MostRecentlySuccessfulDeployment}, nil
}
if req.Status == model.DeploymentStatus_DEPLOYMENT_PENDING && app.MostRecentlyTriggeredDeployment != nil {
return &pipedservice.GetApplicationMostRecentDeploymentResponse{Deployment: app.MostRecentlyTriggeredDeployment}, nil
}
return nil, status.Error(codes.NotFound, "")
}
// ListNotCompletedDeployments returns a list of not completed deployments
// which are managed by this piped.
// The DeploymentController component uses this RPC to spawn/sync its local deployment executors.
func (c *fakeClient) ListNotCompletedDeployments(ctx context.Context, req *pipedservice.ListNotCompletedDeploymentsRequest, opts ...grpc.CallOption) (*pipedservice.ListNotCompletedDeploymentsResponse, error) {
c.logger.Info("fake client received ListNotCompletedDeployments rpc", zap.Any("request", req))
c.mu.RLock()
defer c.mu.RUnlock()
deployments := make([]*model.Deployment, 0, len(c.deployments))
for _, d := range c.deployments {
if model.IsCompletedDeployment(d.Status) {
continue
}
deployments = append(deployments, d.Clone())
}
return &pipedservice.ListNotCompletedDeploymentsResponse{
Deployments: deployments,
}, nil
}
// CreateDeployment creates/triggers a new deployment for an application
// that is managed by this piped.
// This will be used by DeploymentTrigger component.
func (c *fakeClient) CreateDeployment(ctx context.Context, req *pipedservice.CreateDeploymentRequest, opts ...grpc.CallOption) (*pipedservice.CreateDeploymentResponse, error) {
c.logger.Info("fake client received CreateDeployment rpc", zap.Any("request", req))
c.mu.Lock()
defer c.mu.Unlock()
if _, ok := c.deployments[req.Deployment.Id]; ok {
return nil, status.Error(codes.AlreadyExists, "")
}
c.deployments[req.Deployment.Id] = req.Deployment
return &pipedservice.CreateDeploymentResponse{}, nil
}
// ReportDeploymentPlanned is used by piped to update the status
// of a specific deployment to PLANNED.
func (c *fakeClient) ReportDeploymentPlanned(ctx context.Context, req *pipedservice.ReportDeploymentPlannedRequest, opts ...grpc.CallOption) (*pipedservice.ReportDeploymentPlannedResponse, error) {
c.logger.Info("fake client received ReportDeploymentPlanned rpc", zap.Any("request", req))
c.mu.Lock()
defer c.mu.Unlock()
d, ok := c.deployments[req.DeploymentId]
if !ok {
return nil, status.Error(codes.NotFound, "deployment was not found")
}
s := model.DeploymentStatus_DEPLOYMENT_PLANNED
if !model.CanUpdateDeploymentStatus(d.Status, s) {
msg := fmt.Sprintf("invalid status, cur = %s, req = %s", d.Status.String(), s.String())
return nil, status.Error(codes.FailedPrecondition, msg)
}
if req.Summary != "" {
d.Summary = req.Summary
}
d.Status = s
d.StatusReason = req.StatusReason
d.RunningCommitHash = req.RunningCommitHash
d.Version = req.Version
if len(req.Stages) > 0 {
d.Stages = req.Stages
}
return &pipedservice.ReportDeploymentPlannedResponse{}, nil
}
// ReportDeploymentStatusChanged is used to update the status
// of a specific deployment to RUNNING or ROLLING_BACK.
func (c *fakeClient) ReportDeploymentStatusChanged(ctx context.Context, req *pipedservice.ReportDeploymentStatusChangedRequest, opts ...grpc.CallOption) (*pipedservice.ReportDeploymentStatusChangedResponse, error) {
c.logger.Info("fake client received ReportDeploymentStatusChanged rpc", zap.Any("request", req))
c.mu.Lock()
defer c.mu.Unlock()
d, ok := c.deployments[req.DeploymentId]
if !ok {
return nil, status.Error(codes.NotFound, "deployment was not found")
}
if !model.CanUpdateDeploymentStatus(d.Status, req.Status) {
msg := fmt.Sprintf("invalid status, cur = %s, req = %s", d.Status.String(), req.Status.String())
return nil, status.Error(codes.FailedPrecondition, msg)
}
d.Status = req.Status
d.StatusReason = req.StatusReason
return &pipedservice.ReportDeploymentStatusChangedResponse{}, nil
}
// ReportDeploymentCompleted is used by piped to update the status
// of a specific deployment to SUCCESS | FAILURE | CANCELLED.
func (c *fakeClient) ReportDeploymentCompleted(ctx context.Context, req *pipedservice.ReportDeploymentCompletedRequest, opts ...grpc.CallOption) (*pipedservice.ReportDeploymentCompletedResponse, error) {
c.logger.Info("fake client received ReportDeploymentCompleted rpc", zap.Any("request", req))
c.mu.Lock()
defer c.mu.Unlock()
d, ok := c.deployments[req.DeploymentId]
if !ok {
return nil, status.Error(codes.NotFound, "deployment was not found")
}
if !model.IsCompletedDeployment(req.Status) {
msg := fmt.Sprintf("invalid status, expected a completed one but got %s", req.Status.String())
return nil, status.Error(codes.FailedPrecondition, msg)
}
if !model.CanUpdateDeploymentStatus(d.Status, req.Status) {
msg := fmt.Sprintf("invalid status, cur = %s, req = %s", d.Status.String(), req.Status.String())
return nil, status.Error(codes.FailedPrecondition, msg)
}
d.Status = req.Status
d.StatusReason = req.StatusReason
d.CompletedAt = req.CompletedAt
for _, stage := range d.Stages {
if status, ok := req.StageStatuses[stage.Id]; ok {
stage.Status = status
}
}
return &pipedservice.ReportDeploymentCompletedResponse{}, nil
}
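// What follows is a hypothetical usage sketch, not part of the fake client: it shows
// how a caller could drive the PENDING -> PLANNED -> completed flow that the RPCs
// above validate. Only request fields and enum values already referenced in this file
// are used; the deployment ID is an arbitrary example value, and whether a PLANNED
// deployment may be completed directly depends on model.CanUpdateDeploymentStatus.
func exampleDeploymentLifecycle(ctx context.Context, client pipedservice.PipedServiceClient) error {
	d := &model.Deployment{
		Id:     "example-deployment-id", // hypothetical ID for illustration only
		Status: model.DeploymentStatus_DEPLOYMENT_PENDING,
	}
	// Trigger the deployment.
	if _, err := client.CreateDeployment(ctx, &pipedservice.CreateDeploymentRequest{Deployment: d}); err != nil {
		return err
	}
	// Move it to PLANNED; the server rejects transitions disallowed by CanUpdateDeploymentStatus.
	if _, err := client.ReportDeploymentPlanned(ctx, &pipedservice.ReportDeploymentPlannedRequest{
		DeploymentId: d.Id,
		Summary:      "example summary",
	}); err != nil {
		return err
	}
	// Report a completed status; only statuses passing model.IsCompletedDeployment are accepted.
	_, err := client.ReportDeploymentCompleted(ctx, &pipedservice.ReportDeploymentCompletedRequest{
		DeploymentId: d.Id,
		Status:       model.DeploymentStatus_DEPLOYMENT_SUCCESS,
		StatusReason: "finished successfully",
	})
	return err
}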
// SaveDeploymentMetadata is used by piped to persist the metadata of a specific deployment.
func (c *fakeClient) SaveDeploymentMetadata(ctx context.Context, req *pipedservice.SaveDeploymentMetadataRequest, opts ...grpc.CallOption) (*pipedservice.SaveDeploymentMetadataResponse, error) {
c.logger.Info("fake client received SaveDeploymentMetadata rpc", zap.Any("request", req))
c.mu.Lock()
defer c.mu.Unlock()
d, ok := c.deployments[req.DeploymentId]
if !ok {
return nil, status.Error(codes.NotFound, "deployment was not found")
}
d.Metadata = req.Metadata
return &pipedservice.SaveDeploymentMetadataResponse{}, nil
}
// SaveStageMetadata is used by piped to persist the metadata
// of a specific stage of a deployment.
func (c *fakeClient) SaveStageMetadata(ctx context.Context, req *pipedservice.SaveStageMetadataRequest, opts ...grpc.CallOption) (*pipedservice.SaveStageMetadataResponse, error) {
c.logger.Info("fake client received SaveStageMetadata rpc", zap.Any("request", req))
c.mu.Lock()
defer c.mu.Unlock()
d, ok := c.deployments[req.DeploymentId]
if !ok {
return nil, status.Error(codes.NotFound, "deployment was not found")
}
for _, s := range d.Stages {
if s.Id != req.StageId {
continue
}
s.Metadata = req.Metadata
return &pipedservice.SaveStageMetadataResponse{}, nil
}
return nil, status.Error(codes.NotFound, "stage was not found")
}
// ReportStageLogs is sent by piped to save the log of a pipeline stage.
func (c *fakeClient) ReportStageLogs(ctx context.Context, req *pipedservice.ReportStageLogsRequest, opts ...grpc.CallOption) (*pipedservice.ReportStageLogsResponse, error) {
c.logger.Info("fake client received ReportStageLogs rpc", zap.Any("request", req))
return &pipedservice.ReportStageLogsResponse{}, nil
}
// ReportStageLogsFromLastCheckpoint is used to save the full logs from the most recently saved point.
func (c *fakeClient) ReportStageLogsFromLastCheckpoint(ctx context.Context, req *pipedservice.ReportStageLogsFromLastCheckpointRequest, opts ...grpc.CallOption) (*pipedservice.ReportStageLogsFromLastCheckpointResponse, error) {
c.logger.Info("fake client received ReportStageLogsFromLastCheckpoint rpc", zap.Any("request", req))
return &pipedservice.ReportStageLogsFromLastCheckpointResponse{}, nil
}
// ReportStageStatusChanged is used by piped to update the status
// of a specific stage of a deployment.
func (c *fakeClient) ReportStageStatusChanged(ctx context.Context, req *pipedservice.ReportStageStatusChangedRequest, opts ...grpc.CallOption) (*pipedservice.ReportStageStatusChangedResponse, error) {
c.logger.Info("fake client received ReportStageStatusChanged rpc", zap.Any("request", req))
c.mu.Lock()
defer c.mu.Unlock()
d, ok := c.deployments[req.DeploymentId]
if !ok {
return nil, status.Error(codes.NotFound, "deployment was not found")
}
for _, s := range d.Stages {
if s.Id != req.StageId {
continue
}
s.Status = req.Status
s.RetriedCount = req.RetriedCount
s.Visible = req.Visible
s.CompletedAt = req.CompletedAt
return &pipedservice.ReportStageStatusChangedResponse{}, nil
}
return nil, status.Error(codes.NotFound, "stage was not found")
}
// ListUnhandledCommands is periodically called by piped to obtain the commands
// that should be handled.
// Whenever a user makes an interaction from the WebUI (cancel/approve/retry/sync),
// a new command with a unique identifier will be generated and saved into the datastore.
// Piped uses this RPC to list all still-not-handled commands, handle them,
// and then report the result back to the server.
// On the other side, the web will periodically check the command status and feed the result back to the user.
// In the future, we may need a solution to remove all old handled commands from the datastore to save space.
func (c *fakeClient) ListUnhandledCommands(ctx context.Context, req *pipedservice.ListUnhandledCommandsRequest, opts ...grpc.CallOption) (*pipedservice.ListUnhandledCommandsResponse, error) {
c.logger.Info("fake client received ListUnhandledCommands rpc", zap.Any("request", req))
return &pipedservice.ListUnhandledCommandsResponse{}, nil
}
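// Hypothetical sketch, not part of the fake client: one pass of the poll-and-report
// loop described above. Against this fake, the loop is a no-op because the response
// is always empty. The `Commands` response field, the `CommandId` request field and
// the COMMAND_SUCCEEDED status are assumptions about the generated pipedservice and
// model types; they are not taken from this file.
func examplePollUnhandledCommands(ctx context.Context, client pipedservice.PipedServiceClient) error {
	resp, err := client.ListUnhandledCommands(ctx, &pipedservice.ListUnhandledCommandsRequest{})
	if err != nil {
		return err
	}
	for _, cmd := range resp.Commands {
		// Handle the command (cancel/approve/retry/sync), then report the result back.
		if _, err := client.ReportCommandHandled(ctx, &pipedservice.ReportCommandHandledRequest{
			CommandId: cmd.Id,
			Status:    model.CommandStatus_COMMAND_SUCCEEDED,
		}); err != nil {
			return err
		}
	}
	return nil
}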
// ReportCommandHandled is called by piped to mark a specific command as handled.
// The request payload will contain the handle status as well as any additional result data.
// The handle result should be updated to both datastore and cache (for reading from web).
func (c *fakeClient) ReportCommandHandled(ctx context.Context, req *pipedservice.ReportCommandHandledRequest, opts ...grpc.CallOption) (*pipedservice.ReportCommandHandledResponse, error) {
c.logger.Info("fake client received ReportCommandHandled rpc", zap.Any("request", req))
return &pipedservice.ReportCommandHandledResponse{}, nil
}
// ReportApplicationLiveState is periodically sent to correct the full state of an application.
// For a kubernetes application, this contains a full tree of its kubernetes resources.
// The tree data should be written into the filestore immediately and then the state in the cache should be refreshed too.
func (c *fakeClient) ReportApplicationLiveState(ctx context.Context, req *pipedservice.ReportApplicationLiveStateRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationLiveStateResponse, error) {
c.logger.Info("fake client received ReportApplicationLiveState rpc", zap.Any("request", req))
return &pipedservice.ReportApplicationLiveStateResponse{}, nil
}
// ReportApplicationLiveStateEvents is sent by piped to submit one or multiple events
// about the changes of application state.
// The control plane uses the received events to update the state of the application-resource-tree.
// We want to start with a simple solution at this initial stage of development,
// so the API server just handles it as below:
// - loads the related application-resource-tree from filestore
// - checks and builds the new state for the application-resource-tree
// - updates the new state into filestore and cache (cache data is for reading while handling web requests)
// In the future, we may want to redesign the behavior of this RPC by using a pubsub/queue pattern.
// After receiving the events, all of them will be published into a queue immediately,
// and then another Handler service will pick them up in order to build the new state.
// That way we can control the traffic to the datastore in a better way.
func (c *fakeClient) ReportApplicationLiveStateEvents(ctx context.Context, req *pipedservice.ReportApplicationLiveStateEventsRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationLiveStateEventsResponse, error) {
c.logger.Info("fake client received ReportApplicationLiveStateEvents rpc", zap.Any("request", req))
return &pipedservice.ReportApplicationLiveStateEventsResponse{}, nil
}
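// Hypothetical sketch, not part of the fake client: the queue-based handling that the
// comment above mentions as a possible future design. Incoming event batches are
// pushed onto an in-memory queue and a separate worker consumes them in arrival order,
// which is one way to smooth traffic to the datastore. Only a plain Go channel and the
// request type already used in this file are involved; this illustrates the idea and is
// not the control plane's actual implementation.
type liveStateEventQueue struct {
	ch chan *pipedservice.ReportApplicationLiveStateEventsRequest
}

func newLiveStateEventQueue(size int) *liveStateEventQueue {
	return &liveStateEventQueue{ch: make(chan *pipedservice.ReportApplicationLiveStateEventsRequest, size)}
}

// enqueue publishes a received batch of events; it blocks when the queue is full,
// which naturally applies back pressure to the RPC handler.
func (q *liveStateEventQueue) enqueue(req *pipedservice.ReportApplicationLiveStateEventsRequest) {
	q.ch <- req
}

// run consumes batches in arrival order; apply would load the related
// application-resource-tree, build the new state, and update filestore and cache.
func (q *liveStateEventQueue) run(apply func(*pipedservice.ReportApplicationLiveStateEventsRequest)) {
	for req := range q.ch {
		apply(req)
	}
}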
var _ pipedservice.PipedServiceClient = (*fakeClient)(nil)
| 1 | 8,789 | `ctx` is unused in GetEnvironment | pipe-cd-pipe | go |
@@ -89,6 +89,11 @@ namespace pwiz.Skyline.FileUI
}
}
+ public void OKDialog()
+ {
+ DialogResult = DialogResult.OK;
+ }
+
private void cbShowText_CheckedChanged(object sender, System.EventArgs e)
{
LineText.Visible = cbShowText.Checked; | 1 | /*
* Original author: Dario Amodei <damodei .at. stanford.edu>,
* Mallick Lab, Department of Radiology, Stanford
*
* Copyright 2013 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System.Collections.Generic;
using System.Windows.Forms;
using pwiz.Common.DataBinding;
using pwiz.Skyline.Controls;
using pwiz.Skyline.Model;
using pwiz.Skyline.Properties;
using pwiz.Skyline.SettingsUI;
using pwiz.Skyline.Util;
namespace pwiz.Skyline.FileUI
{
public partial class ImportTransitionListErrorDlg : FormEx
{
public ImportTransitionListErrorDlg(List<TransitionImportErrorInfo> errorList, bool isErrorAll, bool offerCancelButton)
{
InitializeComponent();
Icon = Resources.Skyline;
SimpleGridViewDriver<TransitionImportErrorInfo> compareGridViewDriver = new ImportErrorGridViewDriver(
dataGridViewErrors,
bindingSourceGrid, new SortableBindingList<TransitionImportErrorInfo>());
ErrorList = errorList;
foreach (var error in errorList)
{
compareGridViewDriver.Items.Add(error);
}
// If all of the transitions were errors, canceling and accepting are the same
// so give a different message and disable the cancel button
string errorListMessage;
if (isErrorAll)
{
errorListMessage = errorList.Count == 1 ? Resources.ImportTransitionListErrorDlg_ImportTransitionListErrorDlg_The_imported_transition_contains_an_error__Please_check_the_transition_list_and_the_Skyline_settings_and_try_importing_again_ :
string.Format(Resources.ImportTransitionListErrorDlg_ImportTransitionListErrorDlg_All__0__transitions_contained_errors___Please_check_the_transition_list_for_errors_and_try_importing_again_, errorList.Count);
buttonCancel.Visible = false;
// In this case, the OK button should close the error dialog but not the column select dialog
// Simplest way to do this is to treat it as a cancel button
buttonOk.DialogResult = DialogResult.Cancel;
}
else if (offerCancelButton)
{
errorListMessage = errorList.Count == 1 ? Resources.ImportTransitionListErrorDlg_ImportTransitionListErrorDlg_A_transition_contained_an_error__Skip_this_transition_and_import_the_rest_ :
string.Format(Resources.SkylineWindow_ImportMassList__0__transitions_contained_errors__Skip_these__0__transitions_and_import_the_rest_, errorList.Count);
}
else
{
errorListMessage = errorList.Count == 1 ? Resources.ImportTransitionListErrorDlg_ImportTransitionListErrorDlg_A_transition_contained_an_error_ :
string.Format(Resources.SkylineWindow_ImportMassList__0__transitions_contained_errors_, errorList.Count);
buttonCancel.Visible = false;
}
labelErrors.Text = errorListMessage;
}
public List<TransitionImportErrorInfo> ErrorList { get; private set; }
private class ImportErrorGridViewDriver : SimpleGridViewDriver<TransitionImportErrorInfo>
{
public ImportErrorGridViewDriver(DataGridViewEx gridView,
BindingSource bindingSource,
SortableBindingList<TransitionImportErrorInfo> items)
: base(gridView, bindingSource, items)
{
}
protected override void DoPaste()
{
// No pasting.
}
}
private void cbShowText_CheckedChanged(object sender, System.EventArgs e)
{
LineText.Visible = cbShowText.Checked;
}
}
}
| 1 | 14,903 | We usually use OkDialog() | ProteoWizard-pwiz | .cs |
@@ -970,6 +970,12 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
description = "Enable flexible (onchain) privacy groups (default: ${DEFAULT-VALUE})")
private final Boolean isFlexiblePrivacyGroupsEnabled = false;
+ @Option(
+ names = {"--privacy-unrestricted-enabled"},
+ description =
+ "Enable unrestricted (stores payload onchain) privacy (default: ${DEFAULT-VALUE})")
+ private final Boolean isUnrestrictedPrivacyEnabled = false;
+
@Option(
names = {"--target-gas-limit"},
description = | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli;
import static com.google.common.base.Preconditions.checkNotNull;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import static org.hyperledger.besu.cli.DefaultCommandValues.getDefaultBesuDataPath;
import static org.hyperledger.besu.cli.config.NetworkName.MAINNET;
import static org.hyperledger.besu.cli.util.CommandLineUtils.DEPENDENCY_WARNING_MSG;
import static org.hyperledger.besu.controller.BesuController.DATABASE_PATH;
import static org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration.DEFAULT_GRAPHQL_HTTP_PORT;
import static org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration.DEFAULT_JSON_RPC_PORT;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.DEFAULT_JSON_RPC_APIS;
import static org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration.DEFAULT_WEBSOCKET_PORT;
import static org.hyperledger.besu.ethereum.permissioning.GoQuorumPermissioningConfiguration.QIP714_DEFAULT_BLOCK;
import static org.hyperledger.besu.metrics.BesuMetricCategory.DEFAULT_METRIC_CATEGORIES;
import static org.hyperledger.besu.metrics.MetricsProtocol.PROMETHEUS;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PORT;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PUSH_PORT;
import static org.hyperledger.besu.nat.kubernetes.KubernetesNatManager.DEFAULT_BESU_SERVICE_NAME_FILTER;
import org.hyperledger.besu.BesuInfo;
import org.hyperledger.besu.Runner;
import org.hyperledger.besu.RunnerBuilder;
import org.hyperledger.besu.chainexport.RlpBlockExporter;
import org.hyperledger.besu.chainimport.JsonBlockImporter;
import org.hyperledger.besu.chainimport.RlpBlockImporter;
import org.hyperledger.besu.cli.config.EthNetworkConfig;
import org.hyperledger.besu.cli.config.NetworkName;
import org.hyperledger.besu.cli.converter.MetricCategoryConverter;
import org.hyperledger.besu.cli.converter.PercentageConverter;
import org.hyperledger.besu.cli.converter.RpcApisConverter;
import org.hyperledger.besu.cli.custom.CorsAllowedOriginsProperty;
import org.hyperledger.besu.cli.custom.JsonRPCAllowlistHostsProperty;
import org.hyperledger.besu.cli.custom.RpcAuthFileValidator;
import org.hyperledger.besu.cli.error.BesuExceptionHandler;
import org.hyperledger.besu.cli.options.stable.EthstatsOptions;
import org.hyperledger.besu.cli.options.unstable.DataStorageOptions;
import org.hyperledger.besu.cli.options.unstable.DnsOptions;
import org.hyperledger.besu.cli.options.unstable.EthProtocolOptions;
import org.hyperledger.besu.cli.options.unstable.LauncherOptions;
import org.hyperledger.besu.cli.options.unstable.MetricsCLIOptions;
import org.hyperledger.besu.cli.options.unstable.MiningOptions;
import org.hyperledger.besu.cli.options.unstable.NatOptions;
import org.hyperledger.besu.cli.options.unstable.NativeLibraryOptions;
import org.hyperledger.besu.cli.options.unstable.NetworkingOptions;
import org.hyperledger.besu.cli.options.unstable.RPCOptions;
import org.hyperledger.besu.cli.options.unstable.SynchronizerOptions;
import org.hyperledger.besu.cli.options.unstable.TransactionPoolOptions;
import org.hyperledger.besu.cli.presynctasks.PreSynchronizationTaskRunner;
import org.hyperledger.besu.cli.presynctasks.PrivateDatabaseMigrationPreSyncTask;
import org.hyperledger.besu.cli.subcommands.PasswordSubCommand;
import org.hyperledger.besu.cli.subcommands.PublicKeySubCommand;
import org.hyperledger.besu.cli.subcommands.RetestethSubCommand;
import org.hyperledger.besu.cli.subcommands.blocks.BlocksSubCommand;
import org.hyperledger.besu.cli.subcommands.operator.OperatorSubCommand;
import org.hyperledger.besu.cli.subcommands.rlp.RLPSubCommand;
import org.hyperledger.besu.cli.util.BesuCommandCustomFactory;
import org.hyperledger.besu.cli.util.CommandLineUtils;
import org.hyperledger.besu.cli.util.ConfigOptionSearchAndRunHandler;
import org.hyperledger.besu.cli.util.VersionProvider;
import org.hyperledger.besu.config.GenesisConfigFile;
import org.hyperledger.besu.config.GenesisConfigOptions;
import org.hyperledger.besu.config.GoQuorumOptions;
import org.hyperledger.besu.config.experimental.ExperimentalEIPs;
import org.hyperledger.besu.controller.BesuController;
import org.hyperledger.besu.controller.BesuControllerBuilder;
import org.hyperledger.besu.controller.TargetingGasLimitCalculator;
import org.hyperledger.besu.crypto.KeyPair;
import org.hyperledger.besu.crypto.KeyPairSecurityModule;
import org.hyperledger.besu.crypto.KeyPairUtil;
import org.hyperledger.besu.crypto.NodeKey;
import org.hyperledger.besu.crypto.SignatureAlgorithmFactory;
import org.hyperledger.besu.crypto.SignatureAlgorithmType;
import org.hyperledger.besu.enclave.EnclaveFactory;
import org.hyperledger.besu.enclave.GoQuorumEnclave;
import org.hyperledger.besu.ethereum.api.ApiConfiguration;
import org.hyperledger.besu.ethereum.api.ImmutableApiConfiguration;
import org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApi;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis;
import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration;
import org.hyperledger.besu.ethereum.api.tls.FileBasedPasswordProvider;
import org.hyperledger.besu.ethereum.api.tls.TlsClientAuthConfiguration;
import org.hyperledger.besu.ethereum.api.tls.TlsConfiguration;
import org.hyperledger.besu.ethereum.blockcreation.GasLimitCalculator;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.GoQuorumPrivacyParameters;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.MiningParameters;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.eth.sync.SyncMode;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfiguration;
import org.hyperledger.besu.ethereum.mainnet.precompiles.AbstractAltBnPrecompiledContract;
import org.hyperledger.besu.ethereum.p2p.config.DiscoveryConfiguration;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeDnsConfiguration;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURLImpl;
import org.hyperledger.besu.ethereum.p2p.peers.StaticNodesParser;
import org.hyperledger.besu.ethereum.permissioning.GoQuorumPermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.LocalPermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfigurationBuilder;
import org.hyperledger.besu.ethereum.permissioning.SmartContractPermissioningConfiguration;
import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProvider;
import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.worldstate.DefaultWorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.PrunerConfiguration;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.metrics.MetricCategoryRegistryImpl;
import org.hyperledger.besu.metrics.MetricsProtocol;
import org.hyperledger.besu.metrics.MetricsSystemFactory;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.metrics.StandardMetricCategory;
import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration;
import org.hyperledger.besu.metrics.vertx.VertxMetricsAdapterFactory;
import org.hyperledger.besu.nat.NatMethod;
import org.hyperledger.besu.plugin.data.EnodeURL;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.BesuEvents;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.PermissioningService;
import org.hyperledger.besu.plugin.services.PicoCLIOptions;
import org.hyperledger.besu.plugin.services.SecurityModuleService;
import org.hyperledger.besu.plugin.services.StorageService;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.metrics.MetricCategory;
import org.hyperledger.besu.plugin.services.metrics.MetricCategoryRegistry;
import org.hyperledger.besu.plugin.services.securitymodule.SecurityModule;
import org.hyperledger.besu.plugin.services.storage.PrivacyKeyValueStorageFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBPlugin;
import org.hyperledger.besu.services.BesuEventsImpl;
import org.hyperledger.besu.services.BesuPluginContextImpl;
import org.hyperledger.besu.services.PermissioningServiceImpl;
import org.hyperledger.besu.services.PicoCLIOptionsImpl;
import org.hyperledger.besu.services.SecurityModuleServiceImpl;
import org.hyperledger.besu.services.StorageServiceImpl;
import org.hyperledger.besu.services.kvstore.InMemoryStoragePlugin;
import org.hyperledger.besu.util.NetworkUtility;
import org.hyperledger.besu.util.PermissioningConfigurationValidator;
import org.hyperledger.besu.util.number.Fraction;
import org.hyperledger.besu.util.number.Percentage;
import org.hyperledger.besu.util.number.PositiveNumber;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.SocketException;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.file.Path;
import java.time.Clock;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Set;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
import com.google.common.io.Resources;
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;
import io.vertx.core.json.DecodeException;
import io.vertx.core.metrics.MetricsOptions;
import net.consensys.quorum.mainnet.launcher.LauncherManager;
import net.consensys.quorum.mainnet.launcher.config.ImmutableLauncherConfig;
import net.consensys.quorum.mainnet.launcher.exception.LauncherException;
import net.consensys.quorum.mainnet.launcher.util.ParseArgsHelper;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.units.bigints.UInt256;
import picocli.CommandLine;
import picocli.CommandLine.AbstractParseResultHandler;
import picocli.CommandLine.Command;
import picocli.CommandLine.ExecutionException;
import picocli.CommandLine.Option;
import picocli.CommandLine.ParameterException;
@SuppressWarnings("FieldCanBeLocal") // because Picocli injected fields report false positives
@Command(
description = "This command runs the Besu Ethereum client full node.",
abbreviateSynopsis = true,
name = "besu",
mixinStandardHelpOptions = true,
versionProvider = VersionProvider.class,
header = "Usage:",
synopsisHeading = "%n",
descriptionHeading = "%nDescription:%n%n",
optionListHeading = "%nOptions:%n",
footerHeading = "%n",
footer = "Besu is licensed under the Apache License 2.0")
public class BesuCommand implements DefaultCommandValues, Runnable {
@SuppressWarnings("PrivateStaticFinalLoggers")
// non-static for testing
private final Logger logger;
private CommandLine commandLine;
private final Supplier<RlpBlockImporter> rlpBlockImporter;
private final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory;
private final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory;
// Unstable CLI options
final NetworkingOptions unstableNetworkingOptions = NetworkingOptions.create();
final SynchronizerOptions unstableSynchronizerOptions = SynchronizerOptions.create();
final EthProtocolOptions unstableEthProtocolOptions = EthProtocolOptions.create();
final MetricsCLIOptions unstableMetricsCLIOptions = MetricsCLIOptions.create();
final TransactionPoolOptions unstableTransactionPoolOptions = TransactionPoolOptions.create();
private final DataStorageOptions unstableDataStorageOptions = DataStorageOptions.create();
private final DnsOptions unstableDnsOptions = DnsOptions.create();
private final MiningOptions unstableMiningOptions = MiningOptions.create();
private final NatOptions unstableNatOptions = NatOptions.create();
private final NativeLibraryOptions unstableNativeLibraryOptions = NativeLibraryOptions.create();
private final RPCOptions unstableRPCOptions = RPCOptions.create();
final LauncherOptions unstableLauncherOptions = LauncherOptions.create();
// stable CLI options
private final EthstatsOptions ethstatsOptions = EthstatsOptions.create();
private final RunnerBuilder runnerBuilder;
private final BesuController.Builder controllerBuilderFactory;
private final BesuPluginContextImpl besuPluginContext;
private final StorageServiceImpl storageService;
private final SecurityModuleServiceImpl securityModuleService;
private final PermissioningServiceImpl permissioningService;
private final Map<String, String> environment;
private final MetricCategoryRegistryImpl metricCategoryRegistry =
new MetricCategoryRegistryImpl();
private final MetricCategoryConverter metricCategoryConverter = new MetricCategoryConverter();
// Public IP stored to avoid having to look it up again each time we need it.
private InetAddress autoDiscoveredDefaultIP = null;
private final PreSynchronizationTaskRunner preSynchronizationTaskRunner =
new PreSynchronizationTaskRunner();
private final Set<Integer> allocatedPorts = new HashSet<>();
// CLI options defined by user at runtime.
// Options parsing is done with CLI library Picocli https://picocli.info/
// While this variable is never read it is needed for the PicoCLI to create
// the config file option that is read elsewhere.
@SuppressWarnings("UnusedVariable")
@CommandLine.Option(
names = {CONFIG_FILE_OPTION_NAME},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "TOML config file (default: none)")
private final File configFile = null;
@CommandLine.Option(
names = {"--data-path"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "The path to Besu data directory (default: ${DEFAULT-VALUE})")
final Path dataPath = getDefaultBesuDataPath(this);
// Genesis file path with a null default if the option is not defined on the
// command line, as this default is handled by the Runner to use the mainnet
// json file from resources, as indicated by the default network option.
// Therefore we have no control over the genesis default value here.
@CommandLine.Option(
names = {"--genesis-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Genesis file. Setting this option makes --network option ignored and requires --network-id to be set.")
private final File genesisFile = null;
@CommandLine.Option(
names = {"--node-private-key-file"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description =
"The node's private key file (default: a file named \"key\" in the Besu data folder)")
private final File nodePrivateKeyFile = null;
@Option(
names = "--identity",
paramLabel = "<String>",
description = "Identification for this node in the Client ID",
arity = "1")
private final Optional<String> identityString = Optional.empty();
// Completely disables P2P within Besu.
@Option(
names = {"--p2p-enabled"},
description = "Enable P2P functionality (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean p2pEnabled = true;
// Boolean option to indicate if peers should NOT be discovered; defaulting to
// false indicates that the peers should be discovered by default.
//
// This negative option is required because of the nature of the option, which is
// true when added on the command line. You can't do --option=false, so false is
// set as the default and you simply do not set the option at all if you want it
// to be false. This seems to be the only way it works with Picocli.
// Also, many other programs use the same negative-option scheme for false
// defaults, meaning that it's probably the right way to handle disabling options.
@Option(
names = {"--discovery-enabled"},
description = "Enable P2P discovery (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean peerDiscoveryEnabled = true;
// A list of bootstrap nodes can be passed
// and a hardcoded list will be used otherwise by the Runner.
// NOTE: we have no control over default value here.
@Option(
names = {"--bootnodes"},
paramLabel = "<enode://id@host:port>",
description =
"Comma separated enode URLs for P2P discovery bootstrap. "
+ "Default is a predefined list.",
split = ",",
arity = "0..*")
private final List<String> bootNodes = null;
@Option(
names = {"--max-peers"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description = "Maximum P2P connections that can be established (default: ${DEFAULT-VALUE})")
private final Integer maxPeers = DEFAULT_MAX_PEERS;
@Option(
names = {"--remote-connections-limit-enabled"},
description =
"Whether to limit the number of P2P connections initiated remotely. (default: ${DEFAULT-VALUE})")
private final Boolean isLimitRemoteWireConnectionsEnabled = true;
@Option(
names = {"--remote-connections-max-percentage"},
paramLabel = MANDATORY_DOUBLE_FORMAT_HELP,
description =
"The maximum percentage of P2P connections that can be initiated remotely. Must be between 0 and 100 inclusive. (default: ${DEFAULT-VALUE})",
arity = "1",
converter = PercentageConverter.class)
private final Integer maxRemoteConnectionsPercentage =
Fraction.fromFloat(DEFAULT_FRACTION_REMOTE_WIRE_CONNECTIONS_ALLOWED)
.toPercentage()
.getValue();
@Option(
names = {"--random-peer-priority-enabled"},
description =
"Allow for incoming connections to be prioritized randomly. This will prevent (typically small, stable) networks from forming impenetrable peer cliques. (default: ${DEFAULT-VALUE})")
private final Boolean randomPeerPriority = false;
private GenesisConfigOptions genesisConfigOptions;
@Option(
names = {"--banned-node-ids", "--banned-node-id"},
paramLabel = MANDATORY_NODE_ID_FORMAT_HELP,
description = "A list of node IDs to ban from the P2P network.",
split = ",",
arity = "1..*")
void setBannedNodeIds(final List<String> values) {
try {
bannedNodeIds =
values.stream()
.filter(value -> !value.isEmpty())
.map(EnodeURLImpl::parseNodeId)
.collect(Collectors.toList());
} catch (final IllegalArgumentException e) {
throw new ParameterException(
commandLine, "Invalid ids supplied to '--banned-node-ids'. " + e.getMessage());
}
}
private Collection<Bytes> bannedNodeIds = new ArrayList<>();
@Option(
names = {"--sync-mode"},
paramLabel = MANDATORY_MODE_FORMAT_HELP,
description =
"Synchronization mode, possible values are ${COMPLETION-CANDIDATES} (default: FAST if a --network is supplied and privacy isn't enabled. FULL otherwise.)")
private SyncMode syncMode = null;
@Option(
names = {"--fast-sync-min-peers"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Minimum number of peers required before starting fast sync. (default: ${DEFAULT-VALUE})")
private final Integer fastSyncMinPeerCount = FAST_SYNC_MIN_PEER_COUNT;
@Option(
names = {"--network"},
paramLabel = MANDATORY_NETWORK_FORMAT_HELP,
description =
"Synchronize against the indicated network, possible values are ${COMPLETION-CANDIDATES}."
+ " (default: MAINNET)")
private final NetworkName network = null;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--p2p-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Ip address this node advertises to its peers (default: ${DEFAULT-VALUE})",
arity = "1")
private String p2pHost = autoDiscoverDefaultIP().getHostAddress();
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--p2p-interface"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description =
"The network interface address on which this node listens for P2P communication (default: ${DEFAULT-VALUE})",
arity = "1")
private String p2pInterface = NetworkUtility.INADDR_ANY;
@Option(
names = {"--p2p-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port on which to listen for P2P communication (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer p2pPort = EnodeURLImpl.DEFAULT_LISTENING_PORT;
@Option(
names = {"--nat-method"},
description =
"Specify the NAT circumvention method to be used, possible values are ${COMPLETION-CANDIDATES}."
+ " NONE disables NAT functionality. (default: ${DEFAULT-VALUE})")
private final NatMethod natMethod = DEFAULT_NAT_METHOD;
@Option(
names = {"--network-id"},
paramLabel = "<BIG INTEGER>",
description =
"P2P network identifier. (default: the selected network chain ID or custom genesis chain ID)",
arity = "1")
private final BigInteger networkId = null;
@Option(
names = {"--graphql-http-enabled"},
description = "Set to start the GraphQL HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isGraphQLHttpEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--graphql-http-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String graphQLHttpHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--graphql-http-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer graphQLHttpPort = DEFAULT_GRAPHQL_HTTP_PORT;
@Option(
names = {"--graphql-http-cors-origins"},
description = "Comma separated origin domain URLs for CORS validation (default: none)")
private final CorsAllowedOriginsProperty graphQLHttpCorsAllowedOrigins =
new CorsAllowedOriginsProperty();
@Option(
names = {"--rpc-http-enabled"},
description = "Set to start the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--rpc-http-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcHttpHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--rpc-http-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer rpcHttpPort = DEFAULT_JSON_RPC_PORT;
@Option(
names = {"--rpc-http-max-active-connections"},
description =
"Maximum number of HTTP connections allowed for JSON-RPC (default: ${DEFAULT-VALUE}). Once this limit is reached, incoming connections will be rejected.",
arity = "1")
private final Integer rpcHttpMaxConnections = DEFAULT_HTTP_MAX_CONNECTIONS;
// A list of origins URLs that are accepted by the JsonRpcHttpServer (CORS)
@Option(
names = {"--rpc-http-cors-origins"},
description = "Comma separated origin domain URLs for CORS validation (default: none)")
private final CorsAllowedOriginsProperty rpcHttpCorsAllowedOrigins =
new CorsAllowedOriginsProperty();
@Option(
names = {"--rpc-http-api", "--rpc-http-apis"},
paramLabel = "<api name>",
split = ",",
arity = "1..*",
converter = RpcApisConverter.class,
description =
"Comma separated list of APIs to enable on JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Collection<RpcApi> rpcHttpApis = DEFAULT_JSON_RPC_APIS;
@Option(
names = {"--rpc-http-authentication-enabled"},
description =
"Require authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpAuthenticationEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--rpc-http-authentication-credentials-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Storage file for JSON-RPC HTTP authentication credentials (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcHttpAuthenticationCredentialsFile = null;
@CommandLine.Option(
names = {"--rpc-http-authentication-jwt-public-key-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "JWT public key file for JSON-RPC HTTP authentication",
arity = "1")
private final File rpcHttpAuthenticationPublicKeyFile = null;
@Option(
names = {"--rpc-http-tls-enabled"},
description = "Enable TLS for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsEnabled = false;
@Option(
names = {"--rpc-http-tls-keystore-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Keystore (PKCS#12) containing key/certificate for the JSON-RPC HTTP service. Required if TLS is enabled.")
private final Path rpcHttpTlsKeyStoreFile = null;
@Option(
names = {"--rpc-http-tls-keystore-password-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"File containing password to unlock keystore for the JSON-RPC HTTP service. Required if TLS is enabled.")
private final Path rpcHttpTlsKeyStorePasswordFile = null;
@Option(
names = {"--rpc-http-tls-client-auth-enabled"},
description =
"Enable TLS client authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsClientAuthEnabled = false;
@Option(
names = {"--rpc-http-tls-known-clients-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Path to file containing clients certificate common name and fingerprint for client authentication")
private final Path rpcHttpTlsKnownClientsFile = null;
@Option(
names = {"--rpc-http-tls-ca-clients-enabled"},
description =
"Enable to accept clients certificate signed by a valid CA for client authentication (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsCAClientsEnabled = false;
@Option(
names = {"--rpc-ws-enabled"},
description = "Set to start the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--rpc-ws-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--rpc-ws-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer rpcWsPort = DEFAULT_WEBSOCKET_PORT;
@Option(
names = {"--rpc-ws-max-active-connections"},
description =
"Maximum number of WebSocket connections allowed for JSON-RPC (default: ${DEFAULT-VALUE}). Once this limit is reached, incoming connections will be rejected.",
arity = "1")
private final Integer rpcWsMaxConnections = DEFAULT_WS_MAX_CONNECTIONS;
@Option(
names = {"--rpc-ws-api", "--rpc-ws-apis"},
paramLabel = "<api name>",
split = ",",
arity = "1..*",
converter = RpcApisConverter.class,
description =
"Comma separated list of APIs to enable on JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final List<RpcApi> rpcWsApis = DEFAULT_JSON_RPC_APIS;
@Option(
names = {"--rpc-ws-authentication-enabled"},
description =
"Require authentication for the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsAuthenticationEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--rpc-ws-authentication-credentials-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Storage file for JSON-RPC WebSocket authentication credentials (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsAuthenticationCredentialsFile = null;
@CommandLine.Option(
names = {"--rpc-ws-authentication-jwt-public-key-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "JWT public key file for JSON-RPC WebSocket authentication",
arity = "1")
private final File rpcWsAuthenticationPublicKeyFile = null;
@Option(
names = {"--privacy-tls-enabled"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "Enable TLS for connecting to privacy enclave (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyTlsEnabled = false;
@Option(
names = "--privacy-tls-keystore-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Path to a PKCS#12 formatted keystore; used to enable TLS on inbound connections.")
private final Path privacyKeyStoreFile = null;
@Option(
names = "--privacy-tls-keystore-password-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "Path to a file containing the password used to decrypt the keystore.")
private final Path privacyKeyStorePasswordFile = null;
@Option(
names = "--privacy-tls-known-enclave-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "Path to a file containing the fingerprints of the authorized privacy enclave.")
private final Path privacyTlsKnownEnclaveFile = null;
@Option(
names = {"--metrics-enabled"},
description = "Set to start the metrics exporter (default: ${DEFAULT-VALUE})")
private final Boolean isMetricsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-protocol"},
description =
"Metrics protocol, one of PROMETHEUS, OPENTELEMETRY or NONE. (default: ${DEFAULT-VALUE})")
private MetricsProtocol metricsProtocol = PROMETHEUS;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for the metrics exporter to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--metrics-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for the metrics exporter to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPort = DEFAULT_METRICS_PORT;
@Option(
names = {"--metrics-category", "--metrics-categories"},
paramLabel = "<category name>",
split = ",",
arity = "1..*",
description =
"Comma separated list of categories to track metrics for (default: ${DEFAULT-VALUE})")
private final Set<MetricCategory> metricCategories = DEFAULT_METRIC_CATEGORIES;
@Option(
names = {"--metrics-push-enabled"},
description = "Enable the metrics push gateway integration (default: ${DEFAULT-VALUE})")
private final Boolean isMetricsPushEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-push-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsPushHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--metrics-push-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPushPort = DEFAULT_METRICS_PUSH_PORT;
@Option(
names = {"--metrics-push-interval"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Interval in seconds to push metrics when in push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPushInterval = 15;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-push-prometheus-job"},
description = "Job name to use when in push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsPrometheusJob = "besu-client";
@Option(
names = {"--host-allowlist"},
paramLabel = "<hostname>[,<hostname>...]... or * or all",
description =
"Comma separated list of hostnames to allow for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})",
defaultValue = "localhost,127.0.0.1")
private final JsonRPCAllowlistHostsProperty hostsAllowlist = new JsonRPCAllowlistHostsProperty();
@Option(
names = {"--host-whitelist"},
hidden = true,
paramLabel = "<hostname>[,<hostname>...]... or * or all",
description =
"Deprecated in favor of --host-allowlist. Comma separated list of hostnames to allow for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})")
private final JsonRPCAllowlistHostsProperty hostsWhitelist = new JsonRPCAllowlistHostsProperty();
@Option(
names = {"--logging", "-l"},
paramLabel = "<LOG VERBOSITY LEVEL>",
description = "Logging verbosity levels: OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL")
private final Level logLevel = null;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"})
@Option(
names = {"--color-enabled"},
description =
"Force color output to be enabled/disabled (default: colorized only if printing to console)")
private static Boolean colorEnabled = null;
@Option(
names = {"--reorg-logging-threshold"},
description =
"How deep a chain reorganization must be in order for it to be logged (default: ${DEFAULT-VALUE})")
private final Long reorgLoggingThreshold = 6L;
@Option(
names = {"--miner-enabled"},
description = "Set if node will perform mining (default: ${DEFAULT-VALUE})")
private final Boolean isMiningEnabled = false;
@Option(
names = {"--miner-stratum-enabled"},
description = "Set if node will perform Stratum mining (default: ${DEFAULT-VALUE})")
private final Boolean iStratumMiningEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--miner-stratum-host"},
description = "Host for Stratum network mining service (default: ${DEFAULT-VALUE})")
private String stratumNetworkInterface = "0.0.0.0";
@Option(
names = {"--miner-stratum-port"},
description = "Stratum port binding (default: ${DEFAULT-VALUE})")
private final Integer stratumPort = 8008;
@Option(
names = {"--miner-coinbase"},
description =
"Account to which mining rewards are paid. You must specify a valid coinbase if "
+ "mining is enabled using --miner-enabled option",
arity = "1")
private final Address coinbase = null;
@Option(
names = {"--min-gas-price"},
description =
"Minimum price (in Wei) offered by a transaction for it to be included in a mined "
+ "block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Wei minTransactionGasPrice = DEFAULT_MIN_TRANSACTION_GAS_PRICE;
@Option(
names = {"--rpc-tx-feecap"},
description =
"Maximum transaction fees (in Wei) accepted for transaction submitted through RPC (default: ${DEFAULT-VALUE})",
arity = "1")
private final Wei txFeeCap = DEFAULT_RPC_TX_FEE_CAP;
@Option(
names = {"--min-block-occupancy-ratio"},
description = "Minimum occupancy ratio for a mined block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Double minBlockOccupancyRatio = DEFAULT_MIN_BLOCK_OCCUPANCY_RATIO;
@Option(
names = {"--miner-extra-data"},
description =
"A hex string representing the (32) bytes to be included in the extra data "
+ "field of a mined block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Bytes extraData = DEFAULT_EXTRA_DATA;
@Option(
names = {"--pruning-enabled"},
description =
"Enable disk-space saving optimization that removes old state that is unlikely to be required (default: ${DEFAULT-VALUE})")
private final Boolean pruningEnabled = false;
@Option(
names = {"--permissions-nodes-config-file-enabled"},
description = "Enable node level permissions (default: ${DEFAULT-VALUE})")
private final Boolean permissionsNodesEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--permissions-nodes-config-file"},
description =
"Node permissioning config TOML file (default: a file named \"permissions_config.toml\" in the Besu data folder)")
private String nodePermissionsConfigFile = null;
@Option(
names = {"--permissions-accounts-config-file-enabled"},
description = "Enable account level permissions (default: ${DEFAULT-VALUE})")
private final Boolean permissionsAccountsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--permissions-accounts-config-file"},
description =
"Account permissioning config TOML file (default: a file named \"permissions_config.toml\" in the Besu data folder)")
private String accountPermissionsConfigFile = null;
@Option(
names = {"--permissions-nodes-contract-address"},
description = "Address of the node permissioning smart contract",
arity = "1")
private final Address permissionsNodesContractAddress = null;
@Option(
names = {"--permissions-nodes-contract-version"},
description = "Version of the EEA Node Permissioning interface (default: ${DEFAULT-VALUE})")
private final Integer permissionsNodesContractVersion = 1;
@Option(
names = {"--permissions-nodes-contract-enabled"},
description = "Enable node level permissions via smart contract (default: ${DEFAULT-VALUE})")
private final Boolean permissionsNodesContractEnabled = false;
@Option(
names = {"--permissions-accounts-contract-address"},
description = "Address of the account permissioning smart contract",
arity = "1")
private final Address permissionsAccountsContractAddress = null;
@Option(
names = {"--permissions-accounts-contract-enabled"},
description =
"Enable account level permissions via smart contract (default: ${DEFAULT-VALUE})")
private final Boolean permissionsAccountsContractEnabled = false;
@Option(
names = {"--privacy-enabled"},
description = "Enable private transactions (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyEnabled = false;
@Option(
names = {"--privacy-multi-tenancy-enabled"},
description = "Enable multi-tenant private transactions (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyMultiTenancyEnabled = false;
@Option(
names = {"--revert-reason-enabled"},
description =
"Enable passing the revert reason back through TransactionReceipts (default: ${DEFAULT-VALUE})")
private final Boolean isRevertReasonEnabled = false;
@Option(
names = {"--required-blocks", "--required-block"},
paramLabel = "BLOCK=HASH",
description = "Block number and hash peers are required to have.",
arity = "*",
split = ",")
private final Map<Long, Hash> requiredBlocks = new HashMap<>();
@Option(
names = {"--privacy-url"},
description = "The URL on which the enclave is running")
private final URI privacyUrl = PrivacyParameters.DEFAULT_ENCLAVE_URL;
@Option(
names = {"--privacy-public-key-file"},
description = "The enclave's public key file")
private final File privacyPublicKeyFile = null;
@Option(
names = {"--privacy-precompiled-address"},
description =
"The address to which the privacy pre-compiled contract will be mapped (default: ${DEFAULT-VALUE})",
hidden = true)
private final Integer privacyPrecompiledAddress = Address.PRIVACY;
@Option(
names = {"--privacy-marker-transaction-signing-key-file"},
description =
"The name of a file containing the private key used to sign privacy marker transactions. If unset, each will be signed with a random key.")
private final Path privateMarkerTransactionSigningKeyPath = null;
@Option(
names = {"--privacy-enable-database-migration"},
description = "Enable private database metadata migration (default: ${DEFAULT-VALUE})")
private final Boolean migratePrivateDatabase = false;
@Option(
names = {"--privacy-flexible-groups-enabled", "--privacy-onchain-groups-enabled"},
description = "Enable flexible (onchain) privacy groups (default: ${DEFAULT-VALUE})")
private final Boolean isFlexiblePrivacyGroupsEnabled = false;
@Option(
names = {"--target-gas-limit"},
description =
"Sets target gas limit per block. If set each block's gas limit will approach this setting over time if the current gas limit is different.")
private final Long targetGasLimit = null;
@Option(
names = {"--tx-pool-max-size"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum number of pending transactions that will be kept in the transaction pool (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer txPoolMaxSize = TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS;
@Option(
names = {"--tx-pool-hashes-max-size"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum number of pending transaction hashes that will be kept in the transaction pool (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pooledTransactionHashesSize =
TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS_HASHES;
@Option(
names = {"--tx-pool-retention-hours"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum retention period of pending transactions in hours (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pendingTxRetentionPeriod =
TransactionPoolConfiguration.DEFAULT_TX_RETENTION_HOURS;
@Option(
names = {"--tx-pool-price-bump"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
converter = PercentageConverter.class,
description =
"Price bump percentage to replace an already existing transaction (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer priceBump = TransactionPoolConfiguration.DEFAULT_PRICE_BUMP.getValue();
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--key-value-storage"},
description = "Identity for the key-value storage to be used.",
arity = "1")
private String keyValueStorageName = DEFAULT_KEY_VALUE_STORAGE_NAME;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"})
@Option(
names = {"--security-module"},
paramLabel = "<NAME>",
description = "Identity for the Security Module to be used.",
arity = "1")
private String securityModuleName = DEFAULT_SECURITY_MODULE;
@Option(
names = {"--auto-log-bloom-caching-enabled"},
description = "Enable automatic log bloom caching (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean autoLogBloomCachingEnabled = true;
@Option(
names = {"--override-genesis-config"},
paramLabel = "NAME=VALUE",
description = "Overrides configuration values in the genesis file. Use with care.",
arity = "*",
hidden = true,
split = ",")
private final Map<String, String> genesisConfigOverrides =
new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
@Option(
names = {"--pruning-blocks-retained"},
defaultValue = "1024",
paramLabel = "<INTEGER>",
description =
"Minimum number of recent blocks for which to keep entire world state (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pruningBlocksRetained = PrunerConfiguration.DEFAULT_PRUNING_BLOCKS_RETAINED;
@Option(
names = {"--pruning-block-confirmations"},
defaultValue = "10",
paramLabel = "<INTEGER>",
description =
"Minimum number of confirmations on a block before marking begins (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pruningBlockConfirmations =
PrunerConfiguration.DEFAULT_PRUNING_BLOCK_CONFIRMATIONS;
@CommandLine.Option(
names = {"--pid-path"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "Path to PID file (optional)")
private final Path pidPath = null;
@CommandLine.Option(
names = {"--api-gas-price-blocks"},
description = "Number of blocks to consider for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Long apiGasPriceBlocks = 100L;
@CommandLine.Option(
names = {"--api-gas-price-percentile"},
description = "Percentile value to measure for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Double apiGasPricePercentile = 50.0;
@CommandLine.Option(
names = {"--api-gas-price-max"},
description = "Maximum gas price for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Long apiGasPriceMax = 500_000_000_000L;
@CommandLine.Option(
names = {"--static-nodes-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Specifies the static node file containing the static nodes for this node to connect to")
private final Path staticNodesFile = null;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--discovery-dns-url"},
description = "Specifies the URL to use for DNS discovery")
private String discoveryDnsUrl = null;
private EthNetworkConfig ethNetworkConfig;
private JsonRpcConfiguration jsonRpcConfiguration;
private GraphQLConfiguration graphQLConfiguration;
private WebSocketConfiguration webSocketConfiguration;
private ApiConfiguration apiConfiguration;
private MetricsConfiguration metricsConfiguration;
private Optional<PermissioningConfiguration> permissioningConfiguration;
private Collection<EnodeURL> staticNodes;
private BesuController besuController;
private BesuConfiguration pluginCommonConfiguration;
private final Supplier<ObservableMetricsSystem> metricsSystem =
Suppliers.memoize(() -> MetricsSystemFactory.create(metricsConfiguration()));
private Vertx vertx;
private EnodeDnsConfiguration enodeDnsConfiguration;
private KeyValueStorageProvider keyValueStorageProvider;
private Boolean isGoQuorumCompatibilityMode = false;
public BesuCommand(
final Logger logger,
final Supplier<RlpBlockImporter> rlpBlockImporter,
final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory,
final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory,
final RunnerBuilder runnerBuilder,
final BesuController.Builder controllerBuilderFactory,
final BesuPluginContextImpl besuPluginContext,
final Map<String, String> environment) {
this(
logger,
rlpBlockImporter,
jsonBlockImporterFactory,
rlpBlockExporterFactory,
runnerBuilder,
controllerBuilderFactory,
besuPluginContext,
environment,
new StorageServiceImpl(),
new SecurityModuleServiceImpl(),
new PermissioningServiceImpl());
}
@VisibleForTesting
protected BesuCommand(
final Logger logger,
final Supplier<RlpBlockImporter> rlpBlockImporter,
final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory,
final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory,
final RunnerBuilder runnerBuilder,
final BesuController.Builder controllerBuilderFactory,
final BesuPluginContextImpl besuPluginContext,
final Map<String, String> environment,
final StorageServiceImpl storageService,
final SecurityModuleServiceImpl securityModuleService,
final PermissioningServiceImpl permissioningService) {
this.logger = logger;
this.rlpBlockImporter = rlpBlockImporter;
this.rlpBlockExporterFactory = rlpBlockExporterFactory;
this.jsonBlockImporterFactory = jsonBlockImporterFactory;
this.runnerBuilder = runnerBuilder;
this.controllerBuilderFactory = controllerBuilderFactory;
this.besuPluginContext = besuPluginContext;
this.environment = environment;
this.storageService = storageService;
this.securityModuleService = securityModuleService;
this.permissioningService = permissioningService;
pluginCommonConfiguration = new BesuCommandConfigurationService();
besuPluginContext.addService(BesuConfiguration.class, pluginCommonConfiguration);
}
public void parse(
final AbstractParseResultHandler<List<Object>> resultHandler,
final BesuExceptionHandler exceptionHandler,
final InputStream in,
final String... args) {
commandLine =
new CommandLine(this, new BesuCommandCustomFactory(besuPluginContext))
.setCaseInsensitiveEnumValuesAllowed(true);
handleStableOptions();
enableExperimentalEIPs();
addSubCommands(resultHandler, in);
registerConverters();
handleUnstableOptions();
preparePlugins();
parse(resultHandler, exceptionHandler, args);
}
@Override
public void run() {
try {
configureLogging(true);
// Set the goquorum compatibility mode based on the genesis file
if (genesisFile != null) {
genesisConfigOptions = readGenesisConfigOptions();
if (genesisConfigOptions.isQuorum()) {
// this static flag is read by the RLP decoder
GoQuorumOptions.goQuorumCompatibilityMode = true;
isGoQuorumCompatibilityMode = true;
}
}
instantiateSignatureAlgorithmFactory();
configureNativeLibs();
logger.info("Starting Besu version: {}", BesuInfo.nodeName(identityString));
// Need to create vertx after cmdline has been parsed, such that metricsSystem is configurable
vertx = createVertx(createVertxOptions(metricsSystem.get()));
validateOptions();
configure();
initController();
startPlugins();
preSynchronization();
startSynchronization();
} catch (final Exception e) {
throw new ParameterException(this.commandLine, e.getMessage(), e);
}
}
@VisibleForTesting
void setBesuConfiguration(final BesuConfiguration pluginCommonConfiguration) {
this.pluginCommonConfiguration = pluginCommonConfiguration;
}
private void enableExperimentalEIPs() {
// Usage of static command line flags is strictly reserved for experimental EIPs
commandLine.addMixin("experimentalEIPs", ExperimentalEIPs.class);
}
private void addSubCommands(
final AbstractParseResultHandler<List<Object>> resultHandler, final InputStream in) {
commandLine.addSubcommand(
BlocksSubCommand.COMMAND_NAME,
new BlocksSubCommand(
rlpBlockImporter,
jsonBlockImporterFactory,
rlpBlockExporterFactory,
resultHandler.out()));
commandLine.addSubcommand(
PublicKeySubCommand.COMMAND_NAME,
new PublicKeySubCommand(resultHandler.out(), this::buildNodeKey));
commandLine.addSubcommand(
PasswordSubCommand.COMMAND_NAME, new PasswordSubCommand(resultHandler.out()));
commandLine.addSubcommand(RetestethSubCommand.COMMAND_NAME, new RetestethSubCommand());
commandLine.addSubcommand(
RLPSubCommand.COMMAND_NAME, new RLPSubCommand(resultHandler.out(), in));
commandLine.addSubcommand(
OperatorSubCommand.COMMAND_NAME, new OperatorSubCommand(resultHandler.out()));
}
private void registerConverters() {
commandLine.registerConverter(Address.class, Address::fromHexStringStrict);
commandLine.registerConverter(Bytes.class, Bytes::fromHexString);
commandLine.registerConverter(Level.class, Level::valueOf);
commandLine.registerConverter(SyncMode.class, SyncMode::fromString);
commandLine.registerConverter(MetricsProtocol.class, MetricsProtocol::fromString);
commandLine.registerConverter(UInt256.class, (arg) -> UInt256.valueOf(new BigInteger(arg)));
commandLine.registerConverter(Wei.class, (arg) -> Wei.of(Long.parseUnsignedLong(arg)));
commandLine.registerConverter(PositiveNumber.class, PositiveNumber::fromString);
commandLine.registerConverter(Hash.class, Hash::fromHexString);
commandLine.registerConverter(Optional.class, Optional::of);
commandLine.registerConverter(Double.class, Double::parseDouble);
metricCategoryConverter.addCategories(BesuMetricCategory.class);
metricCategoryConverter.addCategories(StandardMetricCategory.class);
commandLine.registerConverter(MetricCategory.class, metricCategoryConverter);
}
private void handleStableOptions() {
commandLine.addMixin("Ethstats", ethstatsOptions);
}
private void handleUnstableOptions() {
// Add unstable options
final ImmutableMap.Builder<String, Object> unstableOptionsBuild = ImmutableMap.builder();
final ImmutableMap<String, Object> unstableOptions =
unstableOptionsBuild
.put("Ethereum Wire Protocol", unstableEthProtocolOptions)
.put("Metrics", unstableMetricsCLIOptions)
.put("P2P Network", unstableNetworkingOptions)
.put("RPC", unstableRPCOptions)
.put("DNS Configuration", unstableDnsOptions)
.put("NAT Configuration", unstableNatOptions)
.put("Synchronizer", unstableSynchronizerOptions)
.put("TransactionPool", unstableTransactionPoolOptions)
.put("Mining", unstableMiningOptions)
.put("Native Library", unstableNativeLibraryOptions)
.put("Data Storage Options", unstableDataStorageOptions)
.put("Launcher", unstableLauncherOptions)
.build();
UnstableOptionsSubCommand.createUnstableOptions(commandLine, unstableOptions);
}
private void preparePlugins() {
besuPluginContext.addService(PicoCLIOptions.class, new PicoCLIOptionsImpl(commandLine));
besuPluginContext.addService(SecurityModuleService.class, securityModuleService);
besuPluginContext.addService(StorageService.class, storageService);
besuPluginContext.addService(MetricCategoryRegistry.class, metricCategoryRegistry);
besuPluginContext.addService(PermissioningService.class, permissioningService);
// register built-in plugins
new RocksDBPlugin().register(besuPluginContext);
new InMemoryStoragePlugin().register(besuPluginContext);
besuPluginContext.registerPlugins(pluginsDir());
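    // Make metric categories registered by plugins available to the CLI metric-category converter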
metricCategoryRegistry
.getMetricCategories()
.forEach(metricCategoryConverter::addRegistryCategory);
// register default security module
securityModuleService.register(
DEFAULT_SECURITY_MODULE, Suppliers.memoize(this::defaultSecurityModule));
}
private SecurityModule defaultSecurityModule() {
return new KeyPairSecurityModule(loadKeyPair());
}
@VisibleForTesting
KeyPair loadKeyPair() {
return KeyPairUtil.loadKeyPair(nodePrivateKeyFile());
}
private void parse(
final AbstractParseResultHandler<List<Object>> resultHandler,
final BesuExceptionHandler exceptionHandler,
final String... args) {
    // Create a handler that will search for a config file option, use it for default values,
    // and eventually run regular parsing of the remaining options.
final ConfigOptionSearchAndRunHandler configParsingHandler =
new ConfigOptionSearchAndRunHandler(
resultHandler, exceptionHandler, CONFIG_FILE_OPTION_NAME, environment);
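    // Pre-scan the raw arguments for launcher options; in launcher mode the interactive
    // launcher generates a config file that is then used for the actual parse.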
ParseArgsHelper.getLauncherOptions(unstableLauncherOptions, args);
if (unstableLauncherOptions.isLauncherMode()
|| unstableLauncherOptions.isLauncherModeForced()) {
try {
final ImmutableLauncherConfig launcherConfig =
ImmutableLauncherConfig.builder()
.launcherScript(BesuCommand.class.getResourceAsStream("launcher.json"))
.addCommandClasses(this, unstableNatOptions, ethstatsOptions, unstableMiningOptions)
.isLauncherForced(unstableLauncherOptions.isLauncherModeForced())
.build();
final File file = new LauncherManager(launcherConfig).run();
logger.info("Config file location : {}", file.getAbsolutePath());
commandLine.parseWithHandlers(
configParsingHandler,
exceptionHandler,
String.format("%s=%s", CONFIG_FILE_OPTION_NAME, file.getAbsolutePath()));
} catch (final LauncherException e) {
logger.warn("Unable to run the launcher {}", e.getMessage());
}
} else {
commandLine.parseWithHandlers(configParsingHandler, exceptionHandler, args);
}
}
private void preSynchronization() {
preSynchronizationTaskRunner.runTasks(besuController);
}
private void startSynchronization() {
synchronize(
besuController,
p2pEnabled,
peerDiscoveryEnabled,
ethNetworkConfig,
maxPeers,
p2pHost,
p2pInterface,
p2pPort,
graphQLConfiguration,
jsonRpcConfiguration,
webSocketConfiguration,
apiConfiguration,
metricsConfiguration,
permissioningConfiguration,
staticNodes,
pidPath);
}
private void startPlugins() {
besuPluginContext.addService(
BesuEvents.class,
new BesuEventsImpl(
besuController.getProtocolContext().getBlockchain(),
besuController.getProtocolManager().getBlockBroadcaster(),
besuController.getTransactionPool(),
besuController.getSyncState()));
besuPluginContext.addService(MetricsSystem.class, getMetricsSystem());
besuController.getAdditionalPluginServices().appendPluginServices(besuPluginContext);
besuPluginContext.startPlugins();
}
public void configureLogging(final boolean announce) {
    // Reconfigure logging in case color output was enabled or disabled
Configurator.reconfigure();
// set log level per CLI flags
if (logLevel != null) {
if (announce) {
System.out.println("Setting logging level to " + logLevel.name());
}
Configurator.setAllLevels("", logLevel);
}
}
public static Optional<Boolean> getColorEnabled() {
return Optional.ofNullable(colorEnabled);
}
private void configureNativeLibs() {
if (unstableNativeLibraryOptions.getNativeAltbn128()
&& AbstractAltBnPrecompiledContract.isNative()) {
logger.info("Using LibEthPairings native alt bn128");
} else {
AbstractAltBnPrecompiledContract.disableNative();
logger.info("Using the Java implementation of alt bn128");
}
if (unstableNativeLibraryOptions.getNativeSecp256k1()
&& SignatureAlgorithmFactory.getInstance().isNative()) {
logger.info("Using native secp256k1");
} else {
SignatureAlgorithmFactory.getInstance().disableNative();
logger.info("Using the Java implementation of secp256k1");
}
}
private void validateOptions() {
issueOptionWarnings();
validateP2PInterface(p2pInterface);
validateMiningParams();
validateNatParams();
validateNetStatsParams();
validateDnsOptionsParams();
}
@SuppressWarnings("ConstantConditions")
private void validateMiningParams() {
if (isMiningEnabled && coinbase == null) {
throw new ParameterException(
this.commandLine,
"Unable to mine without a valid coinbase. Either disable mining (remove --miner-enabled) "
+ "or specify the beneficiary of mining (via --miner-coinbase <Address>)");
}
if (!isMiningEnabled && iStratumMiningEnabled) {
throw new ParameterException(
this.commandLine,
"Unable to mine with Stratum if mining is disabled. Either disable Stratum mining (remove --miner-stratum-enabled) "
+ "or specify mining is enabled (--miner-enabled)");
}
}
protected void validateP2PInterface(final String p2pInterface) {
final String failMessage = "The provided --p2p-interface is not available: " + p2pInterface;
try {
if (!NetworkUtility.isNetworkInterfaceAvailable(p2pInterface)) {
throw new ParameterException(commandLine, failMessage);
}
} catch (final UnknownHostException | SocketException e) {
throw new ParameterException(commandLine, failMessage, e);
}
}
@SuppressWarnings("ConstantConditions")
private void validateNatParams() {
if (!(natMethod.equals(NatMethod.AUTO) || natMethod.equals(NatMethod.KUBERNETES))
&& !unstableNatOptions
.getNatManagerServiceName()
.equals(DEFAULT_BESU_SERVICE_NAME_FILTER)) {
throw new ParameterException(
this.commandLine,
"The `--Xnat-kube-service-name` parameter is only used in kubernetes mode. Either remove --Xnat-kube-service-name"
+ " or select the KUBERNETES mode (via --nat--method=KUBERNETES)");
}
if (natMethod.equals(NatMethod.AUTO) && !unstableNatOptions.getNatMethodFallbackEnabled()) {
throw new ParameterException(
this.commandLine,
"The `--Xnat-method-fallback-enabled` parameter cannot be used in AUTO mode. Either remove --Xnat-method-fallback-enabled"
+ " or select another mode (via --nat--method=XXXX)");
}
}
private void validateNetStatsParams() {
if (Strings.isNullOrEmpty(ethstatsOptions.getEthstatsUrl())
&& !ethstatsOptions.getEthstatsContact().isEmpty()) {
throw new ParameterException(
this.commandLine,
"The `--ethstats-contact` requires ethstats server URL to be provided. Either remove --ethstats-contact"
+ " or provide an url (via --ethstats=nodename:secret@host:port)");
}
}
private void validateDnsOptionsParams() {
if (!unstableDnsOptions.getDnsEnabled() && unstableDnsOptions.getDnsUpdateEnabled()) {
throw new ParameterException(
this.commandLine,
"The `--Xdns-update-enabled` requires dns to be enabled. Either remove --Xdns-update-enabled"
+ " or specify dns is enabled (--Xdns-enabled)");
}
}
private GenesisConfigOptions readGenesisConfigOptions() {
final GenesisConfigOptions genesisConfigOptions;
try {
final GenesisConfigFile genesisConfigFile = GenesisConfigFile.fromConfig(genesisConfig());
genesisConfigOptions = genesisConfigFile.getConfigOptions(genesisConfigOverrides);
} catch (final Exception e) {
throw new ParameterException(
this.commandLine, "Unable to load genesis file. " + e.getCause());
}
return genesisConfigOptions;
}
private void issueOptionWarnings() {
// Check that P2P options are able to work
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--p2p-enabled",
!p2pEnabled,
asList(
"--bootnodes",
"--discovery-enabled",
"--max-peers",
"--banned-node-id",
"--banned-node-ids",
"--p2p-host",
"--p2p-interface",
"--p2p-port",
"--remote-connections-max-percentage"));
// Check that mining options are able to work
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--miner-enabled",
!isMiningEnabled,
asList(
"--miner-coinbase",
"--min-block-occupancy-ratio",
"--miner-extra-data",
"--miner-stratum-enabled",
"--Xminer-remote-sealers-limit",
"--Xminer-remote-sealers-hashrate-ttl"));
CommandLineUtils.checkMultiOptionDependencies(
logger,
commandLine,
"--min-gas-price ignored because none of --miner-enabled or isQuorum (in genesis file) was defined.",
List.of(!isMiningEnabled, !isGoQuorumCompatibilityMode),
singletonList("--min-gas-price"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--sync-mode",
!SyncMode.FAST.equals(syncMode),
singletonList("--fast-sync-min-peers"));
if (!securityModuleName.equals(DEFAULT_SECURITY_MODULE) && nodePrivateKeyFile != null) {
logger.warn(
DEPENDENCY_WARNING_MSG,
"--node-private-key-file",
"--security-module=" + DEFAULT_SECURITY_MODULE);
}
}
private void configure() throws Exception {
checkPortClash();
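    // Default to FAST sync unless a custom genesis file, privacy, or the DEV network is in use,
    // in which case FULL sync is selected.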
syncMode =
Optional.ofNullable(syncMode)
.orElse(
genesisFile == null && !isPrivacyEnabled && network != NetworkName.DEV
? SyncMode.FAST
: SyncMode.FULL);
ethNetworkConfig = updateNetworkConfig(getNetwork());
checkGoQuorumCompatibilityConfig(ethNetworkConfig);
jsonRpcConfiguration = jsonRpcConfiguration();
graphQLConfiguration = graphQLConfiguration();
webSocketConfiguration = webSocketConfiguration();
apiConfiguration = apiConfiguration();
// hostsWhitelist is a hidden option. If it is specified, add the list to hostAllowlist
if (!hostsWhitelist.isEmpty()) {
// if allowlist == default values, remove the default values
if (hostsAllowlist.size() == 2
&& hostsAllowlist.containsAll(List.of("localhost", "127.0.0.1"))) {
hostsAllowlist.removeAll(List.of("localhost", "127.0.0.1"));
}
hostsAllowlist.addAll(hostsWhitelist);
}
permissioningConfiguration = permissioningConfiguration();
staticNodes = loadStaticNodes();
logger.info("Connecting to {} static nodes.", staticNodes.size());
logger.trace("Static Nodes = {}", staticNodes);
final List<EnodeURL> enodeURIs = ethNetworkConfig.getBootNodes();
permissioningConfiguration
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(p -> ensureAllNodesAreInAllowlist(enodeURIs, p));
permissioningConfiguration
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(p -> ensureAllNodesAreInAllowlist(staticNodes, p));
metricsConfiguration = metricsConfiguration();
logger.info("Security Module: {}", securityModuleName);
instantiateSignatureAlgorithmFactory();
}
private GoQuorumPrivacyParameters configureGoQuorumPrivacy(
final KeyValueStorageProvider storageProvider) {
return new GoQuorumPrivacyParameters(
createGoQuorumEnclave(),
readEnclaveKey(),
storageProvider.createGoQuorumPrivateStorage(),
createPrivateWorldStateArchive(storageProvider));
}
private GoQuorumEnclave createGoQuorumEnclave() {
final EnclaveFactory enclaveFactory = new EnclaveFactory(Vertx.vertx());
if (privacyKeyStoreFile != null) {
return enclaveFactory.createGoQuorumEnclave(
privacyUrl, privacyKeyStoreFile, privacyKeyStorePasswordFile, privacyTlsKnownEnclaveFile);
} else {
return enclaveFactory.createGoQuorumEnclave(privacyUrl);
}
}
private String readEnclaveKey() {
final String key;
try {
key = Files.asCharSource(privacyPublicKeyFile, UTF_8).read();
} catch (final Exception e) {
throw new ParameterException(
this.commandLine,
"--privacy-public-key-file must be set if isQuorum is set in the genesis file.",
e);
}
if (key.length() != 44) {
throw new IllegalArgumentException(
"Contents of enclave public key file needs to be 44 characters long to decode to a valid 32 byte public key.");
}
// throws exception if invalid base 64
Base64.getDecoder().decode(key);
return key;
}
private NetworkName getNetwork() {
    // noinspection ConstantConditions
    // network is not always null: PicoCLI injects it when the --network option is used
return network == null ? MAINNET : network;
}
private void ensureAllNodesAreInAllowlist(
final Collection<EnodeURL> enodeAddresses,
final LocalPermissioningConfiguration permissioningConfiguration) {
try {
PermissioningConfigurationValidator.areAllNodesAreInAllowlist(
enodeAddresses, permissioningConfiguration);
} catch (final Exception e) {
throw new ParameterException(this.commandLine, e.getMessage());
}
}
private void initController() {
besuController = buildController();
}
public BesuController buildController() {
try {
return getControllerBuilder().build();
} catch (final Exception e) {
throw new ExecutionException(this.commandLine, e.getMessage(), e);
}
}
public BesuControllerBuilder getControllerBuilder() {
final KeyValueStorageProvider storageProvider = keyValueStorageProvider(keyValueStorageName);
return controllerBuilderFactory
.fromEthNetworkConfig(updateNetworkConfig(getNetwork()), genesisConfigOverrides)
.synchronizerConfiguration(buildSyncConfig())
.ethProtocolConfiguration(unstableEthProtocolOptions.toDomainObject())
.dataDirectory(dataDir())
.miningParameters(
new MiningParameters(
coinbase,
minTransactionGasPrice,
extraData,
isMiningEnabled,
iStratumMiningEnabled,
stratumNetworkInterface,
stratumPort,
unstableMiningOptions.getStratumExtranonce(),
Optional.empty(),
minBlockOccupancyRatio,
unstableMiningOptions.getRemoteSealersLimit(),
unstableMiningOptions.getRemoteSealersTimeToLive()))
.transactionPoolConfiguration(buildTransactionPoolConfiguration())
.nodeKey(buildNodeKey())
.metricsSystem(metricsSystem.get())
.messagePermissioningProviders(permissioningService.getMessagePermissioningProviders())
.privacyParameters(privacyParameters(storageProvider))
.clock(Clock.systemUTC())
.isRevertReasonEnabled(isRevertReasonEnabled)
.storageProvider(storageProvider)
.isPruningEnabled(isPruningEnabled())
.pruningConfiguration(
new PrunerConfiguration(pruningBlockConfirmations, pruningBlocksRetained))
.genesisConfigOverrides(genesisConfigOverrides)
.gasLimitCalculator(
Optional.ofNullable(targetGasLimit)
.<GasLimitCalculator>map(TargetingGasLimitCalculator::new)
.orElse(GasLimitCalculator.constant()))
.requiredBlocks(requiredBlocks)
.reorgLoggingThreshold(reorgLoggingThreshold)
.dataStorageConfiguration(unstableDataStorageOptions.toDomainObject());
}
private GraphQLConfiguration graphQLConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--graphql-http-enabled",
!isGraphQLHttpEnabled,
asList("--graphql-http-cors-origins", "--graphql-http-host", "--graphql-http-port"));
final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault();
graphQLConfiguration.setEnabled(isGraphQLHttpEnabled);
graphQLConfiguration.setHost(graphQLHttpHost);
graphQLConfiguration.setPort(graphQLHttpPort);
graphQLConfiguration.setHostsAllowlist(hostsAllowlist);
graphQLConfiguration.setCorsAllowedDomains(graphQLHttpCorsAllowedOrigins);
graphQLConfiguration.setHttpTimeoutSec(unstableRPCOptions.getHttpTimeoutSec());
return graphQLConfiguration;
}
private JsonRpcConfiguration jsonRpcConfiguration() {
checkRpcTlsClientAuthOptionsDependencies();
checkRpcTlsOptionsDependencies();
checkRpcHttpOptionsDependencies();
if (isRpcHttpAuthenticationEnabled
&& rpcHttpAuthenticationCredentialsFile() == null
&& rpcHttpAuthenticationPublicKeyFile == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC HTTP endpoint without a supplied credentials file or authentication public key file");
}
final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault();
jsonRpcConfiguration.setEnabled(isRpcHttpEnabled);
jsonRpcConfiguration.setHost(rpcHttpHost);
jsonRpcConfiguration.setPort(rpcHttpPort);
jsonRpcConfiguration.setMaxActiveConnections(rpcHttpMaxConnections);
jsonRpcConfiguration.setCorsAllowedDomains(rpcHttpCorsAllowedOrigins);
jsonRpcConfiguration.setRpcApis(rpcHttpApis.stream().distinct().collect(Collectors.toList()));
jsonRpcConfiguration.setHostsAllowlist(hostsAllowlist);
jsonRpcConfiguration.setAuthenticationEnabled(isRpcHttpAuthenticationEnabled);
jsonRpcConfiguration.setAuthenticationCredentialsFile(rpcHttpAuthenticationCredentialsFile());
jsonRpcConfiguration.setAuthenticationPublicKeyFile(rpcHttpAuthenticationPublicKeyFile);
jsonRpcConfiguration.setTlsConfiguration(rpcHttpTlsConfiguration());
jsonRpcConfiguration.setHttpTimeoutSec(unstableRPCOptions.getHttpTimeoutSec());
return jsonRpcConfiguration;
}
private void checkRpcHttpOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-enabled",
!isRpcHttpEnabled,
asList(
"--rpc-http-api",
"--rpc-http-apis",
"--rpc-http-cors-origins",
"--rpc-http-host",
"--rpc-http-port",
"--rpc-http-max-active-connections",
"--rpc-http-authentication-enabled",
"--rpc-http-authentication-credentials-file",
"--rpc-http-authentication-public-key-file",
"--rpc-http-tls-enabled",
"--rpc-http-tls-keystore-file",
"--rpc-http-tls-keystore-password-file",
"--rpc-http-tls-client-auth-enabled",
"--rpc-http-tls-known-clients-file",
"--rpc-http-tls-ca-clients-enabled"));
}
private void checkRpcTlsOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-tls-enabled",
!isRpcHttpTlsEnabled,
asList(
"--rpc-http-tls-keystore-file",
"--rpc-http-tls-keystore-password-file",
"--rpc-http-tls-client-auth-enabled",
"--rpc-http-tls-known-clients-file",
"--rpc-http-tls-ca-clients-enabled"));
}
private void checkRpcTlsClientAuthOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-tls-client-auth-enabled",
!isRpcHttpTlsClientAuthEnabled,
asList("--rpc-http-tls-known-clients-file", "--rpc-http-tls-ca-clients-enabled"));
}
private void checkPrivacyTlsOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--privacy-tls-enabled",
!isPrivacyTlsEnabled,
asList(
"--privacy-tls-keystore-file",
"--privacy-tls-keystore-password-file",
"--privacy-tls-known-enclave-file"));
}
private Optional<TlsConfiguration> rpcHttpTlsConfiguration() {
if (!isRpcTlsConfigurationRequired()) {
return Optional.empty();
}
if (rpcHttpTlsKeyStoreFile == null) {
throw new ParameterException(
commandLine, "Keystore file is required when TLS is enabled for JSON-RPC HTTP endpoint");
}
if (rpcHttpTlsKeyStorePasswordFile == null) {
throw new ParameterException(
commandLine,
"File containing password to unlock keystore is required when TLS is enabled for JSON-RPC HTTP endpoint");
}
if (isRpcHttpTlsClientAuthEnabled
&& !isRpcHttpTlsCAClientsEnabled
&& rpcHttpTlsKnownClientsFile == null) {
throw new ParameterException(
commandLine,
"Known-clients file must be specified or CA clients must be enabled when TLS client authentication is enabled for JSON-RPC HTTP endpoint");
}
return Optional.of(
TlsConfiguration.Builder.aTlsConfiguration()
.withKeyStorePath(rpcHttpTlsKeyStoreFile)
.withKeyStorePasswordSupplier(
new FileBasedPasswordProvider(rpcHttpTlsKeyStorePasswordFile))
.withClientAuthConfiguration(rpcHttpTlsClientAuthConfiguration())
.build());
}
private TlsClientAuthConfiguration rpcHttpTlsClientAuthConfiguration() {
if (isRpcHttpTlsClientAuthEnabled) {
return TlsClientAuthConfiguration.Builder.aTlsClientAuthConfiguration()
.withKnownClientsFile(rpcHttpTlsKnownClientsFile)
.withCaClientsEnabled(isRpcHttpTlsCAClientsEnabled)
.build();
}
return null;
}
private boolean isRpcTlsConfigurationRequired() {
return isRpcHttpEnabled && isRpcHttpTlsEnabled;
}
private WebSocketConfiguration webSocketConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-ws-enabled",
!isRpcWsEnabled,
asList(
"--rpc-ws-api",
"--rpc-ws-apis",
"--rpc-ws-host",
"--rpc-ws-port",
"--rpc-ws-max-active-connections",
"--rpc-ws-authentication-enabled",
"--rpc-ws-authentication-credentials-file",
"--rpc-ws-authentication-public-key-file"));
if (isRpcWsAuthenticationEnabled
&& rpcWsAuthenticationCredentialsFile() == null
&& rpcWsAuthenticationPublicKeyFile == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file or authentication public key file");
}
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
webSocketConfiguration.setEnabled(isRpcWsEnabled);
webSocketConfiguration.setHost(rpcWsHost);
webSocketConfiguration.setPort(rpcWsPort);
webSocketConfiguration.setMaxActiveConnections(rpcWsMaxConnections);
webSocketConfiguration.setRpcApis(rpcWsApis);
webSocketConfiguration.setAuthenticationEnabled(isRpcWsAuthenticationEnabled);
webSocketConfiguration.setAuthenticationCredentialsFile(rpcWsAuthenticationCredentialsFile());
webSocketConfiguration.setHostsAllowlist(hostsAllowlist);
webSocketConfiguration.setAuthenticationPublicKeyFile(rpcWsAuthenticationPublicKeyFile);
webSocketConfiguration.setTimeoutSec(unstableRPCOptions.getWsTimeoutSec());
return webSocketConfiguration;
}
private ApiConfiguration apiConfiguration() {
return ImmutableApiConfiguration.builder()
.gasPriceBlocks(apiGasPriceBlocks)
.gasPricePercentile(apiGasPricePercentile)
.gasPriceMin(minTransactionGasPrice.toLong())
.gasPriceMax(apiGasPriceMax)
.build();
}
public MetricsConfiguration metricsConfiguration() {
if (isMetricsEnabled && isMetricsPushEnabled) {
throw new ParameterException(
this.commandLine,
"--metrics-enabled option and --metrics-push-enabled option can't be used at the same "
+ "time. Please refer to CLI reference for more details about this constraint.");
}
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--metrics-enabled",
!isMetricsEnabled,
asList("--metrics-host", "--metrics-port"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--metrics-push-enabled",
!isMetricsPushEnabled,
asList(
"--metrics-push-host",
"--metrics-push-port",
"--metrics-push-interval",
"--metrics-push-prometheus-job"));
return unstableMetricsCLIOptions
.toDomainObject()
.enabled(isMetricsEnabled)
.host(metricsHost)
.port(metricsPort)
.protocol(metricsProtocol)
.metricCategories(metricCategories)
.pushEnabled(isMetricsPushEnabled)
.pushHost(metricsPushHost)
.pushPort(metricsPushPort)
.pushInterval(metricsPushInterval)
.hostsAllowlist(hostsAllowlist)
.prometheusJob(metricsPrometheusJob)
.build();
}
private Optional<PermissioningConfiguration> permissioningConfiguration() throws Exception {
if (!(localPermissionsEnabled() || contractPermissionsEnabled())) {
if (rpcHttpApis.contains(RpcApis.PERM) || rpcWsApis.contains(RpcApis.PERM)) {
logger.warn(
"Permissions are disabled. Cannot enable PERM APIs when not using Permissions.");
}
return Optional.empty();
}
final Optional<LocalPermissioningConfiguration> localPermissioningConfigurationOptional;
if (localPermissionsEnabled()) {
final Optional<String> nodePermissioningConfigFile =
Optional.ofNullable(nodePermissionsConfigFile);
final Optional<String> accountPermissioningConfigFile =
Optional.ofNullable(accountPermissionsConfigFile);
final LocalPermissioningConfiguration localPermissioningConfiguration =
PermissioningConfigurationBuilder.permissioningConfiguration(
permissionsNodesEnabled,
getEnodeDnsConfiguration(),
nodePermissioningConfigFile.orElse(getDefaultPermissioningFilePath()),
permissionsAccountsEnabled,
accountPermissioningConfigFile.orElse(getDefaultPermissioningFilePath()));
localPermissioningConfigurationOptional = Optional.of(localPermissioningConfiguration);
} else {
if (nodePermissionsConfigFile != null && !permissionsNodesEnabled) {
logger.warn(
"Node permissioning config file set {} but no permissions enabled",
nodePermissionsConfigFile);
}
if (accountPermissionsConfigFile != null && !permissionsAccountsEnabled) {
logger.warn(
"Account permissioning config file set {} but no permissions enabled",
accountPermissionsConfigFile);
}
localPermissioningConfigurationOptional = Optional.empty();
}
final SmartContractPermissioningConfiguration smartContractPermissioningConfiguration =
SmartContractPermissioningConfiguration.createDefault();
if (permissionsNodesContractEnabled) {
if (permissionsNodesContractAddress == null) {
throw new ParameterException(
this.commandLine,
"No node permissioning contract address specified. Cannot enable smart contract based node permissioning.");
} else {
smartContractPermissioningConfiguration.setSmartContractNodeAllowlistEnabled(
permissionsNodesContractEnabled);
smartContractPermissioningConfiguration.setNodeSmartContractAddress(
permissionsNodesContractAddress);
smartContractPermissioningConfiguration.setNodeSmartContractInterfaceVersion(
permissionsNodesContractVersion);
}
} else if (permissionsNodesContractAddress != null) {
logger.warn(
"Node permissioning smart contract address set {} but smart contract node permissioning is disabled.",
permissionsNodesContractAddress);
}
if (permissionsAccountsContractEnabled) {
if (permissionsAccountsContractAddress == null) {
throw new ParameterException(
this.commandLine,
"No account permissioning contract address specified. Cannot enable smart contract based account permissioning.");
} else {
smartContractPermissioningConfiguration.setSmartContractAccountAllowlistEnabled(
permissionsAccountsContractEnabled);
smartContractPermissioningConfiguration.setAccountSmartContractAddress(
permissionsAccountsContractAddress);
}
} else if (permissionsAccountsContractAddress != null) {
logger.warn(
"Account permissioning smart contract address set {} but smart contract account permissioning is disabled.",
permissionsAccountsContractAddress);
}
final PermissioningConfiguration permissioningConfiguration =
new PermissioningConfiguration(
localPermissioningConfigurationOptional,
Optional.of(smartContractPermissioningConfiguration),
quorumPermissioningConfig());
return Optional.of(permissioningConfiguration);
}
private Optional<GoQuorumPermissioningConfiguration> quorumPermissioningConfig() {
if (!isGoQuorumCompatibilityMode) {
return Optional.empty();
}
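    // Read the QIP-714 activation block from the genesis options, falling back to the default
    // block when it is not set.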
try {
final OptionalLong qip714BlockNumber = genesisConfigOptions.getQip714BlockNumber();
return Optional.of(
GoQuorumPermissioningConfiguration.enabled(
qip714BlockNumber.orElse(QIP714_DEFAULT_BLOCK)));
} catch (final Exception e) {
throw new IllegalStateException("Error reading GoQuorum permissioning options", e);
}
}
private boolean localPermissionsEnabled() {
return permissionsAccountsEnabled || permissionsNodesEnabled;
}
private boolean contractPermissionsEnabled() {
return permissionsNodesContractEnabled || permissionsAccountsContractEnabled;
}
private PrivacyParameters privacyParameters(final KeyValueStorageProvider storageProvider) {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--privacy-enabled",
!isPrivacyEnabled,
asList("--privacy-multi-tenancy-enabled", "--privacy-tls-enabled"));
CommandLineUtils.checkMultiOptionDependencies(
logger,
commandLine,
"--privacy-url and/or --privacy-public-key-file ignored because none of --privacy-enabled or isQuorum (in genesis file) was defined.",
List.of(!isPrivacyEnabled, !isGoQuorumCompatibilityMode),
List.of("--privacy-url", "--privacy-public-key-file"));
checkPrivacyTlsOptionsDependencies();
final PrivacyParameters.Builder privacyParametersBuilder = new PrivacyParameters.Builder();
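    // When privacy is enabled, reject configurations it cannot work with (fast sync, pruning,
    // GoQuorum compatibility mode) before building the privacy parameters.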
if (isPrivacyEnabled) {
final String errorSuffix = "cannot be enabled with privacy.";
if (syncMode == SyncMode.FAST) {
throw new ParameterException(commandLine, String.format("%s %s", "Fast sync", errorSuffix));
}
if (isPruningEnabled()) {
throw new ParameterException(commandLine, String.format("%s %s", "Pruning", errorSuffix));
}
if (isGoQuorumCompatibilityMode) {
throw new ParameterException(
commandLine, String.format("%s %s", "GoQuorum mode", errorSuffix));
}
if (isPrivacyMultiTenancyEnabled
&& !jsonRpcConfiguration.isAuthenticationEnabled()
&& !webSocketConfiguration.isAuthenticationEnabled()) {
throw new ParameterException(
commandLine,
"Privacy multi-tenancy requires either http authentication to be enabled or WebSocket authentication to be enabled");
}
privacyParametersBuilder.setEnabled(true);
privacyParametersBuilder.setEnclaveUrl(privacyUrl);
privacyParametersBuilder.setMultiTenancyEnabled(isPrivacyMultiTenancyEnabled);
privacyParametersBuilder.setOnchainPrivacyGroupsEnabled(isFlexiblePrivacyGroupsEnabled);
final boolean hasPrivacyPublicKey = privacyPublicKeyFile != null;
if (hasPrivacyPublicKey && !isPrivacyMultiTenancyEnabled) {
try {
privacyParametersBuilder.setEnclavePublicKeyUsingFile(privacyPublicKeyFile);
} catch (final IOException e) {
throw new ParameterException(
commandLine, "Problem with privacy-public-key-file: " + e.getMessage(), e);
} catch (final IllegalArgumentException e) {
throw new ParameterException(
commandLine, "Contents of privacy-public-key-file invalid: " + e.getMessage(), e);
}
} else if (hasPrivacyPublicKey) {
throw new ParameterException(
commandLine, "Privacy multi-tenancy and privacy public key cannot be used together");
} else if (!isPrivacyMultiTenancyEnabled) {
throw new ParameterException(
commandLine, "Please specify Enclave public key file path to enable privacy");
}
if (Wei.ZERO.compareTo(minTransactionGasPrice) < 0) {
        // If gas is required, random keys cannot be used to sign private transactions,
        // i.e. --privacy-marker-transaction-signing-key-file must be set.
if (privateMarkerTransactionSigningKeyPath == null) {
throw new ParameterException(
commandLine,
"Not a free gas network. --privacy-marker-transaction-signing-key-file must be specified and must be a funded account. Private transactions cannot be signed by random (non-funded) accounts in paid gas networks");
}
}
if (!Address.PRIVACY.equals(privacyPrecompiledAddress)) {
logger.warn(
"--privacy-precompiled-address option is deprecated. This address is derived, based on --privacy-onchain-groups-enabled.");
}
privacyParametersBuilder.setPrivateKeyPath(privateMarkerTransactionSigningKeyPath);
privacyParametersBuilder.setStorageProvider(
privacyKeyStorageProvider(keyValueStorageName + "-privacy"));
if (isPrivacyTlsEnabled) {
privacyParametersBuilder.setPrivacyKeyStoreFile(privacyKeyStoreFile);
privacyParametersBuilder.setPrivacyKeyStorePasswordFile(privacyKeyStorePasswordFile);
privacyParametersBuilder.setPrivacyTlsKnownEnclaveFile(privacyTlsKnownEnclaveFile);
}
privacyParametersBuilder.setEnclaveFactory(new EnclaveFactory(vertx));
} else if (isGoQuorumCompatibilityMode) {
privacyParametersBuilder.setGoQuorumPrivacyParameters(
Optional.of(configureGoQuorumPrivacy(storageProvider)));
}
if (!isPrivacyEnabled && anyPrivacyApiEnabled()) {
logger.warn("Privacy is disabled. Cannot use EEA/PRIV API methods when not using Privacy.");
}
if (!isGoQuorumCompatibilityMode
&& (rpcHttpApis.contains(RpcApis.GOQUORUM) || rpcWsApis.contains(RpcApis.GOQUORUM))) {
logger.warn("Cannot use GOQUORUM API methods when not in GoQuorum mode.");
}
final PrivacyParameters privacyParameters = privacyParametersBuilder.build();
if (isPrivacyEnabled) {
preSynchronizationTaskRunner.addTask(
new PrivateDatabaseMigrationPreSyncTask(privacyParameters, migratePrivateDatabase));
}
return privacyParameters;
}
public WorldStateArchive createPrivateWorldStateArchive(final StorageProvider storageProvider) {
final WorldStateStorage privateWorldStateStorage =
storageProvider.createPrivateWorldStateStorage();
final WorldStatePreimageStorage preimageStorage =
storageProvider.createPrivateWorldStatePreimageStorage();
return new DefaultWorldStateArchive(privateWorldStateStorage, preimageStorage);
}
private boolean anyPrivacyApiEnabled() {
return rpcHttpApis.contains(RpcApis.EEA)
|| rpcWsApis.contains(RpcApis.EEA)
|| rpcHttpApis.contains(RpcApis.PRIV)
|| rpcWsApis.contains(RpcApis.PRIV);
}
private PrivacyKeyValueStorageProvider privacyKeyStorageProvider(final String name) {
return new PrivacyKeyValueStorageProviderBuilder()
.withStorageFactory(privacyKeyValueStorageFactory(name))
.withCommonConfiguration(pluginCommonConfiguration)
.withMetricsSystem(getMetricsSystem())
.build();
}
private PrivacyKeyValueStorageFactory privacyKeyValueStorageFactory(final String name) {
return (PrivacyKeyValueStorageFactory)
storageService
.getByName(name)
.orElseThrow(
() -> new StorageException("No KeyValueStorageFactory found for key: " + name));
}
private KeyValueStorageProvider keyValueStorageProvider(final String name) {
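    // Lazily build the key-value storage provider on first use and reuse the cached instance
    // on subsequent calls.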
if (this.keyValueStorageProvider == null) {
this.keyValueStorageProvider =
new KeyValueStorageProviderBuilder()
.withStorageFactory(
storageService
.getByName(name)
.orElseThrow(
() ->
new StorageException(
"No KeyValueStorageFactory found for key: " + name)))
.withCommonConfiguration(pluginCommonConfiguration)
.withMetricsSystem(getMetricsSystem())
.isGoQuorumCompatibilityMode(isGoQuorumCompatibilityMode.booleanValue())
.build();
}
return this.keyValueStorageProvider;
}
private SynchronizerConfiguration buildSyncConfig() {
return unstableSynchronizerOptions
.toDomainObject()
.syncMode(syncMode)
.fastSyncMinimumPeerCount(fastSyncMinPeerCount)
.build();
}
private TransactionPoolConfiguration buildTransactionPoolConfiguration() {
return unstableTransactionPoolOptions
.toDomainObject()
.txPoolMaxSize(txPoolMaxSize)
.pooledTransactionHashesSize(pooledTransactionHashesSize)
.pendingTxRetentionPeriod(pendingTxRetentionPeriod)
.priceBump(Percentage.fromInt(priceBump))
.txFeeCap(txFeeCap)
.build();
}
private boolean isPruningEnabled() {
return pruningEnabled;
}
// Blockchain synchronisation from peers.
private void synchronize(
final BesuController controller,
final boolean p2pEnabled,
final boolean peerDiscoveryEnabled,
final EthNetworkConfig ethNetworkConfig,
final int maxPeers,
final String p2pAdvertisedHost,
final String p2pListenInterface,
final int p2pListenPort,
final GraphQLConfiguration graphQLConfiguration,
final JsonRpcConfiguration jsonRpcConfiguration,
final WebSocketConfiguration webSocketConfiguration,
final ApiConfiguration apiConfiguration,
final MetricsConfiguration metricsConfiguration,
final Optional<PermissioningConfiguration> permissioningConfiguration,
final Collection<EnodeURL> staticNodes,
final Path pidPath) {
checkNotNull(runnerBuilder);
final ObservableMetricsSystem metricsSystem = this.metricsSystem.get();
final Runner runner =
runnerBuilder
.vertx(vertx)
.besuController(controller)
.p2pEnabled(p2pEnabled)
.natMethod(natMethod)
.natManagerServiceName(unstableNatOptions.getNatManagerServiceName())
.natMethodFallbackEnabled(unstableNatOptions.getNatMethodFallbackEnabled())
.discovery(peerDiscoveryEnabled)
.ethNetworkConfig(ethNetworkConfig)
.permissioningConfiguration(permissioningConfiguration)
.p2pAdvertisedHost(p2pAdvertisedHost)
.p2pListenInterface(p2pListenInterface)
.p2pListenPort(p2pListenPort)
.maxPeers(maxPeers)
.limitRemoteWireConnectionsEnabled(isLimitRemoteWireConnectionsEnabled)
.fractionRemoteConnectionsAllowed(
Fraction.fromPercentage(maxRemoteConnectionsPercentage).getValue())
.randomPeerPriority(randomPeerPriority)
.networkingConfiguration(unstableNetworkingOptions.toDomainObject())
.graphQLConfiguration(graphQLConfiguration)
.jsonRpcConfiguration(jsonRpcConfiguration)
.webSocketConfiguration(webSocketConfiguration)
.apiConfiguration(apiConfiguration)
.pidPath(pidPath)
.dataDir(dataDir())
.bannedNodeIds(bannedNodeIds)
.metricsSystem(metricsSystem)
.permissioningService(permissioningService)
.metricsConfiguration(metricsConfiguration)
.staticNodes(staticNodes)
.identityString(identityString)
.besuPluginContext(besuPluginContext)
.autoLogBloomCaching(autoLogBloomCachingEnabled)
.ethstatsUrl(ethstatsOptions.getEthstatsUrl())
.ethstatsContact(ethstatsOptions.getEthstatsContact())
.storageProvider(keyValueStorageProvider(keyValueStorageName))
.forkIdSupplier(() -> besuController.getProtocolManager().getForkIdAsBytesList())
.build();
addShutdownHook(runner);
runner.start();
runner.awaitStop();
}
protected Vertx createVertx(final VertxOptions vertxOptions) {
return Vertx.vertx(vertxOptions);
}
private VertxOptions createVertxOptions(final MetricsSystem metricsSystem) {
return new VertxOptions()
.setMetricsOptions(
new MetricsOptions()
.setEnabled(true)
.setFactory(new VertxMetricsAdapterFactory(metricsSystem)));
}
private void addShutdownHook(final Runner runner) {
Runtime.getRuntime()
.addShutdownHook(
new Thread(
() -> {
try {
besuPluginContext.stopPlugins();
runner.close();
LogManager.shutdown();
} catch (final Exception e) {
logger.error("Failed to stop Besu");
}
}));
}
// Used to discover the default IP of the client.
  // The loopback IP is used by default because that is how the smoke tests require it,
  // and defaulting to localhost only is probably good security behaviour.
private InetAddress autoDiscoverDefaultIP() {
if (autoDiscoveredDefaultIP != null) {
return autoDiscoveredDefaultIP;
}
autoDiscoveredDefaultIP = InetAddress.getLoopbackAddress();
return autoDiscoveredDefaultIP;
}
private EthNetworkConfig updateNetworkConfig(final NetworkName network) {
final EthNetworkConfig.Builder builder =
new EthNetworkConfig.Builder(EthNetworkConfig.getNetworkConfig(network));
    // Using a custom genesis file comes with specific default values, not only for the genesis
    // file itself but also for the network id and the bootnodes list.
if (genesisFile != null) {
      // noinspection ConstantConditions
      // network is not always null: PicoCLI injects it when the --network option is used
if (this.network != null) {
        // Check whether the network option was really provided by the user rather than just
        // holding its default value. Providing both --network and --genesis-file at the same
        // time raises a conflict error.
throw new ParameterException(
this.commandLine,
"--network option and --genesis-file option can't be used at the same time. Please "
+ "refer to CLI reference for more details about this constraint.");
}
builder.setGenesisConfig(genesisConfig());
if (networkId == null) {
        // If no network id option is defined on the CLI, set a default value from the genesis
        // file. The genesis is parsed only in this case (no parsed version exists at this
        // stage), since known networks already have network id constants that speed up the
        // process. If the genesis contains no chain id (it is optional), the mainnet network
        // id is used.
try {
builder.setNetworkId(
getGenesisConfigFile()
.getConfigOptions(genesisConfigOverrides)
.getChainId()
.orElse(EthNetworkConfig.getNetworkConfig(MAINNET).getNetworkId()));
} catch (final DecodeException e) {
throw new ParameterException(
this.commandLine, String.format("Unable to parse genesis file %s.", genesisFile), e);
} catch (final ArithmeticException e) {
throw new ParameterException(
this.commandLine,
"No networkId specified and chainId in "
+ "genesis file is too large to be used as a networkId");
}
}
if (bootNodes == null) {
        // Default to an empty bootnodes list when the option is not provided on the CLI,
        // because the mainnet bootnodes won't work with a custom genesis. An empty default
        // forces the user to create a custom list rather than silently using one that looks
        // like it works when it cannot.
builder.setBootNodes(new ArrayList<>());
}
builder.setDnsDiscoveryUrl(null);
}
if (discoveryDnsUrl != null) {
builder.setDnsDiscoveryUrl(discoveryDnsUrl);
}
if (networkId != null) {
builder.setNetworkId(networkId);
}
if (bootNodes != null) {
if (!peerDiscoveryEnabled) {
logger.warn("Discovery disabled: bootnodes will be ignored.");
}
try {
final List<EnodeURL> listBootNodes =
bootNodes.stream()
.filter(value -> !value.isEmpty())
.map(url -> EnodeURLImpl.fromString(url, getEnodeDnsConfiguration()))
.collect(Collectors.toList());
DiscoveryConfiguration.assertValidBootnodes(listBootNodes);
builder.setBootNodes(listBootNodes);
} catch (final IllegalArgumentException e) {
throw new ParameterException(commandLine, e.getMessage());
}
}
return builder.build();
}
private GenesisConfigFile getGenesisConfigFile() {
return GenesisConfigFile.fromConfig(genesisConfig());
}
private String genesisConfig() {
try {
return Resources.toString(genesisFile.toURI().toURL(), UTF_8);
} catch (final IOException e) {
throw new ParameterException(
this.commandLine, String.format("Unable to load genesis file %s.", genesisFile), e);
}
}
// dataDir() is public because it is accessed by subcommands
public Path dataDir() {
return dataPath.toAbsolutePath();
}
private Path pluginsDir() {
final String pluginsDir = System.getProperty("besu.plugins.dir");
if (pluginsDir == null) {
return new File(System.getProperty("besu.home", "."), "plugins").toPath();
} else {
return new File(pluginsDir).toPath();
}
}
@VisibleForTesting
NodeKey buildNodeKey() {
return new NodeKey(securityModule());
}
private SecurityModule securityModule() {
return securityModuleService
.getByName(securityModuleName)
.orElseThrow(() -> new RuntimeException("Security Module not found: " + securityModuleName))
.get();
}
private File nodePrivateKeyFile() {
return Optional.ofNullable(nodePrivateKeyFile)
.orElseGet(() -> KeyPairUtil.getDefaultKeyFile(dataDir()));
}
private String rpcHttpAuthenticationCredentialsFile() {
final String filename = rpcHttpAuthenticationCredentialsFile;
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "HTTP");
}
return filename;
}
private String rpcWsAuthenticationCredentialsFile() {
final String filename = rpcWsAuthenticationCredentialsFile;
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "WS");
}
return filename;
}
private String getDefaultPermissioningFilePath() {
return dataDir()
+ System.getProperty("file.separator")
+ DefaultCommandValues.PERMISSIONING_CONFIG_LOCATION;
}
public MetricsSystem getMetricsSystem() {
return metricsSystem.get();
}
private Set<EnodeURL> loadStaticNodes() throws IOException {
final Path staticNodesPath;
if (staticNodesFile != null) {
staticNodesPath = staticNodesFile.toAbsolutePath();
if (!staticNodesPath.toFile().exists()) {
throw new ParameterException(
commandLine, String.format("Static nodes file %s does not exist", staticNodesPath));
}
} else {
final String staticNodesFilename = "static-nodes.json";
staticNodesPath = dataDir().resolve(staticNodesFilename);
}
logger.info("Static Nodes file = {}", staticNodesPath);
return StaticNodesParser.fromPath(staticNodesPath, getEnodeDnsConfiguration());
}
public BesuExceptionHandler exceptionHandler() {
return new BesuExceptionHandler(this::getLogLevel);
}
public EnodeDnsConfiguration getEnodeDnsConfiguration() {
if (enodeDnsConfiguration == null) {
enodeDnsConfiguration = unstableDnsOptions.toDomainObject();
}
return enodeDnsConfiguration;
}
private void checkPortClash() {
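    // Collect the ports of all enabled services and fail fast if any port number is configured
    // more than once.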
getEffectivePorts().stream()
.filter(Objects::nonNull)
.filter(port -> port > 0)
.forEach(
port -> {
if (!allocatedPorts.add(port)) {
throw new ParameterException(
commandLine,
"Port number '"
+ port
+ "' has been specified multiple times. Please review the supplied configuration.");
}
});
}
/**
   * Gets the list of effective ports (ports that are enabled).
*
* @return The list of effective ports
*/
private List<Integer> getEffectivePorts() {
final List<Integer> effectivePorts = new ArrayList<>();
addPortIfEnabled(effectivePorts, p2pPort, p2pEnabled);
addPortIfEnabled(effectivePorts, graphQLHttpPort, isGraphQLHttpEnabled);
addPortIfEnabled(effectivePorts, rpcHttpPort, isRpcHttpEnabled);
addPortIfEnabled(effectivePorts, rpcWsPort, isRpcWsEnabled);
addPortIfEnabled(effectivePorts, metricsPort, isMetricsEnabled);
addPortIfEnabled(effectivePorts, metricsPushPort, isMetricsPushEnabled);
addPortIfEnabled(effectivePorts, stratumPort, iStratumMiningEnabled);
return effectivePorts;
}
/**
* Adds port in the passed list only if enabled.
*
* @param ports The list of ports
* @param port The port value
* @param enabled true if enabled, false otherwise
*/
private void addPortIfEnabled(
final List<Integer> ports, final Integer port, final boolean enabled) {
if (enabled) {
ports.add(port);
}
}
private void checkGoQuorumCompatibilityConfig(final EthNetworkConfig ethNetworkConfig) {
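    // GoQuorum compatibility mode requires a zero minimum gas price and must not be used on
    // Ethereum MainNet.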
if (isGoQuorumCompatibilityMode) {
if (!minTransactionGasPrice.isZero()) {
throw new ParameterException(
this.commandLine,
"--min-gas-price must be set to zero if isQuorum mode is enabled in the genesis file.");
}
if (ensureGoQuorumCompatibilityModeNotUsedOnMainnet(genesisConfigOptions, ethNetworkConfig)) {
throw new ParameterException(this.commandLine, "isQuorum mode cannot be used on Mainnet.");
}
}
}
private static boolean ensureGoQuorumCompatibilityModeNotUsedOnMainnet(
final GenesisConfigOptions genesisConfigOptions, final EthNetworkConfig ethNetworkConfig) {
return ethNetworkConfig.getNetworkId().equals(EthNetworkConfig.MAINNET_NETWORK_ID)
|| genesisConfigOptions
.getChainId()
.map(chainId -> chainId.equals(EthNetworkConfig.MAINNET_NETWORK_ID))
.orElse(false);
}
@VisibleForTesting
Level getLogLevel() {
return logLevel;
}
private class BesuCommandConfigurationService implements BesuConfiguration {
@Override
public Path getStoragePath() {
return dataDir().resolve(DATABASE_PATH);
}
@Override
public Path getDataPath() {
return dataDir();
}
@Override
public int getDatabaseVersion() {
return unstableDataStorageOptions
.toDomainObject()
.getDataStorageFormat()
.getDatabaseVersion();
}
}
private void instantiateSignatureAlgorithmFactory() {
if (SignatureAlgorithmFactory.isInstanceSet()) {
return;
}
final Optional<String> ecCurve = getEcCurveFromGenesisFile();
if (ecCurve.isEmpty()) {
SignatureAlgorithmFactory.setDefaultInstance();
return;
}
try {
SignatureAlgorithmFactory.setInstance(SignatureAlgorithmType.create(ecCurve.get()));
} catch (final IllegalArgumentException e) {
throw new CommandLine.InitializationException(
new StringBuilder()
.append("Invalid genesis file configuration for ecCurve. ")
.append(e.getMessage())
.toString());
}
}
private Optional<String> getEcCurveFromGenesisFile() {
if (genesisFile == null) {
return Optional.empty();
}
return genesisConfigOptions.getEcCurve();
}
}
| 1 | 25,217 | could we please change that to something that indicates that this feature is not "production" ready! | hyperledger-besu | java |
@@ -0,0 +1,5 @@
+_base_ = './mask_rcnn_swim-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py'
+model = dict(
+ pretrained=\
+ 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth', # noqa
+ backbone=dict(depths=[2, 2, 18, 2])) | 1 | 1 | 24,725 | swim -> swin. Other configs and file names also should be modified. | open-mmlab-mmdetection | py |
|
@@ -47,7 +47,7 @@ class BigQueryClient(_base_client.BaseClient):
return RateLimiter(FLAGS.max_bigquery_api_calls_per_100_seconds,
self.DEFAULT_QUOTA_TIMESPAN_PER_SECONDS)
- def get_bigquery_projectids(self):
+ def get_bigquery_projectids(self, key='projects'):
"""Request and page through bigquery projectids.
Returns: A list of project_ids enabled for bigquery. | 1 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for the BigQuery API client."""
import gflags as flags
from ratelimiter import RateLimiter
from google.cloud.security.common.gcp_api import _base_client
from google.cloud.security.common.util import log_util
FLAGS = flags.FLAGS
flags.DEFINE_integer('max_bigquery_api_calls_per_100_seconds', 17000,
'BigQuery Discovery requests per 100 seconds.')
LOGGER = log_util.get_logger(__name__)
class BigQueryClient(_base_client.BaseClient):
"""BigQuery Client manager."""
API_NAME = 'bigquery'
# TODO: Remove pylint disable.
# pylint: disable=invalid-name
DEFAULT_QUOTA_TIMESPAN_PER_SECONDS = 100
# pylint: enable=invalid-name
def __init__(self):
super(BigQueryClient, self).__init__(
api_name=self.API_NAME)
self.rate_limiter = self.get_rate_limiter()
def get_rate_limiter(self):
"""Return an appropriate rate limiter."""
return RateLimiter(FLAGS.max_bigquery_api_calls_per_100_seconds,
self.DEFAULT_QUOTA_TIMESPAN_PER_SECONDS)
def get_bigquery_projectids(self):
"""Request and page through bigquery projectids.
Returns: A list of project_ids enabled for bigquery.
['project-id',
'project-id',
'...']
If there are no project_ids enabled for bigquery an empty list will
be returned.
"""
bigquery_projects_api = self.service.projects()
request = bigquery_projects_api.list()
results = self._build_paged_result(request, bigquery_projects_api,
self.rate_limiter)
project_ids = []
for result in results:
for project in result.get('projects', []):
project_ids.append(project.get('id'))
return project_ids
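    # Editor's note (descriptive comment, not part of the original source): the
    # methods in this client share one pattern. _build_paged_result() returns the
    # raw API response pages, and each method then flattens the relevant payload
    # key ('projects', 'datasets' or 'access') from every page into a plain list.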
def get_datasets_for_projectid(self, project_id, key='datasets'):
"""Return BigQuery datasets stored in the requested project_id.
Args:
project_id: String representing the project id.
Returns: A list of datasetReference objects for a given project_id.
[{'datasetId': 'dataset-id',
'projectId': 'project-id'},
{...}]
"""
bigquery_datasets_api = self.service.datasets()
request = bigquery_datasets_api.list(projectId=project_id, all=True)
results = self._build_paged_result(request, bigquery_datasets_api,
self.rate_limiter)
datasets = []
for result in results:
if key in result:
for item in result.get(key):
datasets.append(item.get('datasetReference'))
return datasets
def get_dataset_access(self, project_id, dataset_id, key='access'):
"""Return the access portion of the dataset resource object.
Args:
project_id: String representing the project id.
dataset_id: String representing the dataset id.
Returns: A list of access lists for a given project_id and dataset_id.
[{'role': 'WRITER', 'specialGroup': 'projectWriters'},
{'role': 'OWNER', 'specialGroup': 'projectOwners'},
{'role': 'OWNER', 'userByEmail': '[email protected]'},
{'role': 'READER', 'specialGroup': 'projectReaders'}]
"""
bigquery_datasets_api = self.service.datasets()
request = bigquery_datasets_api.get(projectId=project_id,
datasetId=dataset_id)
results = self._build_paged_result(request, bigquery_datasets_api,
self.rate_limiter)
access_list = []
for result in results:
if key in result:
for item in result.get(key):
access_list.append(item)
return access_list
| 1 | 26,227 | nit: arg description for "key"? | forseti-security-forseti-security | py |
@@ -244,6 +244,15 @@ func (tlf *TLF) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus
return dir.Setattr(ctx, req, resp)
}
+// Fsync implements the fs.NodeFsyncer interface for TLF.
+func (tlf *TLF) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) {
+ dir, err := tlf.loadDir(ctx)
+ if err != nil {
+ return err
+ }
+ return dir.Fsync(ctx, req)
+}
+
var _ fs.Handle = (*TLF)(nil)
var _ fs.NodeOpener = (*TLF)(nil) | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libfuse
import (
"os"
"sync"
"time"
"bazil.org/fuse"
"bazil.org/fuse/fs"
"github.com/keybase/client/go/logger"
"github.com/keybase/kbfs/libfs"
"github.com/keybase/kbfs/libkbfs"
"golang.org/x/net/context"
)
// TLF represents the root directory of a TLF. It wraps a lazy-loaded
// Dir.
type TLF struct {
folder *Folder
dirLock sync.RWMutex
dir *Dir
}
func newTLF(fl *FolderList, h *libkbfs.TlfHandle,
name libkbfs.PreferredTlfName) *TLF {
folder := newFolder(fl, h, name)
tlf := &TLF{
folder: folder,
}
return tlf
}
var _ DirInterface = (*TLF)(nil)
func (tlf *TLF) isPublic() bool {
return tlf.folder.list.public
}
func (tlf *TLF) getStoredDir() *Dir {
tlf.dirLock.RLock()
defer tlf.dirLock.RUnlock()
return tlf.dir
}
func (tlf *TLF) clearStoredDir() {
tlf.dirLock.Lock()
defer tlf.dirLock.Unlock()
tlf.dir = nil
}
func (tlf *TLF) log() logger.Logger {
return tlf.folder.fs.log
}
func (tlf *TLF) loadDirHelper(ctx context.Context, mode libkbfs.ErrorModeType,
filterErr bool) (dir *Dir, exitEarly bool, err error) {
dir = tlf.getStoredDir()
if dir != nil {
return dir, false, nil
}
tlf.dirLock.Lock()
defer tlf.dirLock.Unlock()
// Need to check for nilness again to avoid racing with other
// calls to loadDir().
if tlf.dir != nil {
return tlf.dir, false, nil
}
tlf.log().CDebugf(ctx, "Loading root directory for folder %s "+
"(public: %t, filter error: %t)", tlf.folder.name(),
tlf.isPublic(), filterErr)
defer func() {
if filterErr {
exitEarly, err = libfs.FilterTLFEarlyExitError(ctx, err, tlf.log(), tlf.folder.name())
}
tlf.folder.reportErr(ctx, mode, err)
}()
handle, err := tlf.folder.resolve(ctx)
if err != nil {
return nil, false, err
}
var rootNode libkbfs.Node
if filterErr {
rootNode, _, err = tlf.folder.fs.config.KBFSOps().GetRootNode(
ctx, handle, libkbfs.MasterBranch)
if err != nil {
return nil, false, err
}
// If not, fake an empty directory.
if rootNode == nil {
return nil, false, libfs.TlfDoesNotExist{}
}
} else {
rootNode, _, err = tlf.folder.fs.config.KBFSOps().GetOrCreateRootNode(
ctx, handle, libkbfs.MasterBranch)
if err != nil {
return nil, false, err
}
}
err = tlf.folder.setFolderBranch(rootNode.GetFolderBranch())
if err != nil {
return nil, false, err
}
tlf.folder.nodes[rootNode.GetID()] = tlf
tlf.dir = newDir(tlf.folder, rootNode)
return tlf.dir, false, nil
}
func (tlf *TLF) loadDir(ctx context.Context) (*Dir, error) {
dir, _, err := tlf.loadDirHelper(ctx, libkbfs.WriteMode, false)
return dir, err
}
// loadDirAllowNonexistent loads a TLF if it's not already loaded. If
// the TLF doesn't yet exist, it still returns a nil error and
// indicates that the calling function should pretend it's an empty
// folder.
func (tlf *TLF) loadDirAllowNonexistent(ctx context.Context) (
*Dir, bool, error) {
return tlf.loadDirHelper(ctx, libkbfs.ReadMode, true)
}
// Access implements the fs.NodeAccesser interface for *TLF.
func (tlf *TLF) Access(ctx context.Context, r *fuse.AccessRequest) error {
return tlf.folder.access(ctx, r)
}
// Attr implements the fs.Node interface for TLF.
func (tlf *TLF) Attr(ctx context.Context, a *fuse.Attr) error {
dir := tlf.getStoredDir()
if dir == nil {
tlf.log().CDebugf(
ctx, "Faking Attr for TLF %s", tlf.folder.name())
// Have a low non-zero value for Valid to avoid being
// swamped with requests, while still not showing
// stale data for too long if we end up loading the
// dir.
a.Valid = 1 * time.Second
a.Mode = os.ModeDir | 0500
a.Uid = uint32(os.Getuid())
return nil
}
return dir.Attr(ctx, a)
}
// Lookup implements the fs.NodeRequestLookuper interface for TLF.
func (tlf *TLF) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (fs.Node, error) {
dir, exitEarly, err := tlf.loadDirAllowNonexistent(ctx)
if err != nil {
return nil, err
}
if exitEarly {
if node := handleTLFSpecialFile(
req.Name, tlf.folder, &resp.EntryValid); node != nil {
return node, nil
}
return nil, fuse.ENOENT
}
return dir.Lookup(ctx, req, resp)
}
// Create implements the fs.NodeCreater interface for TLF.
func (tlf *TLF) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
dir, err := tlf.loadDir(ctx)
if err != nil {
return nil, nil, err
}
return dir.Create(ctx, req, resp)
}
// Mkdir implements the fs.NodeMkdirer interface for TLF.
func (tlf *TLF) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (_ fs.Node, err error) {
dir, err := tlf.loadDir(ctx)
if err != nil {
return nil, err
}
return dir.Mkdir(ctx, req)
}
// Symlink implements the fs.NodeSymlinker interface for TLF.
func (tlf *TLF) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (
fs.Node, error) {
dir, err := tlf.loadDir(ctx)
if err != nil {
return nil, err
}
return dir.Symlink(ctx, req)
}
// Rename implements the fs.NodeRenamer interface for TLF.
func (tlf *TLF) Rename(ctx context.Context, req *fuse.RenameRequest,
newDir fs.Node) error {
dir, err := tlf.loadDir(ctx)
if err != nil {
return err
}
return dir.Rename(ctx, req, newDir)
}
// Remove implements the fs.NodeRemover interface for TLF.
func (tlf *TLF) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
dir, err := tlf.loadDir(ctx)
if err != nil {
return err
}
return dir.Remove(ctx, req)
}
// ReadDirAll implements the fs.NodeReadDirAller interface for TLF.
func (tlf *TLF) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
dir, exitEarly, err := tlf.loadDirAllowNonexistent(ctx)
if err != nil || exitEarly {
return nil, err
}
return dir.ReadDirAll(ctx)
}
// Forget kernel reference to this node.
func (tlf *TLF) Forget() {
dir := tlf.getStoredDir()
if dir != nil {
dir.Forget()
}
}
// Setattr implements the fs.NodeSetattrer interface for TLF.
func (tlf *TLF) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
dir, err := tlf.loadDir(ctx)
if err != nil {
return err
}
return dir.Setattr(ctx, req, resp)
}
var _ fs.Handle = (*TLF)(nil)
var _ fs.NodeOpener = (*TLF)(nil)
// Open implements the fs.NodeOpener interface for TLF.
func (tlf *TLF) Open(ctx context.Context, req *fuse.OpenRequest,
resp *fuse.OpenResponse) (fs.Handle, error) {
// Explicitly load the directory when a TLF is opened, because
// some OSX programs like ls have a bug that doesn't report errors
// on a ReadDirAll.
_, _, err := tlf.loadDirAllowNonexistent(ctx)
if err != nil {
return nil, err
}
return tlf, nil
}
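// Editor's note: the following is an illustrative sketch and is not part of the
// original source. The accompanying review comment questions whether Fsync (added
// in the patch above) needs to force a TLF load at all; a variant that mirrors how
// Attr handles the not-yet-loaded case might look like this:
//
//	func (tlf *TLF) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
//		dir := tlf.getStoredDir()
//		if dir == nil {
//			// Nothing has been loaded for this TLF, so there is nothing to flush.
//			return nil
//		}
//		return dir.Fsync(ctx, req)
//	}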
| 1 | 16,514 | If we don't have a `dir` yet, we wouldn't need to do a sync right? If so, perhaps we can just `getStoredDir()` like `Attr()`? | keybase-kbfs | go |
@@ -1637,10 +1637,13 @@ privload_mem_is_elf_so_header(byte *mem)
/* libdynamorio should be ET_DYN */
if (elf_hdr->e_type != ET_DYN)
return false;
- /* ARM or X86 */
+ /* ARM or X86 */
+# ifndef DR_HOST_NOT_TARGET
if (elf_hdr->e_machine !=
- IF_X86_ELSE(IF_X64_ELSE(EM_X86_64, EM_386), IF_X64_ELSE(EM_AARCH64, EM_ARM)))
+ IF_HOST_X86_ELSE(IF_HOST_X64_ELSE(EM_X86_64, EM_386),
+ IF_HOST_X64_ELSE(EM_AARCH64, EM_ARM)))
return false;
+# endif
if (elf_hdr->e_ehsize != sizeof(ELF_HEADER_TYPE))
return false;
return true; | 1 | /* *******************************************************************************
* Copyright (c) 2011-2020 Google, Inc. All rights reserved.
* Copyright (c) 2011 Massachusetts Institute of Technology All rights reserved.
* *******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/*
* loader.c: custom private library loader for Linux
*
* original case: i#157
*/
#include "../globals.h"
#include "../module_shared.h"
#include "os_private.h"
#include "../ir/instr.h" /* SEG_GS/SEG_FS */
#include "module.h"
#include "module_private.h"
#include "../heap.h" /* HEAPACCT */
#ifdef LINUX
# include "include/syscall.h"
# include "memquery.h"
# define _GNU_SOURCE 1
# define __USE_GNU 1
# include <link.h> /* struct dl_phdr_info, must be prior to dlfcn.h */
#else
# include <sys/syscall.h>
#endif
#include "tls.h"
#include <dlfcn.h> /* dlsym */
#ifdef LINUX
# include <sys/prctl.h> /* PR_SET_NAME */
#endif
#include <stdlib.h> /* getenv */
#include <dlfcn.h> /* dlopen/dlsym */
#include <unistd.h> /* __environ */
#include <stddef.h> /* offsetof */
extern size_t
wcslen(const wchar_t *str); /* in string.c */
/* Written during initialization only */
/* FIXME: i#460, the path lookup itself is a complicated process,
* so we just list possible common but in-complete paths for now.
*/
#define SYSTEM_LIBRARY_PATH_VAR "LD_LIBRARY_PATH"
static char *ld_library_path = NULL;
static const char *const system_lib_paths[] = {
#ifdef X86
"/lib/tls/i686/cmov",
#endif
"/usr/lib",
"/lib",
"/usr/local/lib", /* Ubuntu: /etc/ld.so.conf.d/libc.conf */
#ifdef ANDROID
"/system/lib",
#endif
#ifndef X64
"/usr/lib32",
"/lib32",
# ifdef X86
"/lib32/tls/i686/cmov",
/* 32-bit Ubuntu */
"/lib/i386-linux-gnu",
"/usr/lib/i386-linux-gnu",
# elif defined(ARM)
"/lib/arm-linux-gnueabihf",
"/usr/lib/arm-linux-gnueabihf",
"/lib/arm-linux-gnueabi",
"/usr/lib/arm-linux-gnueabi",
# endif
#else
/* 64-bit Ubuntu */
# ifdef X86
"/lib64/tls/i686/cmov",
# endif
"/usr/lib64",
"/lib64",
# ifdef X86
"/lib/x86_64-linux-gnu", /* /etc/ld.so.conf.d/x86_64-linux-gnu.conf */
"/usr/lib/x86_64-linux-gnu", /* /etc/ld.so.conf.d/x86_64-linux-gnu.conf */
# elif defined(AARCH64)
"/lib/aarch64-linux-gnu",
"/usr/lib/aarch64-linux-gnu",
# endif
#endif
};
#define NUM_SYSTEM_LIB_PATHS (sizeof(system_lib_paths) / sizeof(system_lib_paths[0]))
#define RPATH_ORIGIN "$ORIGIN"
#define APP_BRK_GAP 64 * 1024 * 1024
static os_privmod_data_t *libdr_opd;
#ifdef LINUX /* XXX i#1285: implement MacOS private loader */
# if defined(INTERNAL) || defined(CLIENT_INTERFACE)
static bool printed_gdb_commands = false;
/* Global so visible in release build gdb */
static char gdb_priv_cmds[4096];
static size_t gdb_priv_cmds_sofar;
# endif
#endif
/* pointing to the I/O data structure in privately loaded libc,
* They are used on exit when we need to update file_no.
*/
stdfile_t **privmod_stdout;
stdfile_t **privmod_stderr;
stdfile_t **privmod_stdin;
#define LIBC_STDOUT_NAME "stdout"
#define LIBC_STDERR_NAME "stderr"
#define LIBC_STDIN_NAME "stdin"
/* We save the original sp from the kernel, for use by TLS setup on Android */
void *kernel_init_sp;
/* forward decls */
static void
privload_init_search_paths(void);
static bool
privload_locate(const char *name, privmod_t *dep, char *filename OUT, bool *client OUT);
static privmod_t *
privload_locate_and_load(const char *impname, privmod_t *dependent, bool reachable);
static void
privload_call_lib_func(fp_t func);
static void
privload_relocate_mod(privmod_t *mod);
static void
privload_create_os_privmod_data(privmod_t *privmod, bool dyn_reloc);
static void
privload_delete_os_privmod_data(privmod_t *privmod);
#ifdef LINUX
void
privload_mod_tls_init(privmod_t *mod);
void
privload_mod_tls_primary_thread_init(privmod_t *mod);
#endif
/***************************************************************************/
/* Register a symbol file with gdb. This symbol needs to be exported so that
* gdb can find it even when full debug information is unavailable. We do
* *not* consider it part of DR's public API.
* i#531: gdb support for private loader
*/
DYNAMORIO_EXPORT void
dr_gdb_add_symbol_file(const char *filename, app_pc textaddr)
{
/* Do nothing. If gdb is attached with libdynamorio.so-gdb.py loaded, it
* will stop here and lift the argument values.
*/
/* FIXME: This only passes the text section offset. gdb can accept
* additional "-s<section> <address>" arguments to locate data sections.
* This would be useful for setting watchpoints on client global variables.
*/
}
#ifdef LINUX /* XXX i#1285: implement MacOS private loader */
# if defined(INTERNAL) || defined(CLIENT_INTERFACE)
static void
privload_add_gdb_cmd(elf_loader_t *loader, const char *filename, bool reachable)
{
ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
/* Get the text addr to register the ELF with gdb. The section headers
* are not part of the mapped image, so we have to map the whole file.
* XXX: seek to e_shoff and read the section headers to avoid this map.
*/
if (elf_loader_map_file(loader, reachable) != NULL) {
app_pc text_addr =
(app_pc)module_get_text_section(loader->file_map, loader->file_size);
text_addr += loader->load_delta;
print_to_buffer(gdb_priv_cmds, BUFFER_SIZE_ELEMENTS(gdb_priv_cmds),
&gdb_priv_cmds_sofar, "add-symbol-file '%s' %p\n", filename,
text_addr);
/* Add debugging comment about how to get symbol information in gdb. */
if (printed_gdb_commands) {
/* This is a dynamically loaded auxlib, so we print here.
* The client and its direct dependencies are batched up and
* printed in os_loader_init_epilogue.
*/
SYSLOG_INTERNAL_INFO("Paste into GDB to debug DynamoRIO clients:\n"
"add-symbol-file '%s' %p\n",
filename, text_addr);
}
LOG(GLOBAL, LOG_LOADER, 1, "for debugger: add-symbol-file %s %p\n", filename,
text_addr);
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(privload_register_gdb), false)) {
dr_gdb_add_symbol_file(filename, text_addr);
}
}
}
# endif
#endif
/* os specific loader initialization prologue before finalizing the load. */
void
os_loader_init_prologue(void)
{
#ifndef STATIC_LIBRARY
privmod_t *mod;
#endif
ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
privload_init_search_paths();
#ifndef STATIC_LIBRARY
/* insert libdynamorio.so */
mod = privload_insert(NULL, get_dynamorio_dll_start(),
get_dynamorio_dll_end() - get_dynamorio_dll_start(),
get_shared_lib_name(get_dynamorio_dll_start()),
get_dynamorio_library_path());
ASSERT(mod != NULL);
/* If DR was loaded by system ld.so, then .dynamic *was* relocated (i#1589) */
privload_create_os_privmod_data(mod, !DYNAMO_OPTION(early_inject));
libdr_opd = (os_privmod_data_t *)mod->os_privmod_data;
DODEBUG({
if (DYNAMO_OPTION(early_inject)) {
/* We've already filled the gap in dynamorio_lib_gap_empty(). We just
* verify here now that we have segment info.
*/
int i;
for (i = 0; i < libdr_opd->os_data.num_segments - 1; i++) {
size_t sz = libdr_opd->os_data.segments[i + 1].start -
libdr_opd->os_data.segments[i].end;
if (sz > 0) {
dr_mem_info_t info;
bool ok = query_memory_ex_from_os(libdr_opd->os_data.segments[i].end,
&info);
ASSERT(ok);
ASSERT(info.base_pc == libdr_opd->os_data.segments[i].end &&
info.size == sz &&
(info.type == DR_MEMTYPE_FREE ||
/* If we reloaded DR, our own loader filled it in. */
info.prot == DR_MEMPROT_NONE));
}
}
}
});
mod->externally_loaded = true;
# if defined(LINUX) /*i#1285*/ && (defined(INTERNAL) || defined(CLIENT_INTERFACE))
if (DYNAMO_OPTION(early_inject)) {
/* libdynamorio isn't visible to gdb so add to the cmd list */
byte *dr_base = get_dynamorio_dll_start(), *pref_base;
elf_loader_t dr_ld;
IF_DEBUG(bool success =)
elf_loader_read_headers(&dr_ld, get_dynamorio_library_path());
ASSERT(success);
module_walk_program_headers(dr_base, get_dynamorio_dll_end() - dr_base, false,
false, (byte **)&pref_base, NULL, NULL, NULL, NULL);
dr_ld.load_delta = dr_base - pref_base;
privload_add_gdb_cmd(&dr_ld, get_dynamorio_library_path(), false /*!reach*/);
elf_loader_destroy(&dr_ld);
}
# endif
#endif
}
/* os specific loader initialization epilogue after finalizing the load. */
void
os_loader_init_epilogue(void)
{
#ifdef LINUX /* XXX i#1285: implement MacOS private loader */
# if defined(INTERNAL) || defined(CLIENT_INTERFACE)
/* Print the add-symbol-file commands so they can be copy-pasted into gdb.
* We have to do it in a single syslog so they can be copy pasted.
* For non-internal builds, or for private libs loaded after this point,
* the user must look at the global gdb_priv_cmds buffer in gdb.
* FIXME i#531: Support attaching from the gdb script.
*/
ASSERT(!printed_gdb_commands);
printed_gdb_commands = true;
if (gdb_priv_cmds_sofar > 0) {
SYSLOG_INTERNAL_INFO("Paste into GDB to debug DynamoRIO clients:\n"
/* Need to turn off confirm for paste to work. */
"set confirm off\n"
"%s",
gdb_priv_cmds);
}
# endif /* INTERNAL || CLIENT_INTERFACE */
#endif
}
void
os_loader_exit(void)
{
if (libdr_opd != NULL) {
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, libdr_opd->os_data.segments, module_segment_t,
libdr_opd->os_data.alloc_segments, ACCT_OTHER, PROTECTED);
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, libdr_opd, os_privmod_data_t, ACCT_OTHER,
PROTECTED);
}
#if defined(LINUX) && (defined(INTERNAL) || defined(CLIENT_INTERFACE))
/* Put printed_gdb_commands into its original state for potential
* re-attaching and os_loader_init_epilogue().
*/
printed_gdb_commands = false;
#endif
}
/* These are called before loader_init for the primary thread for UNIX. */
void
os_loader_thread_init_prologue(dcontext_t *dcontext)
{
/* do nothing */
}
void
os_loader_thread_init_epilogue(dcontext_t *dcontext)
{
/* do nothing */
}
void
os_loader_thread_exit(dcontext_t *dcontext)
{
/* do nothing */
}
void
privload_add_areas(privmod_t *privmod)
{
os_privmod_data_t *opd;
uint i;
/* create and init the os_privmod_data for privmod.
* The os_privmod_data can only be created after heap is ready and
* should be done before adding in vmvector_add,
* so it can be either right before calling to privload_add_areas
* in the privload_load_finalize, or in here.
* We prefer here because it avoids changing the code in
* loader_shared.c, which affects windows too.
*/
privload_create_os_privmod_data(privmod, false /* i#1589: .dynamic not relocated */);
opd = (os_privmod_data_t *)privmod->os_privmod_data;
for (i = 0; i < opd->os_data.num_segments; i++) {
vmvector_add(modlist_areas, opd->os_data.segments[i].start,
opd->os_data.segments[i].end, (void *)privmod);
}
}
void
privload_remove_areas(privmod_t *privmod)
{
uint i;
os_privmod_data_t *opd = (os_privmod_data_t *)privmod->os_privmod_data;
/* walk the program header to remove areas */
for (i = 0; i < opd->os_data.num_segments; i++) {
vmvector_remove(modlist_areas, opd->os_data.segments[i].start,
opd->os_data.segments[i].end);
}
/* NOTE: we create os_privmod_data in privload_add_areas but
* do not delete here, non-symmetry.
* This is because we still need the information in os_privmod_data
* to unmap the segments in privload_unmap_file, which happens after
* privload_remove_areas.
* The create of os_privmod_data should be done when mapping the file
* into memory, but the heap is not ready at that time, so postponed
* until privload_add_areas.
*/
}
void
privload_unmap_file(privmod_t *privmod)
{
/* walk the program header to unmap files, also the tls data */
uint i;
os_privmod_data_t *opd = (os_privmod_data_t *)privmod->os_privmod_data;
/* unmap segments */
IF_DEBUG(size_t size_unmapped = 0);
for (i = 0; i < opd->os_data.num_segments; i++) {
d_r_unmap_file(opd->os_data.segments[i].start,
opd->os_data.segments[i].end - opd->os_data.segments[i].start);
DODEBUG({
size_unmapped +=
opd->os_data.segments[i].end - opd->os_data.segments[i].start;
});
if (i + 1 < opd->os_data.num_segments &&
opd->os_data.segments[i + 1].start > opd->os_data.segments[i].end) {
/* unmap the gap */
d_r_unmap_file(opd->os_data.segments[i].end,
opd->os_data.segments[i + 1].start -
opd->os_data.segments[i].end);
DODEBUG({
size_unmapped +=
opd->os_data.segments[i + 1].start - opd->os_data.segments[i].end;
});
}
}
ASSERT(size_unmapped == privmod->size);
/* XXX i#3570: Better to store the MODLOAD_SEPARATE_BSS flag but there's no
* simple code path to do it so we check the option.
*/
if (INTERNAL_OPTION(separate_private_bss)) {
/* unmap the extra .bss-separating page */
d_r_unmap_file(privmod->base + privmod->size, PAGE_SIZE);
DODEBUG({ size_unmapped += PAGE_SIZE; });
}
/* free segments */
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, opd->os_data.segments, module_segment_t,
opd->os_data.alloc_segments, ACCT_OTHER, PROTECTED);
/* delete os_privmod_data */
privload_delete_os_privmod_data(privmod);
}
bool
privload_unload_imports(privmod_t *privmod)
{
/* FIXME: i#474 unload dependent libraries if necessary */
return true;
}
#ifdef LINUX
/* Core-specific functionality for elf_loader_map_phdrs(). */
static modload_flags_t
privload_map_flags(modload_flags_t init_flags)
{
/* XXX: Keep this condition matching the check in privload_unmap_file()
* (minus MODLOAD_NOT_PRIVLIB since non-privlibs don't reach our unmap).
*/
if (INTERNAL_OPTION(separate_private_bss) && !TEST(MODLOAD_NOT_PRIVLIB, init_flags)) {
/* place an extra no-access page after .bss */
/* XXX: update privload_early_inject call to init_emulated_brk if this changes */
/* XXX: should we avoid this for -early_inject's map of the app and ld.so? */
return init_flags | MODLOAD_SEPARATE_BSS;
}
return init_flags;
}
/* Core-specific functionality for elf_loader_map_phdrs(). */
static void
privload_check_new_map_bounds(elf_loader_t *elf, byte *map_base, byte *map_end)
{
/* This is only called for MAP_FIXED. */
if (get_dynamorio_dll_start() < map_end && get_dynamorio_dll_end() > map_base) {
FATAL_USAGE_ERROR(FIXED_MAP_OVERLAPS_DR, 3, get_application_name(),
get_application_pid(), elf->filename);
ASSERT_NOT_REACHED();
}
}
#endif
/* This only maps, as relocation for ELF requires processing imports first,
* which we have to delay at init time at least.
*/
app_pc
privload_map_and_relocate(const char *filename, size_t *size OUT, modload_flags_t flags)
{
#ifdef LINUX
map_fn_t map_func;
unmap_fn_t unmap_func;
prot_fn_t prot_func;
app_pc base = NULL;
elf_loader_t loader;
ASSERT_OWN_RECURSIVE_LOCK(!TEST(MODLOAD_NOT_PRIVLIB, flags), &privload_lock);
/* get appropriate function */
/* NOTE: all but the client lib will be added to DR areas list b/c using
* d_r_map_file()
*/
if (dynamo_heap_initialized && !standalone_library) {
map_func = d_r_map_file;
unmap_func = d_r_unmap_file;
prot_func = set_protection;
} else {
map_func = os_map_file;
unmap_func = os_unmap_file;
prot_func = os_set_protection;
}
if (!elf_loader_read_headers(&loader, filename)) {
/* We may want to move the bitwidth check out of is_elf_so_header_common()
* but for now we keep that there and do another check here.
* If loader.buf was not read into it will be all zeroes.
*/
ELF_HEADER_TYPE *elf_header = (ELF_HEADER_TYPE *)loader.buf;
ELF_ALTARCH_HEADER_TYPE *altarch = (ELF_ALTARCH_HEADER_TYPE *)elf_header;
if (!TEST(MODLOAD_NOT_PRIVLIB, flags) && elf_header->e_version == 1 &&
altarch->e_ehsize == sizeof(ELF_ALTARCH_HEADER_TYPE) &&
altarch->e_machine ==
IF_X64_ELSE(IF_AARCHXX_ELSE(EM_ARM, EM_386),
IF_AARCHXX_ELSE(EM_AARCH64, EM_X86_64))) {
/* XXX i#147: Should we try some path substs like s/lib32/lib64/?
* Maybe it's better to error out to avoid loading some unintended lib.
*/
SYSLOG(SYSLOG_ERROR, CLIENT_LIBRARY_WRONG_BITWIDTH, 3, get_application_name(),
get_application_pid(), filename);
}
return NULL;
}
base =
elf_loader_map_phdrs(&loader, false /* fixed */, map_func, unmap_func, prot_func,
privload_check_new_map_bounds, privload_map_flags(flags));
if (base != NULL) {
if (size != NULL)
*size = loader.image_size;
# if defined(INTERNAL) || defined(CLIENT_INTERFACE)
if (!TEST(MODLOAD_NOT_PRIVLIB, flags))
privload_add_gdb_cmd(&loader, filename, TEST(MODLOAD_REACHABLE, flags));
# endif
}
elf_loader_destroy(&loader);
return base;
#else
/* XXX i#1285: implement MacOS private loader */
return NULL;
#endif
}
bool
privload_process_imports(privmod_t *mod)
{
#ifdef LINUX
ELF_DYNAMIC_ENTRY_TYPE *dyn;
os_privmod_data_t *opd;
char *strtab, *name;
opd = (os_privmod_data_t *)mod->os_privmod_data;
ASSERT(opd != NULL);
/* 1. get DYNAMIC section pointer */
dyn = (ELF_DYNAMIC_ENTRY_TYPE *)opd->dyn;
/* 2. get dynamic string table */
strtab = (char *)opd->os_data.dynstr;
/* 3. depth-first recursive load, so add into the deps list first */
while (dyn->d_tag != DT_NULL) {
if (dyn->d_tag == DT_NEEDED) {
name = strtab + dyn->d_un.d_val;
LOG(GLOBAL, LOG_LOADER, 2, "%s: %s imports from %s\n", __FUNCTION__,
mod->name, name);
if (privload_lookup(name) == NULL) {
privmod_t *impmod =
privload_locate_and_load(name, mod, false /*client dir=>true*/);
if (impmod == NULL)
return false;
# ifdef CLIENT_INTERFACE
/* i#852: identify all libs that import from DR as client libs.
* XXX: this code seems stale as libdynamorio.so is already loaded
* (xref #3850).
*/
if (impmod->base == get_dynamorio_dll_start())
mod->is_client = true;
# endif
}
}
++dyn;
}
/* Relocate library's symbols after loading dependent libraries (so that we
* can resolve symbols in the global ELF namespace).
*/
if (!mod->externally_loaded) {
privload_relocate_mod(mod);
}
return true;
#else
/* XXX i#1285: implement MacOS private loader */
if (!mod->externally_loaded) {
privload_relocate_mod(mod);
}
return false;
#endif
}
bool
privload_call_entry(dcontext_t *dcontext, privmod_t *privmod, uint reason)
{
os_privmod_data_t *opd = (os_privmod_data_t *)privmod->os_privmod_data;
ASSERT(os_get_priv_tls_base(NULL, TLS_REG_LIB) != NULL);
if (reason == DLL_PROCESS_INIT) {
/* calls init and init array */
LOG(GLOBAL, LOG_LOADER, 3, "%s: calling init routines of %s\n", __FUNCTION__,
privmod->name);
if (opd->init != NULL) {
LOG(GLOBAL, LOG_LOADER, 4, "%s: calling %s init func " PFX "\n", __FUNCTION__,
privmod->name, opd->init);
privload_call_lib_func(opd->init);
}
if (opd->init_array != NULL) {
uint i;
for (i = 0; i < opd->init_arraysz / sizeof(opd->init_array[i]); i++) {
if (opd->init_array[i] != NULL) { /* be paranoid */
LOG(GLOBAL, LOG_LOADER, 4, "%s: calling %s init array func " PFX "\n",
__FUNCTION__, privmod->name, opd->init_array[i]);
privload_call_lib_func(opd->init_array[i]);
}
}
}
return true;
} else if (reason == DLL_PROCESS_EXIT) {
/* calls fini and fini array */
#ifdef ANDROID
/* i#1701: libdl.so fini routines call into libc somehow, which is
* often already unmapped. We just skip them as a workaround.
*/
if (strcmp(privmod->name, "libdl.so") == 0) {
LOG(GLOBAL, LOG_LOADER, 3, "%s: NOT calling fini routines of %s\n",
__FUNCTION__, privmod->name);
return true;
}
#endif
LOG(GLOBAL, LOG_LOADER, 3, "%s: calling fini routines of %s\n", __FUNCTION__,
privmod->name);
if (opd->fini != NULL) {
LOG(GLOBAL, LOG_LOADER, 4, "%s: calling %s fini func " PFX "\n", __FUNCTION__,
privmod->name, opd->fini);
privload_call_lib_func(opd->fini);
}
if (opd->fini_array != NULL) {
uint i;
for (i = 0; i < opd->fini_arraysz / sizeof(opd->fini_array[0]); i++) {
if (opd->fini_array[i] != NULL) { /* be paranoid */
LOG(GLOBAL, LOG_LOADER, 4, "%s: calling %s fini array func " PFX "\n",
__FUNCTION__, privmod->name, opd->fini_array[i]);
privload_call_lib_func(opd->fini_array[i]);
}
}
}
return true;
}
return false;
}
void
privload_redirect_setup(privmod_t *privmod)
{
/* do nothing, the redirection is done when relocating */
}
void
privload_os_finalize(privmod_t *privmod_t)
{
/* nothing */
}
static void
privload_init_search_paths(void)
{
privload_add_drext_path();
ld_library_path = getenv(SYSTEM_LIBRARY_PATH_VAR);
}
static privmod_t *
privload_locate_and_load(const char *impname, privmod_t *dependent, bool reachable)
{
char filename[MAXIMUM_PATH];
if (privload_locate(impname, dependent, filename, &reachable))
return privload_load(filename, dependent, reachable);
return NULL;
}
app_pc
privload_load_private_library(const char *name, bool reachable)
{
privmod_t *newmod;
app_pc res = NULL;
acquire_recursive_lock(&privload_lock);
newmod = privload_lookup(name);
if (newmod == NULL)
newmod = privload_locate_and_load(name, NULL, reachable);
else
newmod->ref_count++;
if (newmod != NULL)
res = newmod->base;
release_recursive_lock(&privload_lock);
return res;
}
void
privload_load_finalized(privmod_t *mod)
{
/* nothing further to do */
}
/* If runpath, then DT_RUNPATH is searched; else, DT_RPATH. */
static bool
privload_search_rpath(privmod_t *mod, bool runpath, const char *name,
char *filename OUT /* buffer size is MAXIMUM_PATH */)
{
#ifdef LINUX
os_privmod_data_t *opd;
ELF_DYNAMIC_ENTRY_TYPE *dyn;
ASSERT(mod != NULL && "can't look for rpath without a dependent module");
ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
/* get the loading module's dir for RPATH_ORIGIN */
opd = (os_privmod_data_t *)mod->os_privmod_data;
/* i#460: if DT_RUNPATH exists we must ignore DT_RPATH and
* search DT_RUNPATH after LD_LIBRARY_PATH.
*/
if (!runpath && opd->os_data.has_runpath)
return false;
const char *moddir_end = strrchr(mod->path, '/');
size_t moddir_len = (moddir_end == NULL ? strlen(mod->path) : moddir_end - mod->path);
const char *strtab;
ASSERT(opd != NULL);
dyn = (ELF_DYNAMIC_ENTRY_TYPE *)opd->dyn;
strtab = (char *)opd->os_data.dynstr;
bool lib_found = false;
/* support $ORIGIN expansion to lib's current directory */
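    /* Editor's note (illustrative example, not in the original source): given an
     * RPATH entry of "$ORIGIN/../lib" and a dependent module located at
     * /opt/tool/bin/libclient.so, the loop below rewrites the entry to
     * "/opt/tool/bin/../lib" before appending the library name; the paths here
     * are hypothetical.
     */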
while (dyn->d_tag != DT_NULL) {
if (dyn->d_tag == (runpath ? DT_RUNPATH : DT_RPATH)) {
/* DT_RPATH and DT_RUNPATH are each a colon-separated list of paths */
const char *list = strtab + dyn->d_un.d_val;
const char *sep, *origin;
size_t len;
while (*list != '\0') {
/* really we want strchrnul() */
sep = strchr(list, ':');
if (sep == NULL)
len = strlen(list);
else
len = sep - list;
/* support $ORIGIN expansion to lib's current directory */
origin = strstr(list, RPATH_ORIGIN);
char path[MAXIMUM_PATH];
if (origin != NULL && origin < list + len) {
size_t pre_len = origin - list;
snprintf(path, BUFFER_SIZE_ELEMENTS(path), "%.*s%.*s%.*s", pre_len,
list, moddir_len, mod->path,
/* the '/' should already be here */
len - strlen(RPATH_ORIGIN) - pre_len,
origin + strlen(RPATH_ORIGIN));
NULL_TERMINATE_BUFFER(path);
} else {
snprintf(path, BUFFER_SIZE_ELEMENTS(path), "%.*s", len, list);
NULL_TERMINATE_BUFFER(path);
}
# ifdef CLIENT_INTERFACE
if (mod->is_client) {
/* We are adding a client's lib rpath to the general search path. This
* is not bullet proof compliant with what the loader should really
* do. The real problem is that the loader is walking library
* dependencies depth-first, while it should really search
* breadth-first (xref i#3850). This can lead to libraries being
* unlocatable, if the original client library had the proper rpath of
* the library, but a dependency later in the chain did not. In order
* to avoid this, we consider adding the rpath here relatively safe.
* It only affects dependent libraries of the same name in different
* locations. We are only doing this for client libraries, so we are
* not at risk to search for the wrong system libraries.
*/
if (!privload_search_path_exists(path, strlen(path))) {
snprintf(search_paths[search_paths_idx],
BUFFER_SIZE_ELEMENTS(search_paths[search_paths_idx]),
"%.*s", strlen(path), path);
NULL_TERMINATE_BUFFER(search_paths[search_paths_idx]);
LOG(GLOBAL, LOG_LOADER, 1, "%s: added search dir \"%s\"\n",
__FUNCTION__, search_paths[search_paths_idx]);
search_paths_idx++;
}
}
# endif
if (!lib_found) {
snprintf(filename, MAXIMUM_PATH, "%s/%s", path, name);
filename[MAXIMUM_PATH - 1] = 0;
LOG(GLOBAL, LOG_LOADER, 2, "%s: looking for %s\n", __FUNCTION__,
filename);
if (os_file_exists(filename, false /*!is_dir*/) &&
module_file_has_module_header(filename)) {
# ifdef CLIENT_INTERFACE
lib_found = true;
# else
return true;
# endif
}
}
list += len;
if (sep != NULL)
list += 1;
}
}
++dyn;
}
return lib_found;
#else
/* XXX i#1285: implement MacOS private loader */
#endif
return false;
}
static bool
privload_locate(const char *name, privmod_t *dep,
char *filename OUT /* buffer size is MAXIMUM_PATH */,
bool *reachable INOUT)
{
uint i;
char *lib_paths;
/* We may be passed a full path. */
if (name[0] == '/' && os_file_exists(name, false /*!is_dir*/)) {
snprintf(filename, MAXIMUM_PATH, "%s", name);
filename[MAXIMUM_PATH - 1] = 0;
return true;
}
/* FIXME: We have a simple implementation of library search.
* libc implementation can be found at elf/dl-load.c:_dl_map_object.
*/
/* the loader search order: */
/* 0) DT_RPATH */
if (dep != NULL && privload_search_rpath(dep, false /*rpath*/, name, filename))
return true;
/* 1) client lib dir */
for (i = 0; i < search_paths_idx; i++) {
snprintf(filename, MAXIMUM_PATH, "%s/%s", search_paths[i], name);
/* NULL_TERMINATE_BUFFER(filename) */
filename[MAXIMUM_PATH - 1] = 0;
LOG(GLOBAL, LOG_LOADER, 2, "%s: looking for %s\n", __FUNCTION__, filename);
if (os_file_exists(filename, false /*!is_dir*/) &&
module_file_has_module_header(filename)) {
/* If in client or extension dir, always map it reachable */
*reachable = true;
return true;
}
}
/* 2) curpath */
snprintf(filename, MAXIMUM_PATH, "./%s", name);
/* NULL_TERMINATE_BUFFER(filename) */
filename[MAXIMUM_PATH - 1] = 0;
LOG(GLOBAL, LOG_LOADER, 2, "%s: looking for %s\n", __FUNCTION__, filename);
if (os_file_exists(filename, false /*!is_dir*/) &&
module_file_has_module_header(filename))
return true;
/* 3) LD_LIBRARY_PATH */
lib_paths = ld_library_path;
while (lib_paths != NULL) {
char *end = strstr(lib_paths, ":");
if (end != NULL)
*end = '\0';
snprintf(filename, MAXIMUM_PATH, "%s/%s", lib_paths, name);
if (end != NULL) {
*end = ':';
end++;
}
/* NULL_TERMINATE_BUFFER(filename) */
filename[MAXIMUM_PATH - 1] = 0;
LOG(GLOBAL, LOG_LOADER, 2, "%s: looking for %s\n", __FUNCTION__, filename);
if (os_file_exists(filename, false /*!is_dir*/) &&
module_file_has_module_header(filename))
return true;
lib_paths = end;
}
/* 4) DT_RUNPATH */
if (dep != NULL && privload_search_rpath(dep, true /*runpath*/, name, filename))
return true;
/* 5) FIXME: i#460, we use our system paths instead of /etc/ld.so.cache. */
for (i = 0; i < NUM_SYSTEM_LIB_PATHS; i++) {
snprintf(filename, MAXIMUM_PATH, "%s/%s", system_lib_paths[i], name);
/* NULL_TERMINATE_BUFFER(filename) */
filename[MAXIMUM_PATH - 1] = 0;
LOG(GLOBAL, LOG_LOADER, 2, "%s: looking for %s\n", __FUNCTION__, filename);
if (os_file_exists(filename, false /*!is_dir*/) &&
module_file_has_module_header(filename))
return true;
}
/* Cannot find the library */
/* There's a syslog in loader_init() but we want to provide the lib name */
SYSLOG(SYSLOG_ERROR, CLIENT_LIBRARY_UNLOADABLE, 4, get_application_name(),
get_application_pid(), name,
"\n\tUnable to locate library! Try adding path to LD_LIBRARY_PATH");
return false;
}
#pragma weak dlsym
app_pc
get_private_library_address(app_pc modbase, const char *name)
{
#ifdef LINUX
privmod_t *mod;
app_pc res;
acquire_recursive_lock(&privload_lock);
mod = privload_lookup_by_base(modbase);
if (mod == NULL || mod->externally_loaded) {
release_recursive_lock(&privload_lock);
# ifdef STATIC_LIBRARY
/* externally loaded, use dlsym instead */
ASSERT(!DYNAMO_OPTION(early_inject));
return dlsym(modbase, name);
# else
/* Only libdynamorio.so is externally_loaded and we should not be querying
* for it. Unknown libs shouldn't be queried here: get_proc_address should
* be used instead.
*/
ASSERT_NOT_REACHED();
return NULL;
# endif
}
/* Before the heap is initialized, we store the text address in opd, so we
* can't check if opd != NULL to know whether it's valid.
*/
if (dynamo_heap_initialized) {
/* opd is initialized */
os_privmod_data_t *opd = (os_privmod_data_t *)mod->os_privmod_data;
res = get_proc_address_from_os_data(&opd->os_data, opd->load_delta, name, NULL);
release_recursive_lock(&privload_lock);
return res;
} else {
/* opd is not initialized */
/* get_private_library_address is first called on looking up
* USES_DR_VERSION_NAME right after loading client_lib,
* The os_privmod_data is not setup yet then because the heap
* is not initialized, so it is possible opd to be NULL.
* For this case, we have to compute the temporary os_data instead.
*/
ptr_int_t delta;
char *soname;
os_module_data_t os_data;
memset(&os_data, 0, sizeof(os_data));
if (!module_read_os_data(mod->base, false /* .dynamic not relocated (i#1589) */,
&delta, &os_data, &soname)) {
release_recursive_lock(&privload_lock);
return NULL;
}
res = get_proc_address_from_os_data(&os_data, delta, name, NULL);
release_recursive_lock(&privload_lock);
return res;
}
ASSERT_NOT_REACHED();
#else
/* XXX i#1285: implement MacOS private loader */
#endif
return NULL;
}
static void
privload_call_lib_func(fp_t func)
{
char dummy_str[] = "dummy";
char *dummy_argv[2];
/* FIXME: i#475
* The regular loader always passes argc, argv and env to libraries,
* (see libc code elf/dl-init.c), which might be ignored by those
* routines.
* We create dummy argc and argv and pass them along with the real __environ.
*/
dummy_argv[0] = dummy_str;
dummy_argv[1] = NULL;
func(1, dummy_argv, our_environ);
}
bool
get_private_library_bounds(IN app_pc modbase, OUT byte **start, OUT byte **end)
{
privmod_t *mod;
bool found = false;
ASSERT(start != NULL && end != NULL);
acquire_recursive_lock(&privload_lock);
mod = privload_lookup_by_base(modbase);
if (mod != NULL) {
*start = mod->base;
*end = mod->base + mod->size;
found = true;
}
release_recursive_lock(&privload_lock);
return found;
}
#ifdef LINUX
# if !defined(STANDALONE_UNIT_TEST) && !defined(STATIC_LIBRARY)
/* XXX: This routine is called before dynamorio relocation when we are in a
* fragile state and thus no globals access or use of ASSERT/LOG/STATS!
*/
/* If we fail to relocate dynamorio, print the error msg and abort. */
static void
privload_report_relocate_error()
{
/* The problem is that we can't call any normal routines here, or
* even reference global vars like string literals. We thus use
* a char array:
*/
const char aslr_msg[] = { 'E', 'R', 'R', 'O', 'R', ':', ' ', 'f', 'a', 'i', 'l', 'e',
'd', ' ', 't', 'o', ' ', 'r', 'e', 'l', 'o', 'c', 'a', 't',
'e', ' ', 'D', 'y', 'n', 'a', 'm', 'o', 'R', 'I', 'O', '!',
'\n', 'P', 'l', 'e', 'a', 's', 'e', ' ', 'f', 'i', 'l', 'e',
' ', 'a', 'n', ' ', 'i', 's', 's', 'u', 'e', ' ', 'a', 't',
' ', 'h', 't', 't', 'p', ':', '/', '/', 'd', 'y', 'n', 'a',
'm', 'o', 'r', 'i', 'o', '.', 'o', 'r', 'g', '/', 'i', 's',
's', 'u', 'e', 's', '.', '\n' };
# define STDERR_FD 2
os_write(STDERR_FD, aslr_msg, sizeof(aslr_msg));
dynamorio_syscall(SYS_exit_group, 1, -1);
}
/* XXX: This routine is called before dynamorio relocation when we are in a
* fragile state and thus no globals access or use of ASSERT/LOG/STATS!
*/
/* This routine is duplicated from module_relocate_symbol and simplified
* for only relocating dynamorio symbols.
*/
static void
privload_relocate_symbol(ELF_REL_TYPE *rel, os_privmod_data_t *opd, bool is_rela)
{
ELF_ADDR *r_addr;
uint r_type;
reg_t addend;
/* XXX: we assume ELF_REL_TYPE and ELF_RELA_TYPE only differ at the end,
* i.e. with or without r_addend.
*/
if (is_rela)
addend = ((ELF_RELA_TYPE *)rel)->r_addend;
else
addend = 0;
/* assume everything is read/writable */
r_addr = (ELF_ADDR *)(rel->r_offset + opd->load_delta);
r_type = (uint)ELF_R_TYPE(rel->r_info);
/* handle the most common case, i.e. ELF_R_RELATIVE */
if (r_type == ELF_R_RELATIVE) {
if (is_rela)
*r_addr = addend + opd->load_delta;
else
*r_addr += opd->load_delta;
return;
} else if (r_type == ELF_R_NONE)
return;
/* XXX i#1708: support more relocation types in bootstrap stage */
privload_report_relocate_error();
}
/* XXX: This routine is called before dynamorio relocation when we are in a
* fragile state and thus no globals access or use of ASSERT/LOG/STATS!
*/
/* This routine is duplicated from module_relocate_rel for relocating dynamorio. */
static void
privload_relocate_rel(os_privmod_data_t *opd, ELF_REL_TYPE *start, ELF_REL_TYPE *end)
{
ELF_REL_TYPE *rel;
for (rel = start; rel < end; rel++)
privload_relocate_symbol(rel, opd, false);
}
/* XXX: This routine is called before dynamorio relocation when we are in a
* fragile state and thus no globals access or use of ASSERT/LOG/STATS!
*/
/* This routine is duplicated from module_relocate_rela for relocating dynamorio. */
static void
privload_relocate_rela(os_privmod_data_t *opd, ELF_RELA_TYPE *start, ELF_RELA_TYPE *end)
{
ELF_RELA_TYPE *rela;
for (rela = start; rela < end; rela++)
privload_relocate_symbol((ELF_REL_TYPE *)rela, opd, true);
}
/* XXX: This routine may be called before dynamorio relocation when we are in a
* fragile state and thus no globals access or use of ASSERT/LOG/STATS!
*/
/* This routine is duplicated from privload_relocate_os_privmod_data */
static void
privload_early_relocate_os_privmod_data(os_privmod_data_t *opd, byte *mod_base)
{
if (opd->rel != NULL)
privload_relocate_rel(opd, opd->rel, opd->rel + opd->relsz / opd->relent);
if (opd->rela != NULL)
privload_relocate_rela(opd, opd->rela, opd->rela + opd->relasz / opd->relaent);
if (opd->jmprel != NULL) {
if (opd->pltrel == DT_REL) {
privload_relocate_rel(opd, (ELF_REL_TYPE *)opd->jmprel,
(ELF_REL_TYPE *)(opd->jmprel + opd->pltrelsz));
} else if (opd->pltrel == DT_RELA) {
privload_relocate_rela(opd, (ELF_RELA_TYPE *)opd->jmprel,
(ELF_RELA_TYPE *)(opd->jmprel + opd->pltrelsz));
} else {
privload_report_relocate_error();
}
}
}
# endif /* !defined(STANDALONE_UNIT_TEST) && !defined(STATIC_LIBRARY) */
/* This routine is duplicated at privload_early_relocate_os_privmod_data. */
static void
privload_relocate_os_privmod_data(os_privmod_data_t *opd, byte *mod_base)
{
if (opd->rel != NULL) {
module_relocate_rel(mod_base, opd, opd->rel, opd->rel + opd->relsz / opd->relent);
}
if (opd->rela != NULL) {
module_relocate_rela(mod_base, opd, opd->rela,
opd->rela + opd->relasz / opd->relaent);
}
if (opd->jmprel != NULL) {
if (opd->pltrel == DT_REL) {
module_relocate_rel(mod_base, opd, (ELF_REL_TYPE *)opd->jmprel,
(ELF_REL_TYPE *)(opd->jmprel + opd->pltrelsz));
} else if (opd->pltrel == DT_RELA) {
module_relocate_rela(mod_base, opd, (ELF_RELA_TYPE *)opd->jmprel,
(ELF_RELA_TYPE *)(opd->jmprel + opd->pltrelsz));
} else {
ASSERT(false);
}
}
}
#endif /* LINUX */
static void
privload_relocate_mod(privmod_t *mod)
{
#ifdef LINUX
os_privmod_data_t *opd = (os_privmod_data_t *)mod->os_privmod_data;
ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
LOG(GLOBAL, LOG_LOADER, 3, "relocating %s\n", mod->name);
/* If the module has a TLS block we need to update its TLS offset value.
* This must be done *before* relocating as relocating needs the os_privmod_data_t
* TLS fields set here.
*/
if (opd->tls_block_size != 0)
privload_mod_tls_init(mod);
privload_relocate_os_privmod_data(opd, mod->base);
/* For the primary thread, we now perform TLS block copying, after relocating.
* For subsequent threads this is done in privload_tls_init().
*/
if (opd->tls_block_size != 0)
privload_mod_tls_primary_thread_init(mod);
/* special handling on I/O file */
if (strstr(mod->name, "libc.so") == mod->name) {
privmod_stdout = (FILE **)get_proc_address_from_os_data(
&opd->os_data, opd->load_delta, LIBC_STDOUT_NAME, NULL);
privmod_stdin = (FILE **)get_proc_address_from_os_data(
&opd->os_data, opd->load_delta, LIBC_STDIN_NAME, NULL);
privmod_stderr = (FILE **)get_proc_address_from_os_data(
&opd->os_data, opd->load_delta, LIBC_STDERR_NAME, NULL);
}
#else
/* XXX i#1285: implement MacOS private loader */
#endif
}
static void
privload_create_os_privmod_data(privmod_t *privmod, bool dyn_reloc)
{
os_privmod_data_t *opd;
opd = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, os_privmod_data_t, ACCT_OTHER, PROTECTED);
privmod->os_privmod_data = opd;
memset(opd, 0, sizeof(*opd));
/* walk the module's program header to get privmod information */
module_walk_program_headers(privmod->base, privmod->size,
false, /* segments are remapped */
dyn_reloc, &opd->os_data.base_address, NULL,
&opd->max_end, &opd->soname, &opd->os_data);
module_get_os_privmod_data(privmod->base, privmod->size, false /*!relocated*/, opd);
}
static void
privload_delete_os_privmod_data(privmod_t *privmod)
{
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, privmod->os_privmod_data, os_privmod_data_t,
ACCT_OTHER, PROTECTED);
privmod->os_privmod_data = NULL;
}
/* i#1589: the client lib is already on the priv lib list, so we share its
* data with loaded_module_areas (which also avoids problems with .dynamic
* not being relocated for priv libs).
*/
bool
privload_fill_os_module_info(app_pc base, OUT app_pc *out_base /* relative pc */,
OUT app_pc *out_max_end /* relative pc */,
OUT char **out_soname, OUT os_module_data_t *out_data)
{
bool res = false;
privmod_t *privmod;
acquire_recursive_lock(&privload_lock);
privmod = privload_lookup_by_base(base);
if (privmod != NULL) {
os_privmod_data_t *opd = (os_privmod_data_t *)privmod->os_privmod_data;
if (out_base != NULL)
*out_base = opd->os_data.base_address;
if (out_max_end != NULL)
*out_max_end = opd->max_end;
if (out_soname != NULL)
*out_soname = opd->soname;
if (out_data != NULL)
module_copy_os_data(out_data, &opd->os_data);
res = true;
}
release_recursive_lock(&privload_lock);
return res;
}
/****************************************************************************
* Function Redirection
*/
#if defined(LINUX) && !defined(ANDROID)
/* These are not yet supported by Android's Bionic */
void *
redirect___tls_get_addr();
void *
redirect____tls_get_addr();
#endif
#ifdef LINUX
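/* Editor's note (descriptive comment, not in the original source): private libraries
 * are not registered with the system loader, so C++ exception unwinding inside them
 * cannot rely on the real dl_iterate_phdr(). The replacement below walks DR's own
 * private-module list instead, synthesizing a dl_phdr_info for each module and
 * skipping libdynamorio itself.
 */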
static int
redirect_dl_iterate_phdr(int (*callback)(struct dl_phdr_info *info, size_t size,
void *data),
void *data)
{
int res = 0;
struct dl_phdr_info info;
privmod_t *mod;
acquire_recursive_lock(&privload_lock);
for (mod = privload_first_module(); mod != NULL; mod = privload_next_module(mod)) {
ELF_HEADER_TYPE *elf_hdr = (ELF_HEADER_TYPE *)mod->base;
os_privmod_data_t *opd = (os_privmod_data_t *)mod->os_privmod_data;
/* We do want to include externally loaded (if any) and clients as
* clients can contain C++ exception code, which will call here.
*/
if (mod->base == get_dynamorio_dll_start())
continue;
info.dlpi_addr = opd->load_delta;
info.dlpi_name = mod->name;
info.dlpi_phdr = (ELF_PROGRAM_HEADER_TYPE *)(mod->base + elf_hdr->e_phoff);
info.dlpi_phnum = elf_hdr->e_phnum;
res = callback(&info, sizeof(info), data);
if (res != 0)
break;
}
release_recursive_lock(&privload_lock);
return res;
}
# if defined(ARM) && !defined(ANDROID)
typedef struct _unwind_callback_data_t {
void *pc;
void *base;
int size;
} unwind_callback_data_t;
/* Find the exception unwind table (exidx) of the image that contains the
* exception pc.
*/
int
exidx_lookup_callback(struct dl_phdr_info *info, size_t size, void *data)
{
int i;
int res = 0;
unwind_callback_data_t *ucd;
if (data == NULL || size != sizeof(*info))
return res;
ucd = (unwind_callback_data_t *)data;
for (i = 0; i < info->dlpi_phnum; i++) {
/* look for the table */
if (info->dlpi_phdr[i].p_type == PT_ARM_EXIDX) {
/* the location and size of the table for the image */
ucd->base = (void *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
ucd->size = info->dlpi_phdr[i].p_memsz;
}
/* look for the segment */
if (res == 0 && info->dlpi_phdr[i].p_type == PT_LOAD) {
if (ucd->pc >= (void *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr) &&
ucd->pc < (void *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr +
info->dlpi_phdr[i].p_memsz)) {
res = 1;
}
}
}
return res;
}
/* find the exception unwind table that contains the PC during an exception */
void *
redirect___gnu_Unwind_Find_exidx(void *pc, int *count)
{
unwind_callback_data_t ucd;
memset(&ucd, 0, sizeof(ucd));
ucd.pc = pc;
if (redirect_dl_iterate_phdr(exidx_lookup_callback, &ucd) <= 0)
return NULL;
if (count != NULL)
*count = ucd.size / 8 /* exidx table entry size */;
return ucd.base;
}
# endif /* ARM && !ANDROID */
#endif /* LINUX */
typedef struct _redirect_import_t {
const char *name;
app_pc func;
} redirect_import_t;
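/* Editor's note (descriptive comment, not in the original source): the table below
 * maps import names that private libraries resolve during relocation to DR's own
 * replacements. privload_redirect_sym() consults it (after the debug-only
 * redirect_debug_imports table, when that applies) and patches the resolved address
 * whenever a name matches.
 */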
static const redirect_import_t redirect_imports[] = {
{ "calloc", (app_pc)redirect_calloc },
{ "malloc", (app_pc)redirect_malloc },
{ "free", (app_pc)redirect_free },
{ "realloc", (app_pc)redirect_realloc },
{ "strdup", (app_pc)redirect_strdup },
/* TODO i#4243: we should also redirect functions including:
* + malloc_usable_size, memalign, valloc, mallinfo, mallopt, etc.
* + tcmalloc: tc_malloc, tc_free, etc.
* + __libc_malloc, __libc_free, etc.
* + OSX: malloc_zone_malloc, etc.? Or just malloc_create_zone?
* + C++ operators in case they don't just call libc malloc?
*/
#if defined(LINUX) && !defined(ANDROID)
{ "__tls_get_addr", (app_pc)redirect___tls_get_addr },
{ "___tls_get_addr", (app_pc)redirect____tls_get_addr },
#endif
#ifdef LINUX
/* i#1717: C++ exceptions call this */
{ "dl_iterate_phdr", (app_pc)redirect_dl_iterate_phdr },
# if defined(ARM) && !defined(ANDROID)
/* i#1717: C++ exceptions call this on ARM Linux */
{ "__gnu_Unwind_Find_exidx", (app_pc)redirect___gnu_Unwind_Find_exidx },
# endif
#endif
/* We need these for clients that don't use libc (i#1747) */
{ "strlen", (app_pc)strlen },
{ "wcslen", (app_pc)wcslen },
{ "strchr", (app_pc)strchr },
{ "strrchr", (app_pc)strrchr },
{ "strncpy", (app_pc)strncpy },
{ "memcpy", (app_pc)memcpy },
{ "memset", (app_pc)memset },
{ "memmove", (app_pc)memmove },
{ "strncat", (app_pc)strncat },
{ "strcmp", (app_pc)strcmp },
{ "strncmp", (app_pc)strncmp },
{ "memcmp", (app_pc)memcmp },
{ "strstr", (app_pc)strstr },
{ "strcasecmp", (app_pc)strcasecmp },
/* Also redirect the _chk versions (i#1747, i#46) */
{ "memcpy_chk", (app_pc)memcpy },
{ "memset_chk", (app_pc)memset },
{ "memmove_chk", (app_pc)memmove },
{ "strncpy_chk", (app_pc)strncpy },
};
#define REDIRECT_IMPORTS_NUM (sizeof(redirect_imports) / sizeof(redirect_imports[0]))
#ifdef DEBUG
static const redirect_import_t redirect_debug_imports[] = {
{ "calloc", (app_pc)redirect_calloc_initonly },
{ "malloc", (app_pc)redirect_malloc_initonly },
{ "free", (app_pc)redirect_free_initonly },
{ "realloc", (app_pc)redirect_realloc_initonly },
{ "strdup", (app_pc)redirect_strdup_initonly },
};
# define REDIRECT_DEBUG_IMPORTS_NUM \
(sizeof(redirect_debug_imports) / sizeof(redirect_debug_imports[0]))
#endif
bool
privload_redirect_sym(ptr_uint_t *r_addr, const char *name)
{
int i;
/* iterate over all symbols and redirect syms when necessary, e.g. malloc */
#ifdef DEBUG
if (disallow_unsafe_static_calls) {
for (i = 0; i < REDIRECT_DEBUG_IMPORTS_NUM; i++) {
if (strcmp(redirect_debug_imports[i].name, name) == 0) {
*r_addr = (ptr_uint_t)redirect_debug_imports[i].func;
return true;
}
}
}
#endif
for (i = 0; i < REDIRECT_IMPORTS_NUM; i++) {
if (strcmp(redirect_imports[i].name, name) == 0) {
*r_addr = (ptr_uint_t)redirect_imports[i].func;
return true;
}
}
return false;
}
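/* Illustrative sketch (not part of this file): how an import-resolution loop
 * in the private loader would consult privload_redirect_sym() above before
 * falling back to normal ELF symbol lookup. The name example_resolve_import
 * and the commented-out elf_sym_lookup() helper are hypothetical.
 */
static void
example_resolve_import(ptr_uint_t *reloc_slot, const char *sym_name)
{
    /* Prefer DR's replacement (e.g., redirect_malloc for "malloc") if one exists. */
    if (privload_redirect_sym(reloc_slot, sym_name))
        return;
    /* Otherwise resolve normally, e.g.:
     * *reloc_slot = (ptr_uint_t)elf_sym_lookup(sym_name);
     */
}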
/***************************************************************************
* DynamoRIO Early Injection Code
*/
#ifdef LINUX
# if !defined(STANDALONE_UNIT_TEST) && !defined(STATIC_LIBRARY)
/* Find the auxiliary vector and adjust it to look as if the kernel had set up
* the stack for the ELF mapped at map. The auxiliary vector starts after the
* terminating NULL pointer in the envp array.
*/
static void
privload_setup_auxv(char **envp, app_pc map, ptr_int_t delta, app_pc interp_map,
const char *exe_path /*must be persistent*/)
{
ELF_AUXV_TYPE *auxv;
ELF_HEADER_TYPE *elf = (ELF_HEADER_TYPE *)map;
/* The aux vector is after the last environment pointer. */
while (*envp != NULL)
envp++;
auxv = (ELF_AUXV_TYPE *)(envp + 1);
/* fix up the auxv entries that refer to the executable */
for (; auxv->a_type != AT_NULL; auxv++) {
/* the actual addr should be: (base + offs) or (v_addr + delta) */
switch (auxv->a_type) {
case AT_ENTRY:
auxv->a_un.a_val = (ptr_int_t)elf->e_entry + delta;
LOG(GLOBAL, LOG_LOADER, 2, "AT_ENTRY: " PFX "\n", auxv->a_un.a_val);
break;
case AT_PHDR:
auxv->a_un.a_val = (ptr_int_t)map + elf->e_phoff;
LOG(GLOBAL, LOG_LOADER, 2, "AT_PHDR: " PFX "\n", auxv->a_un.a_val);
break;
case AT_PHENT: auxv->a_un.a_val = (ptr_int_t)elf->e_phentsize; break;
case AT_PHNUM: auxv->a_un.a_val = (ptr_int_t)elf->e_phnum; break;
case AT_BASE: /* Android loader reads this */
auxv->a_un.a_val = (ptr_int_t)interp_map;
LOG(GLOBAL, LOG_LOADER, 2, "AT_BASE: " PFX "\n", auxv->a_un.a_val);
break;
case AT_EXECFN: /* Android loader references this, unclear what for */
auxv->a_un.a_val = (ptr_int_t)exe_path;
LOG(GLOBAL, LOG_LOADER, 2, "AT_EXECFN: " PFX " %s\n", auxv->a_un.a_val,
(char *)auxv->a_un.a_val);
break;
/* The rest of these AT_* values don't seem to be important to the
* loader, but we log them.
*/
case AT_EXECFD:
LOG(GLOBAL, LOG_LOADER, 2, "AT_EXECFD: %d\n", auxv->a_un.a_val);
break;
}
}
}
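/* Illustrative sketch (not part of this file): the kernel-built stack that
 * privload_setup_auxv() walks looks like
 *   sp -> argc
 *         argv[0..argc-1], NULL
 *         envp[0..n-1],    NULL
 *         auxv[0..m-1],    AT_NULL terminator
 * The read-only helper below locates a single auxv value the same way; it is
 * a hypothetical example and is not used anywhere in this file.
 */
static ptr_uint_t
example_find_auxv_val(char **envp, ptr_uint_t type)
{
    ELF_AUXV_TYPE *auxv;
    /* The aux vector starts after the NULL that terminates envp. */
    while (*envp != NULL)
        envp++;
    for (auxv = (ELF_AUXV_TYPE *)(envp + 1); auxv->a_type != AT_NULL; auxv++) {
        if (auxv->a_type == type)
            return (ptr_uint_t)auxv->a_un.a_val;
    }
    return 0;
}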
/* Entry point for ptrace injection. */
static void
takeover_ptrace(ptrace_stack_args_t *args)
{
static char home_var[MAXIMUM_PATH + 6 /*HOME=path\0*/];
static char *fake_envp[] = { home_var, NULL };
/* When we come in via ptrace, we have no idea where the environment
* pointer is. We could use /proc/self/environ to read it or go searching
* near the stack base. However, both are fragile and we don't really need
* the environment for anything except for option passing. In the initial
* ptraced process, we can assume our options are in a config file and not
* the environment, so we just set an environment with HOME.
*/
snprintf(home_var, BUFFER_SIZE_ELEMENTS(home_var), "HOME=%s", args->home_dir);
NULL_TERMINATE_BUFFER(home_var);
dynamorio_set_envp(fake_envp);
dynamorio_app_init();
/* FIXME i#37: takeover other threads */
/* We need to wait until dr_inject_process_run() is called to finish
* takeover, and this is an easy way to stop and return control to the
* injector.
*/
dynamorio_syscall(SYS_kill, 2, get_process_id(), SIGTRAP);
dynamo_start(&args->mc);
}
static void
reserve_brk(app_pc post_app)
{
/* We haven't parsed the options yet, so we rely on drinjectlib
* setting this env var if the user passed -no_emulate_brk:
*/
if (getenv(DYNAMORIO_VAR_NO_EMULATE_BRK) == NULL) {
/* i#1004: we're going to emulate the brk via our own mmap.
* Reserve the initial brk now before any of DR's mmaps to avoid overlap.
*/
dynamo_options.emulate_brk = true; /* not parsed yet */
init_emulated_brk(post_app);
} else {
/* i#1004: as a workaround, reserve some space for sbrk() during early injection
* before initializing DR's heap. With early injection, the program break comes
* somewhere after DR's bss section, subject to some ASLR. When we allocate our
* heap, sometimes we mmap right over the break, so any brk() calls will fail.
* When brk() fails, most malloc() implementations fall back to mmap().
* However, sometimes libc startup code needs to allocate memory before libc is
* initialized. In this case it calls brk(), and will crash if it fails.
*
* Ideally we'd just set the break to follow the app's exe, but the kernel
* forbids setting the break to a value less than the current break. I also
* tried to reserve memory by increasing the break by ~20 pages and then
         * resetting it, but the kernel unreserves it. The current workaround is to
* increase the break by 1. The loader needs to allocate more than a page of
* memory, so this doesn't guarantee that further brk() calls will succeed.
* However, I haven't observed any brk() failures after adding this workaround.
*/
ptr_int_t start_brk;
ASSERT(!dynamo_heap_initialized);
start_brk = dynamorio_syscall(SYS_brk, 1, 0);
dynamorio_syscall(SYS_brk, 1, start_brk + 1);
/* I'd log the results, but logs aren't initialized yet. */
}
}
byte *
map_exe_file_and_brk(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot,
map_flags_t map_flags)
{
/* A little hacky: we assume the MEMPROT_NONE is the overall mmap for the whole
* region, where our goal is to push it back for top-down PIE filling to leave
* room for a reasonable brk.
*/
if (prot == MEMPROT_NONE && offs == 0) {
size_t sz_with_brk = *size + APP_BRK_GAP;
byte *res = os_map_file(f, &sz_with_brk, offs, addr, prot, map_flags);
if (res != NULL)
os_unmap_file(res + sz_with_brk - APP_BRK_GAP, APP_BRK_GAP);
*size = sz_with_brk - APP_BRK_GAP;
return res;
} else
return os_map_file(f, size, offs, addr, prot, map_flags);
}
/* XXX: This routine is called before dynamorio relocation when we are in a
* fragile state and thus no globals access or use of ASSERT/LOG/STATS!
*/
/* This routine is partially duplicated from module_get_os_privmod_data.
 * It partially fills the os_privmod_data for dynamorio relocation.
* Return true if relocation is required.
*/
static bool
privload_get_os_privmod_data(app_pc base, OUT os_privmod_data_t *opd)
{
app_pc mod_base, mod_end;
ELF_HEADER_TYPE *elf_hdr = (ELF_HEADER_TYPE *)base;
ELF_PROGRAM_HEADER_TYPE *prog_hdr;
uint i;
/* walk program headers to get mod_base mod_end and delta */
mod_base = module_vaddr_from_prog_header(base + elf_hdr->e_phoff, elf_hdr->e_phnum,
NULL, &mod_end);
    /* delta from preferred address, used to calculate the real address */
opd->load_delta = base - mod_base;
/* At this point one could consider returning false if the load_delta
* is zero. However, this optimisation was found to give only a small
* benefit, and is not safe if RELA relocations are in use. In particular,
* it did not work on AArch64 when libdynamorio.so was built with the BFD
* linker from Debian's binutils 2.26-8.
*/
/* walk program headers to get dynamic section pointer */
prog_hdr = (ELF_PROGRAM_HEADER_TYPE *)(base + elf_hdr->e_phoff);
for (i = 0; i < elf_hdr->e_phnum; i++) {
if (prog_hdr->p_type == PT_DYNAMIC) {
opd->dyn = (ELF_DYNAMIC_ENTRY_TYPE *)(prog_hdr->p_vaddr + opd->load_delta);
opd->dynsz = prog_hdr->p_memsz;
# ifdef DEBUG
} else if (prog_hdr->p_type == PT_TLS && prog_hdr->p_memsz > 0) {
/* XXX: we assume libdynamorio has no tls block b/c we're not calling
* privload_relocate_mod().
*/
privload_report_relocate_error();
# endif /* DEBUG */
}
++prog_hdr;
}
if (opd->dyn == NULL)
return false;
module_init_os_privmod_data_from_dyn(opd, opd->dyn, opd->load_delta);
return true;
}
/* XXX: This routine is called before dynamorio relocation when we are in a
* fragile state and thus no globals access or use of ASSERT/LOG/STATS!
*/
/* This routine is duplicated from is_elf_so_header_common. */
static bool
privload_mem_is_elf_so_header(byte *mem)
{
/* assume we can directly read from mem */
ELF_HEADER_TYPE *elf_hdr = (ELF_HEADER_TYPE *)mem;
/* ELF magic number */
if (elf_hdr->e_ident[EI_MAG0] != ELFMAG0 || elf_hdr->e_ident[EI_MAG1] != ELFMAG1 ||
elf_hdr->e_ident[EI_MAG2] != ELFMAG2 || elf_hdr->e_ident[EI_MAG3] != ELFMAG3)
return false;
/* libdynamorio should be ET_DYN */
if (elf_hdr->e_type != ET_DYN)
return false;
/* ARM or X86 */
if (elf_hdr->e_machine !=
IF_X86_ELSE(IF_X64_ELSE(EM_X86_64, EM_386), IF_X64_ELSE(EM_AARCH64, EM_ARM)))
return false;
if (elf_hdr->e_ehsize != sizeof(ELF_HEADER_TYPE))
return false;
return true;
}
/* Returns false if the text-data gap is not empty. Else, fills the gap with
* no-access mappings and returns true.
*/
static bool
dynamorio_lib_gap_empty(void)
{
/* XXX: get_dynamorio_dll_start() is already calling
* memquery_library_bounds_by_iterator() which is doing this maps walk: can we
* avoid this extra walk by somehow passing info back to us? Have an
* "interrupted" output param or sthg and is_dynamorio_dll_interrupted()?
*/
memquery_iter_t iter;
bool res = true;
if (memquery_iterator_start(&iter, NULL, false /*no heap*/)) {
byte *dr_start = get_dynamorio_dll_start();
byte *dr_end = get_dynamorio_dll_end();
byte *gap_start = dr_start;
const char *dynamorio_library_path = get_dynamorio_library_path();
while (memquery_iterator_next(&iter) && iter.vm_start < dr_end) {
if (iter.vm_start >= dr_start && iter.vm_end <= dr_end &&
iter.comment[0] != '\0' &&
/* i#3799: ignore the kernel labeling DR's .bss as "[heap]". */
strcmp(iter.comment, "[heap]") != 0 &&
strcmp(iter.comment, dynamorio_library_path) != 0) {
/* There's a non-anon mapping inside: probably vvar and/or vdso. */
res = false;
break;
}
/* i#1659: fill in the text-data segment gap to ensure no mmaps in between.
* The kernel does not do this. Our private loader does, so if we reloaded
* ourselves this is already in place. We do this now rather than in
* os_loader_init_prologue() to prevent our brk mmap from landing here.
*/
if (iter.vm_start > gap_start) {
size_t sz = iter.vm_start - gap_start;
ASSERT(sz > 0);
DEBUG_DECLARE(byte *fill =)
os_map_file(-1, &sz, 0, gap_start, MEMPROT_NONE,
MAP_FILE_COPY_ON_WRITE | MAP_FILE_FIXED);
ASSERT(fill != NULL);
gap_start = iter.vm_end;
} else if (iter.vm_end > gap_start) {
gap_start = iter.vm_end;
}
}
memquery_iterator_stop(&iter);
}
return res;
}
/* XXX: This routine is called before dynamorio relocation when we are in a
* fragile state and thus no globals access or use of ASSERT/LOG/STATS!
*/
void
relocate_dynamorio(byte *dr_map, size_t dr_size, byte *sp)
{
ptr_uint_t argc = *(ptr_uint_t *)sp;
/* Plus 2 to skip argc and null pointer that terminates argv[]. */
const char **env = (const char **)sp + argc + 2;
os_privmod_data_t opd = { { 0 } };
os_page_size_init(env, true);
if (dr_map == NULL) {
/* we do not know where dynamorio is, so check backward page by page */
dr_map = (app_pc)ALIGN_BACKWARD((ptr_uint_t)relocate_dynamorio, PAGE_SIZE);
while (dr_map != NULL && !privload_mem_is_elf_so_header(dr_map)) {
dr_map -= PAGE_SIZE;
}
}
if (dr_map == NULL)
privload_report_relocate_error();
/* Relocate it */
if (privload_get_os_privmod_data(dr_map, &opd))
privload_early_relocate_os_privmod_data(&opd, dr_map);
}
/* i#1227: on a conflict with the app we reload ourselves.
* Does not return.
*/
static void
reload_dynamorio(void **init_sp, app_pc conflict_start, app_pc conflict_end)
{
elf_loader_t dr_ld;
os_privmod_data_t opd;
byte *dr_map;
/* We expect at most vvar+vdso+stack+vsyscall => 5 different mappings
* even if they were all in the conflict area.
*/
# define MAX_TEMP_MAPS 16
byte *temp_map[MAX_TEMP_MAPS];
size_t temp_size[MAX_TEMP_MAPS];
uint num_temp_maps = 0, i;
memquery_iter_t iter;
app_pc entry;
byte *cur_dr_map = get_dynamorio_dll_start();
byte *cur_dr_end = get_dynamorio_dll_end();
size_t dr_size = cur_dr_end - cur_dr_map;
IF_DEBUG(bool success =)
elf_loader_read_headers(&dr_ld, get_dynamorio_library_path());
ASSERT(success);
/* XXX: have better strategy for picking base: currently we rely on
* the kernel picking an address, so we have to block out the conflicting
* region first, avoiding any existing mappings (like vvar+vdso: i#2641).
*/
if (memquery_iterator_start(&iter, NULL, false /*no heap*/)) {
/* Strategy: track the leading edge ("tocover_start") of the conflict region.
* Find the next block beyond that edge so we know the safe endpoint for a
* temp mmap.
*/
byte *tocover_start = conflict_start;
while (memquery_iterator_next(&iter)) {
if (iter.vm_start > tocover_start) {
temp_map[num_temp_maps] = tocover_start;
temp_size[num_temp_maps] =
MIN(iter.vm_start, conflict_end) - tocover_start;
tocover_start = iter.vm_end;
if (temp_size[num_temp_maps] > 0) {
temp_map[num_temp_maps] = os_map_file(
-1, &temp_size[num_temp_maps], 0, temp_map[num_temp_maps],
MEMPROT_NONE, MAP_FILE_COPY_ON_WRITE | MAP_FILE_FIXED);
ASSERT(temp_map[num_temp_maps] != NULL);
num_temp_maps++;
}
} else if (iter.vm_end > tocover_start) {
tocover_start = iter.vm_end;
}
if (iter.vm_start >= conflict_end)
break;
}
memquery_iterator_stop(&iter);
if (tocover_start < conflict_end) {
temp_map[num_temp_maps] = tocover_start;
temp_size[num_temp_maps] = conflict_end - tocover_start;
temp_map[num_temp_maps] =
os_map_file(-1, &temp_size[num_temp_maps], 0, temp_map[num_temp_maps],
MEMPROT_NONE, MAP_FILE_COPY_ON_WRITE | MAP_FILE_FIXED);
ASSERT(temp_map[num_temp_maps] != NULL);
num_temp_maps++;
}
}
/* Now load the 2nd libdynamorio.so */
dr_map = elf_loader_map_phdrs(&dr_ld, false /*!fixed*/, os_map_file, os_unmap_file,
os_set_protection, privload_check_new_map_bounds,
privload_map_flags(0 /*!reachable*/));
ASSERT(dr_map != NULL);
ASSERT(is_elf_so_header(dr_map, 0));
/* Relocate it */
memset(&opd, 0, sizeof(opd));
module_get_os_privmod_data(dr_map, dr_size, false /*!relocated*/, &opd);
/* XXX: we assume libdynamorio has no tls block b/c we're not calling
* privload_relocate_mod().
*/
ASSERT(opd.tls_block_size == 0);
privload_relocate_os_privmod_data(&opd, dr_map);
for (i = 0; i < num_temp_maps; i++)
os_unmap_file(temp_map[i], temp_size[i]);
entry = (app_pc)dr_ld.ehdr->e_entry + dr_ld.load_delta;
elf_loader_destroy(&dr_ld);
/* Now we transfer control unconditionally to the new DR's _start, after
* first restoring init_sp. We pass along the current (old) DR's bounds
* for removal.
*/
xfer_to_new_libdr(entry, init_sp, cur_dr_map, dr_size);
ASSERT_NOT_REACHED();
}
/* Called from _start in x86.asm. sp is the initial app stack pointer that the
* kernel set up for us, and it points to the usual argc, argv, envp, and auxv
* that the kernel puts on the stack. The 2nd & 3rd args must be 0 in
* the initial call.
*
* We assume that _start has already called relocate_dynamorio() for us and
* that it is now safe to access globals.
*/
void
privload_early_inject(void **sp, byte *old_libdr_base, size_t old_libdr_size)
{
ptr_int_t *argc = (ptr_int_t *)sp; /* Kernel writes an elf_addr_t. */
char **argv = (char **)sp + 1;
char **envp = argv + *argc + 1;
app_pc entry = NULL;
char *exe_path;
char *exe_basename;
app_pc exe_map, exe_end;
elf_loader_t exe_ld;
const char *interp;
priv_mcontext_t mc;
bool success;
memquery_iter_t iter;
app_pc interp_map;
if (*argc == ARGC_PTRACE_SENTINEL) {
/* XXX: Teach the injector to look up takeover_ptrace() and call it
* directly instead of using this sentinel. We come here because we
* can easily find the address of _start in the ELF header.
*/
takeover_ptrace((ptrace_stack_args_t *)sp);
ASSERT_NOT_REACHED();
}
kernel_init_sp = (void *)sp;
/* XXX i#47: for Linux, we can't easily have this option on by default as
* code like get_application_short_name() called from drpreload before
* even _init is run needs to have a non-early default.
*/
dynamo_options.early_inject = true;
/* i#1227: if we reloaded ourselves, unload the old libdynamorio */
if (old_libdr_base != NULL) {
/* i#2641: we can't blindly unload the whole region as vvar+vdso may be
* in the text-data gap.
*/
const char *dynamorio_library_path = get_dynamorio_library_path();
if (memquery_iterator_start(&iter, NULL, false /*no heap*/)) {
while (memquery_iterator_next(&iter)) {
if (iter.vm_start >= old_libdr_base &&
iter.vm_end <= old_libdr_base + old_libdr_size &&
(iter.comment[0] == '\0' /* .bss */ ||
/* The kernel sometimes mis-labels our .bss as "[heap]". */
strcmp(iter.comment, "[heap]") == 0 ||
strcmp(iter.comment, dynamorio_library_path) == 0)) {
os_unmap_file(iter.vm_start, iter.vm_end - iter.vm_start);
}
if (iter.vm_start >= old_libdr_base + old_libdr_size)
break;
}
memquery_iterator_stop(&iter);
}
}
dynamorio_set_envp(envp);
/* argv[0] doesn't actually have to be the path to the exe, so we put the
* real exe path in an environment variable.
*/
exe_path = getenv(DYNAMORIO_VAR_EXE_PATH);
/* i#1677: this happens upon re-launching within gdb, so provide a nice error */
if (exe_path == NULL) {
/* i#1677: avoid assert in get_application_name_helper() */
set_executable_path("UNKNOWN");
apicheck(exe_path != NULL,
DYNAMORIO_VAR_EXE_PATH " env var is not set. "
"Are you re-launching within gdb?");
}
/* i#907: We can't rely on /proc/self/exe for the executable path, so we
* have to tell get_application_name() to use this path.
*/
set_executable_path(exe_path);
/* XXX i#2662: Currently, we only support getting args for early injection.
* Add support for late injection.
*/
set_app_args((int *)argc, argv);
success = elf_loader_read_headers(&exe_ld, exe_path);
apicheck(success,
"Failed to read app ELF headers. Check path and "
"architecture.");
/* Find range of app */
exe_map = module_vaddr_from_prog_header((app_pc)exe_ld.phdrs, exe_ld.ehdr->e_phnum,
NULL, &exe_end);
/* i#1227: on a conflict with the app (+ room for the brk): reload ourselves */
if (get_dynamorio_dll_start() < exe_end + APP_BRK_GAP &&
get_dynamorio_dll_end() > exe_map) {
elf_loader_destroy(&exe_ld);
reload_dynamorio(sp, exe_map, exe_end + APP_BRK_GAP);
ASSERT_NOT_REACHED();
}
/* i#2641: we can't handle something in the text-data gap.
* Various parts of DR assume there's nothing inside (and we even fill the
* gap with a PROT_NONE mmap later: i#1659), so we reload to avoid it,
* under the assumption that it's rare and we're not paying this cost
* very often.
*/
if (!dynamorio_lib_gap_empty()) {
elf_loader_destroy(&exe_ld);
reload_dynamorio(sp, get_dynamorio_dll_start(), get_dynamorio_dll_end());
ASSERT_NOT_REACHED();
}
exe_map = elf_loader_map_phdrs(&exe_ld,
/* fixed at preferred address,
* will be overridden if preferred base is 0
*/
true,
/* ensure there's space for the brk */
map_exe_file_and_brk, os_unmap_file, os_set_protection,
privload_check_new_map_bounds,
privload_map_flags(0 /*!reachable*/));
apicheck(exe_map != NULL,
"Failed to load application. "
"Check path and architecture.");
ASSERT(is_elf_so_header(exe_map, 0));
/* i#1660: the app may have passed a relative path or a symlink to execve,
* yet the kernel will put a resolved path into /proc/self/maps.
* Rather than us here or in pre-execve, plus in drrun or drinjectlib,
* making paths absolute and resolving symlinks to try and match what the
* kernel does, we just read the kernel's resolved path.
* This is prior to memquery_init() but that's fine (it's already being
* called by is_elf_so_header() above).
*/
if (memquery_iterator_start(&iter, exe_map, false /*no heap*/)) {
while (memquery_iterator_next(&iter)) {
if (iter.vm_start == exe_map) {
set_executable_path(iter.comment);
break;
}
}
memquery_iterator_stop(&iter);
}
/* Set the process name with prctl PR_SET_NAME. This makes killall <app>
* work.
*/
exe_basename = strrchr(exe_path, '/');
if (exe_basename == NULL) {
exe_basename = exe_path;
} else {
exe_basename++;
}
dynamorio_syscall(SYS_prctl, 5, PR_SET_NAME, (ptr_uint_t)exe_basename, 0, 0, 0);
reserve_brk(exe_map + exe_ld.image_size +
(INTERNAL_OPTION(separate_private_bss) ? PAGE_SIZE : 0));
interp = elf_loader_find_pt_interp(&exe_ld);
if (interp != NULL) {
/* Load the ELF pointed at by PT_INTERP, usually ld.so. */
elf_loader_t interp_ld;
success = elf_loader_read_headers(&interp_ld, interp);
apicheck(success, "Failed to read ELF interpreter headers.");
interp_map = elf_loader_map_phdrs(
&interp_ld, false /* fixed */, os_map_file, os_unmap_file, os_set_protection,
privload_check_new_map_bounds, privload_map_flags(0 /*!reachable*/));
apicheck(interp_map != NULL && is_elf_so_header(interp_map, 0),
"Failed to map ELF interpreter.");
/* On Android, the system loader /system/bin/linker sets itself
* as the interpreter in the ELF header .interp field.
*/
ASSERT_CURIOSITY_ONCE((strcmp(interp, "/system/bin/linker") == 0 ||
elf_loader_find_pt_interp(&interp_ld) == NULL) &&
"The interpreter shouldn't have an interpreter");
entry = (app_pc)interp_ld.ehdr->e_entry + interp_ld.load_delta;
elf_loader_destroy(&interp_ld);
} else {
/* No PT_INTERP, so this is a static exe. */
interp_map = NULL;
entry = (app_pc)exe_ld.ehdr->e_entry + exe_ld.load_delta;
}
privload_setup_auxv(envp, exe_map, exe_ld.load_delta, interp_map, exe_path);
elf_loader_destroy(&exe_ld);
/* Initialize DR *after* we map the app image. This is consistent with our
* old behavior, and allows the client to do things like call
* dr_get_proc_address() on the app from dr_client_main(). We let
* find_executable_vm_areas re-discover the mappings we made for the app and
* interp images.
*/
dynamorio_app_init();
LOG(GLOBAL, LOG_TOP, 1, "early injected into app with this cmdline:\n");
DOLOG(1, LOG_TOP, {
int i;
for (i = 0; i < *argc; i++) {
LOG(GLOBAL, LOG_TOP, 1, "%s ", argv[i]);
}
LOG(GLOBAL, LOG_TOP, 1, "\n");
});
if (RUNNING_WITHOUT_CODE_CACHE()) {
/* Reset the stack pointer back to the beginning and jump to the entry
* point to execute the app natively. This is also useful for testing
* if the app has been mapped correctly without involving DR's code
* cache.
*/
# ifdef X86
asm("mov %0, %%" ASM_XSP "\n\t"
"jmp *%1\n\t"
:
: "r"(sp), "r"(entry));
# elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_REACHED();
# endif
}
memset(&mc, 0, sizeof(mc));
mc.xsp = (reg_t)sp;
mc.pc = entry;
dynamo_start(&mc);
}
# endif /* !defined(STANDALONE_UNIT_TEST) && !defined(STATIC_LIBRARY) */
#else
/* XXX i#1285: implement MacOS private loader */
#endif
| 1 | 20,953 | I still don't seem to fully understand this. Why are we testing the host if DR_HOST_NOT_TARGET is not set? | DynamoRIO-dynamorio | c |
@@ -34,9 +34,9 @@ import (
func TestBzzFiles(t *testing.T) {
var (
- fileUploadResource = "/bzz"
+ fileUploadResource = "/v1/bzz"
targets = "0x222"
- fileDownloadResource = func(addr string) string { return "/bzz/" + addr }
+ fileDownloadResource = func(addr string) string { return "/v1/bzz/" + addr }
simpleData = []byte("this is a simple text")
storerMock = smock.NewStorer()
statestoreMock = statestore.NewStateStore() | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api_test
import (
"bytes"
"context"
"fmt"
"io"
"mime"
"mime/multipart"
"net/http"
"strconv"
"strings"
"testing"
"github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/file/loadsave"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/jsonhttp/jsonhttptest"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/manifest"
pinning "github.com/ethersphere/bee/pkg/pinning/mock"
mockpost "github.com/ethersphere/bee/pkg/postage/mock"
statestore "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/steward/mock"
"github.com/ethersphere/bee/pkg/storage"
smock "github.com/ethersphere/bee/pkg/storage/mock"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
)
func TestBzzFiles(t *testing.T) {
var (
fileUploadResource = "/bzz"
targets = "0x222"
fileDownloadResource = func(addr string) string { return "/bzz/" + addr }
simpleData = []byte("this is a simple text")
storerMock = smock.NewStorer()
statestoreMock = statestore.NewStateStore()
pinningMock = pinning.NewServiceMock()
logger = logging.New(io.Discard, 0)
client, _, _ = newTestServer(t, testServerOptions{
Storer: storerMock,
Pinning: pinningMock,
Tags: tags.NewTags(statestoreMock, logger),
Logger: logger,
Post: mockpost.New(mockpost.WithAcceptAll()),
})
)
t.Run("invalid-content-type", func(t *testing.T) {
jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource,
http.StatusBadRequest,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: api.InvalidContentType.Error(),
Code: http.StatusBadRequest,
}),
)
})
t.Run("tar-file-upload", func(t *testing.T) {
tr := tarFiles(t, []f{
{
data: []byte("robots text"),
name: "robots.txt",
dir: "",
header: http.Header{
"Content-Type": {"text/plain; charset=utf-8"},
},
},
{
data: []byte("image 1"),
name: "1.png",
dir: "img",
header: http.Header{
"Content-Type": {"image/png"},
},
},
{
data: []byte("image 2"),
name: "2.png",
dir: "img",
header: http.Header{
"Content-Type": {"image/png"},
},
},
})
address := swarm.MustParseHexAddress("f30c0aa7e9e2a0ef4c9b1b750ebfeaeb7c7c24da700bb089da19a46e3677824b")
rcvdHeader := jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource, http.StatusCreated,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(tr),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: address,
}),
)
isTagFoundInResponse(t, rcvdHeader, nil)
has, err := storerMock.Has(context.Background(), address)
if err != nil {
t.Fatal(err)
}
if !has {
t.Fatal("storer check root chunk address: have none; want one")
}
refs, err := pinningMock.Pins()
if err != nil {
t.Fatal("unable to get pinned references")
}
if have, want := len(refs), 0; have != want {
t.Fatalf("root pin count mismatch: have %d; want %d", have, want)
}
})
t.Run("tar-file-upload-with-pinning", func(t *testing.T) {
tr := tarFiles(t, []f{
{
data: []byte("robots text"),
name: "robots.txt",
dir: "",
header: http.Header{
"Content-Type": {"text/plain; charset=utf-8"},
},
},
{
data: []byte("image 1"),
name: "1.png",
dir: "img",
header: http.Header{
"Content-Type": {"image/png"},
},
},
{
data: []byte("image 2"),
name: "2.png",
dir: "img",
header: http.Header{
"Content-Type": {"image/png"},
},
},
})
reference := swarm.MustParseHexAddress("f30c0aa7e9e2a0ef4c9b1b750ebfeaeb7c7c24da700bb089da19a46e3677824b")
rcvdHeader := jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource, http.StatusCreated,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestHeader(api.SwarmPinHeader, "true"),
jsonhttptest.WithRequestBody(tr),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: reference,
}),
)
isTagFoundInResponse(t, rcvdHeader, nil)
has, err := storerMock.Has(context.Background(), reference)
if err != nil {
t.Fatal(err)
}
if !has {
t.Fatal("storer check root chunk reference: have none; want one")
}
refs, err := pinningMock.Pins()
if err != nil {
t.Fatal(err)
}
if have, want := len(refs), 1; have != want {
t.Fatalf("root pin count mismatch: have %d; want %d", have, want)
}
if have, want := refs[0], reference; !have.Equal(want) {
t.Fatalf("root pin reference mismatch: have %q; want %q", have, want)
}
})
t.Run("encrypt-decrypt", func(t *testing.T) {
fileName := "my-pictures.jpeg"
var resp api.BzzUploadResponse
rcvdHeader := jsonhttptest.Request(t, client, http.MethodPost,
fileUploadResource+"?name="+fileName, http.StatusCreated,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
jsonhttptest.WithRequestHeader(api.SwarmEncryptHeader, "True"),
jsonhttptest.WithRequestHeader("Content-Type", "image/jpeg; charset=utf-8"),
jsonhttptest.WithUnmarshalJSONResponse(&resp),
)
isTagFoundInResponse(t, rcvdHeader, nil)
rootHash := resp.Reference.String()
rcvdHeader = jsonhttptest.Request(t, client, http.MethodGet,
fileDownloadResource(rootHash), http.StatusOK,
jsonhttptest.WithExpectedResponse(simpleData),
)
cd := rcvdHeader.Get("Content-Disposition")
_, params, err := mime.ParseMediaType(cd)
if err != nil {
t.Fatal(err)
}
if params["filename"] != fileName {
t.Fatal("Invalid file name detected")
}
if rcvdHeader.Get("Content-Type") != "image/jpeg; charset=utf-8" {
t.Fatal("Invalid content type detected")
}
})
t.Run("filter out filename path", func(t *testing.T) {
fileName := "my-pictures.jpeg"
fileNameWithPath := "../../" + fileName
var resp api.BzzUploadResponse
_ = jsonhttptest.Request(t, client, http.MethodPost,
fileUploadResource+"?name="+fileNameWithPath, http.StatusCreated,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
jsonhttptest.WithRequestHeader("Content-Type", "image/jpeg; charset=utf-8"),
jsonhttptest.WithUnmarshalJSONResponse(&resp),
)
rootHash := resp.Reference.String()
rcvdHeader := jsonhttptest.Request(t, client, http.MethodGet,
fileDownloadResource(rootHash), http.StatusOK,
jsonhttptest.WithExpectedResponse(simpleData),
)
cd := rcvdHeader.Get("Content-Disposition")
_, params, err := mime.ParseMediaType(cd)
if err != nil {
t.Fatal(err)
}
if params["filename"] != fileName {
t.Fatalf("want filename %s, got %s", fileName, params["filename"])
}
if rcvdHeader.Get("Content-Type") != "image/jpeg; charset=utf-8" {
t.Fatal("Invalid content type detected")
}
})
t.Run("check-content-type-detection", func(t *testing.T) {
fileName := "my-pictures.jpeg"
rootHash := "4f9146b3813ccbd7ce45a18be23763d7e436ab7a3982ef39961c6f3cd4da1dcf"
rcvdHeader := jsonhttptest.Request(t, client, http.MethodPost,
fileUploadResource+"?name="+fileName, http.StatusCreated,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash),
}),
jsonhttptest.WithRequestHeader("Content-Type", "image/jpeg; charset=utf-8"),
)
isTagFoundInResponse(t, rcvdHeader, nil)
rcvdHeader = jsonhttptest.Request(t, client, http.MethodGet,
fileDownloadResource(rootHash), http.StatusOK,
jsonhttptest.WithExpectedResponse(simpleData),
)
cd := rcvdHeader.Get("Content-Disposition")
_, params, err := mime.ParseMediaType(cd)
if err != nil {
t.Fatal(err)
}
if params["filename"] != fileName {
t.Fatal("Invalid file name detected")
}
if rcvdHeader.Get("Content-Type") != "image/jpeg; charset=utf-8" {
t.Fatal("Invalid content type detected")
}
})
t.Run("upload-then-download-and-check-data", func(t *testing.T) {
fileName := "sample.html"
rootHash := "36e6c1bbdfee6ac21485d5f970479fd1df458d36df9ef4e8179708ed46da557f"
sampleHtml := `<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>`
rcvdHeader := jsonhttptest.Request(t, client, http.MethodPost,
fileUploadResource+"?name="+fileName, http.StatusCreated,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(strings.NewReader(sampleHtml)),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash),
}),
jsonhttptest.WithRequestHeader("Content-Type", "text/html; charset=utf-8"),
)
if rcvdHeader.Get("ETag") != fmt.Sprintf("%q", rootHash) {
t.Fatal("Invalid ETags header received")
}
isTagFoundInResponse(t, rcvdHeader, nil)
// try to fetch the same file and check the data
rcvdHeader = jsonhttptest.Request(t, client, http.MethodGet,
fileDownloadResource(rootHash), http.StatusOK,
jsonhttptest.WithExpectedResponse([]byte(sampleHtml)),
)
// check the headers
cd := rcvdHeader.Get("Content-Disposition")
_, params, err := mime.ParseMediaType(cd)
if err != nil {
t.Fatal(err)
}
if params["filename"] != fileName {
t.Fatal("Invalid filename detected")
}
if rcvdHeader.Get("Content-Type") != "text/html; charset=utf-8" {
t.Fatal("Invalid content type detected")
}
})
t.Run("upload-then-download-with-targets", func(t *testing.T) {
fileName := "simple_file.txt"
rootHash := "65148cd89b58e91616773f5acea433f7b5a6274f2259e25f4893a332b74a7e28"
rcvdHeader := jsonhttptest.Request(t, client, http.MethodPost,
fileUploadResource+"?name="+fileName, http.StatusCreated,
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(bytes.NewReader(simpleData)),
jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{
Reference: swarm.MustParseHexAddress(rootHash),
}),
jsonhttptest.WithRequestHeader("Content-Type", "text/html; charset=utf-8"),
)
isTagFoundInResponse(t, rcvdHeader, nil)
rcvdHeader = jsonhttptest.Request(t, client, http.MethodGet,
fileDownloadResource(rootHash)+"?targets="+targets, http.StatusOK,
jsonhttptest.WithExpectedResponse(simpleData),
)
if rcvdHeader.Get(api.TargetsRecoveryHeader) != targets {
t.Fatalf("targets mismatch. got %s, want %s",
rcvdHeader.Get(api.TargetsRecoveryHeader), targets)
}
})
}
// TestBzzFilesRangeRequests validates that all endpoints are serving content with
// respect to HTTP Range headers.
func TestBzzFilesRangeRequests(t *testing.T) {
data := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus dignissim tincidunt orci id aliquam. Praesent eget turpis in lectus semper consectetur et ut nibh. Nam rhoncus, augue sit amet sollicitudin lacinia, turpis tortor molestie urna, at mattis sem sapien sit amet augue. In bibendum ex vel odio dignissim interdum. Quisque hendrerit sapien et porta condimentum. Vestibulum efficitur mauris tellus, eget vestibulum sapien vulputate ac. Proin et vulputate sapien. Duis tincidunt mauris vulputate porta venenatis. Sed dictum aliquet urna, sit amet fermentum velit pellentesque vitae. Nam sed nisi ultrices, volutpat quam et, malesuada sapien. Nunc gravida non orci at rhoncus. Sed vitae dui accumsan, venenatis lectus et, mattis tellus. Proin sed mauris eu mi congue lacinia.")
uploads := []struct {
name string
uploadEndpoint string
downloadEndpoint string
filepath string
reader io.Reader
contentType string
}{
{
name: "bytes",
uploadEndpoint: "/bytes",
downloadEndpoint: "/bytes",
reader: bytes.NewReader(data),
contentType: "text/plain; charset=utf-8",
},
{
name: "file",
uploadEndpoint: "/bzz",
downloadEndpoint: "/bzz",
reader: bytes.NewReader(data),
contentType: "text/plain; charset=utf-8",
},
{
name: "dir",
uploadEndpoint: "/bzz",
downloadEndpoint: "/bzz",
filepath: "ipsum/lorem.txt",
reader: tarFiles(t, []f{
{
data: data,
name: "lorem.txt",
dir: "ipsum",
header: http.Header{
"Content-Type": {"text/plain; charset=utf-8"},
},
},
}),
contentType: api.ContentTypeTar,
},
}
ranges := []struct {
name string
ranges [][2]int
}{
{
name: "all",
ranges: [][2]int{{0, len(data)}},
},
{
name: "all without end",
ranges: [][2]int{{0, -1}},
},
{
name: "all without start",
ranges: [][2]int{{-1, len(data)}},
},
{
name: "head",
ranges: [][2]int{{0, 50}},
},
{
name: "tail",
ranges: [][2]int{{250, len(data)}},
},
{
name: "middle",
ranges: [][2]int{{10, 15}},
},
{
name: "multiple",
ranges: [][2]int{{10, 15}, {100, 125}},
},
{
name: "even more multiple parts",
ranges: [][2]int{{10, 15}, {100, 125}, {250, 252}, {261, 270}, {270, 280}},
},
}
for _, upload := range uploads {
t.Run(upload.name, func(t *testing.T) {
mockStatestore := statestore.NewStateStore()
logger := logging.New(io.Discard, 0)
client, _, _ := newTestServer(t, testServerOptions{
Storer: smock.NewStorer(),
Tags: tags.NewTags(mockStatestore, logger),
Logger: logger,
Post: mockpost.New(mockpost.WithAcceptAll()),
})
var resp api.BzzUploadResponse
testOpts := []jsonhttptest.Option{
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(upload.reader),
jsonhttptest.WithRequestHeader("Content-Type", upload.contentType),
jsonhttptest.WithUnmarshalJSONResponse(&resp),
}
if upload.name == "dir" {
testOpts = append(testOpts, jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"))
}
rcvdHeader := jsonhttptest.Request(t, client, http.MethodPost, upload.uploadEndpoint, http.StatusCreated,
testOpts...,
)
isTagFoundInResponse(t, rcvdHeader, nil)
var downloadPath string
if upload.downloadEndpoint != "/bytes" {
downloadPath = upload.downloadEndpoint + "/" + resp.Reference.String() + "/" + upload.filepath
} else {
downloadPath = upload.downloadEndpoint + "/" + resp.Reference.String()
}
for _, tc := range ranges {
t.Run(tc.name, func(t *testing.T) {
rangeHeader, want := createRangeHeader(data, tc.ranges)
var body []byte
respHeaders := jsonhttptest.Request(t, client, http.MethodGet,
downloadPath,
http.StatusPartialContent,
jsonhttptest.WithRequestHeader("Range", rangeHeader),
jsonhttptest.WithPutResponseBody(&body),
)
got := parseRangeParts(t, respHeaders.Get("Content-Type"), body)
if len(got) != len(want) {
t.Fatalf("got %v parts, want %v parts", len(got), len(want))
}
for i := 0; i < len(want); i++ {
if !bytes.Equal(got[i], want[i]) {
t.Errorf("part %v: got %q, want %q", i, string(got[i]), string(want[i]))
}
}
})
}
})
}
}
func createRangeHeader(data []byte, ranges [][2]int) (header string, parts [][]byte) {
header = "bytes="
for i, r := range ranges {
if i > 0 {
header += ", "
}
if r[0] >= 0 && r[1] >= 0 {
parts = append(parts, data[r[0]:r[1]])
// Range: <unit>=<range-start>-<range-end>, end is inclusive
header += fmt.Sprintf("%v-%v", r[0], r[1]-1)
} else {
if r[0] >= 0 {
header += strconv.Itoa(r[0]) // Range: <unit>=<range-start>-
parts = append(parts, data[r[0]:])
}
header += "-"
if r[1] >= 0 {
if r[0] >= 0 {
// Range: <unit>=<range-start>-<range-end>, end is inclusive
header += strconv.Itoa(r[1] - 1)
} else {
// Range: <unit>=-<suffix-length>, the parameter is length
header += strconv.Itoa(r[1])
}
parts = append(parts, data[:r[1]])
}
}
}
return
}
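// Illustrative sketch (not part of the original test file): what
// createRangeHeader above produces for a multi-range request. End offsets in
// `ranges` are exclusive, while the HTTP Range header uses inclusive ends.
// This helper is hypothetical and is not called by any test.
func exampleCreateRangeHeaderUsage(t *testing.T) {
	data := bytes.Repeat([]byte("x"), 300)
	header, parts := createRangeHeader(data, [][2]int{{10, 15}, {100, 125}})
	if header != "bytes=10-14, 100-124" {
		t.Fatalf("unexpected header: %s", header)
	}
	if len(parts) != 2 || len(parts[0]) != 5 || len(parts[1]) != 25 {
		t.Fatalf("unexpected part sizes: %d, %d", len(parts[0]), len(parts[1]))
	}
}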
func parseRangeParts(t *testing.T, contentType string, body []byte) (parts [][]byte) {
t.Helper()
mimetype, params, _ := mime.ParseMediaType(contentType)
if mimetype != "multipart/byteranges" {
parts = append(parts, body)
return
}
mr := multipart.NewReader(bytes.NewReader(body), params["boundary"])
for part, err := mr.NextPart(); err == nil; part, err = mr.NextPart() {
value, err := io.ReadAll(part)
if err != nil {
t.Fatal(err)
}
parts = append(parts, value)
}
return parts
}
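// Illustrative sketch (not part of the original test file): the shape of a
// multipart/byteranges body that parseRangeParts above splits into parts.
// The boundary string, ranges, and total size shown here are hypothetical.
const exampleByterangesBody = "--BOUNDARY\r\n" +
	"Content-Type: text/plain; charset=utf-8\r\n" +
	"Content-Range: bytes 10-14/768\r\n" +
	"\r\n" +
	"<5 bytes of payload>\r\n" +
	"--BOUNDARY\r\n" +
	"Content-Type: text/plain; charset=utf-8\r\n" +
	"Content-Range: bytes 100-124/768\r\n" +
	"\r\n" +
	"<25 bytes of payload>\r\n" +
	"--BOUNDARY--\r\n"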
func TestFeedIndirection(t *testing.T) {
// first, "upload" some content for the update
var (
updateData = []byte("<h1>Swarm Feeds Hello World!</h1>")
mockStatestore = statestore.NewStateStore()
logger = logging.New(io.Discard, 0)
storer = smock.NewStorer()
client, _, _ = newTestServer(t, testServerOptions{
Storer: storer,
Tags: tags.NewTags(mockStatestore, logger),
Logger: logger,
Post: mockpost.New(mockpost.WithAcceptAll()),
})
)
// tar all the test case files
tarReader := tarFiles(t, []f{
{
data: updateData,
name: "index.html",
dir: "",
filePath: "./index.html",
},
})
var resp api.BzzUploadResponse
options := []jsonhttptest.Option{
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr),
jsonhttptest.WithRequestBody(tarReader),
jsonhttptest.WithRequestHeader("Content-Type", api.ContentTypeTar),
jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True"),
jsonhttptest.WithUnmarshalJSONResponse(&resp),
jsonhttptest.WithRequestHeader(api.SwarmIndexDocumentHeader, "index.html"),
}
// verify directory tar upload response
jsonhttptest.Request(t, client, http.MethodPost, "/bzz", http.StatusCreated, options...)
if resp.Reference.String() == "" {
t.Fatalf("expected file reference, did not got any")
}
// now use the "content" to mock the feed lookup
// also, use the mocked mantaray chunks that unmarshal
// into a real manifest with the mocked feed values when
// called from the bzz endpoint. then call the bzz endpoint with
// the pregenerated feed root manifest hash
feedUpdate := toChunk(t, 121212, resp.Reference.Bytes())
var (
look = newMockLookup(-1, 0, feedUpdate, nil, &id{}, nil)
factory = newMockFactory(look)
bzzDownloadResource = func(addr, path string) string { return "/bzz/" + addr + "/" + path }
ctx = context.Background()
)
client, _, _ = newTestServer(t, testServerOptions{
Storer: storer,
Tags: tags.NewTags(mockStatestore, logger),
Logger: logger,
Feeds: factory,
})
_, err := storer.Put(ctx, storage.ModePutUpload, feedUpdate)
if err != nil {
t.Fatal(err)
}
m, err := manifest.NewDefaultManifest(
loadsave.New(storer, pipelineFactory(storer, storage.ModePutUpload, false)),
false,
)
if err != nil {
t.Fatal(err)
}
emptyAddr := make([]byte, 32)
err = m.Add(ctx, manifest.RootPath, manifest.NewEntry(swarm.NewAddress(emptyAddr), map[string]string{
api.FeedMetadataEntryOwner: "8d3766440f0d7b949a5e32995d09619a7f86e632",
api.FeedMetadataEntryTopic: "abcc",
api.FeedMetadataEntryType: "epoch",
}))
if err != nil {
t.Fatal(err)
}
manifRef, err := m.Store(ctx)
if err != nil {
t.Fatal(err)
}
jsonhttptest.Request(t, client, http.MethodGet, bzzDownloadResource(manifRef.String(), ""), http.StatusOK,
jsonhttptest.WithExpectedResponse(updateData),
)
}
func TestBzzReupload(t *testing.T) {
var (
logger = logging.New(io.Discard, 0)
statestoreMock = statestore.NewStateStore()
stewardMock = &mock.Steward{}
storer = smock.NewStorer()
addr = swarm.NewAddress([]byte{31: 128})
)
client, _, _ := newTestServer(t, testServerOptions{
Storer: storer,
Tags: tags.NewTags(statestoreMock, logger),
Logger: logger,
Steward: stewardMock,
})
jsonhttptest.Request(t, client, http.MethodPatch, "/v1/bzz/"+addr.String(), http.StatusOK,
jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{
Message: http.StatusText(http.StatusOK),
Code: http.StatusOK,
}),
)
if !stewardMock.LastAddress().Equal(addr) {
t.Fatalf("got address %s want %s", stewardMock.LastAddress().String(), addr.String())
}
}
| 1 | 16,006 | in the current implementation, both schemes are supported (you can call either `/bzz` or `/v1/bzz`). i would suggest to keep it this way | ethersphere-bee | go |
@@ -44,9 +44,12 @@ module.exports = class Client {
body: data
}).then((response) => response.json()).then((assembly) => {
if (assembly.error) {
- const error = new Error(assembly.error)
+ const error = new Error(assembly)
error.message = assembly.error
- error.details = assembly.reason
+ error.details = assembly.message
+ if (assembly.assembly_id) {
+ error.details += ' ' + `assembly_id: ${assembly.assembly_id}`
+ }
throw error
}
| 1 | /**
* A Barebones HTTP API client for Transloadit.
*/
module.exports = class Client {
constructor (opts = {}) {
this.opts = opts
this._reportError = this._reportError.bind(this)
this._headers = {
'Transloadit-Client': this.opts.client
}
}
/**
* Create a new assembly.
*
* @param {object} options
*/
createAssembly ({
templateId,
params,
fields,
signature,
expectedFiles
}) {
const data = new FormData()
data.append('params', typeof params === 'string'
? params
: JSON.stringify(params))
if (signature) {
data.append('signature', signature)
}
Object.keys(fields).forEach((key) => {
data.append(key, fields[key])
})
data.append('num_expected_upload_files', expectedFiles)
const url = `${this.opts.service}/assemblies`
return fetch(url, {
method: 'post',
headers: this._headers,
body: data
}).then((response) => response.json()).then((assembly) => {
if (assembly.error) {
const error = new Error(assembly.error)
error.message = assembly.error
error.details = assembly.reason
throw error
}
return assembly
}).catch((err) => this._reportError(err, { url, type: 'API_ERROR' }))
}
/**
* Reserve resources for a file in an Assembly. Then addFile can be used later.
*
* @param {object} assembly
* @param {UppyFile} file
*/
reserveFile (assembly, file) {
const size = encodeURIComponent(file.size)
const url = `${assembly.assembly_ssl_url}/reserve_file?size=${size}`
return fetch(url, { method: 'post', headers: this._headers })
.then((response) => response.json())
.catch((err) => this._reportError(err, { assembly, file, url, type: 'API_ERROR' }))
}
/**
* Import a remote file to an Assembly.
*
* @param {object} assembly
* @param {UppyFile} file
*/
addFile (assembly, file) {
if (!file.uploadURL) {
return Promise.reject(new Error('File does not have an `uploadURL`.'))
}
const size = encodeURIComponent(file.size)
const uploadUrl = encodeURIComponent(file.uploadURL)
const filename = encodeURIComponent(file.name)
const fieldname = 'file'
const qs = `size=${size}&filename=${filename}&fieldname=${fieldname}&s3Url=${uploadUrl}`
const url = `${assembly.assembly_ssl_url}/add_file?${qs}`
return fetch(url, { method: 'post', headers: this._headers })
.then((response) => response.json())
.catch((err) => this._reportError(err, { assembly, file, url, type: 'API_ERROR' }))
}
/**
* Cancel a running Assembly.
*
* @param {object} assembly
*/
cancelAssembly (assembly) {
const url = assembly.assembly_ssl_url
return fetch(url, { method: 'delete', headers: this._headers })
.then((response) => response.json())
.catch((err) => this._reportError(err, { url, type: 'API_ERROR' }))
}
/**
* Get the current status for an assembly.
*
* @param {string} url The status endpoint of the assembly.
*/
getAssemblyStatus (url) {
return fetch(url, { headers: this._headers })
.then((response) => response.json())
.catch((err) => this._reportError(err, { url, type: 'STATUS_ERROR' }))
}
submitError (err, { endpoint, instance, assembly }) {
const message = err.details
? `${err.message} (${err.details})`
: err.message
return fetch('https://status.transloadit.com/client_error', {
method: 'post',
body: JSON.stringify({
endpoint,
instance,
assembly_id: assembly,
agent: typeof navigator !== 'undefined' ? navigator.userAgent : '',
client: this.opts.client,
error: message
})
}).then((response) => response.json())
}
_reportError (err, params) {
if (this.opts.errorReporting === false) {
throw err
}
const opts = {
type: params.type
}
if (params.assembly) {
opts.assembly = params.assembly.assembly_id
opts.instance = params.assembly.instance
}
if (params.url) {
opts.endpoint = params.url
}
this.submitError(err, opts).catch((_) => {
// not much we can do then is there
})
throw err
}
}
| 1 | 12,707 | hmm, I think we can just do `new Error(assembly.error)` and that should set `error.message` correctly too. I don't know why it was done this way with a separate `.message` assignment before :sweat_smile: Should we do `error.assembly = assembly` so the template editor can access it that way, rather than parsing `error.details`? | transloadit-uppy | js |
@@ -315,7 +315,10 @@ class Typo3PageIndexer
$document->setField('rootline', $rootline);
// access
- $document->setField('access', (string)$this->pageAccessRootline);
+ $access = (string)$this->pageAccessRootline;
+ if (trim($access) !== "") {
+ $document->setField('access', $access);
+ }
if ($this->page->page['endtime']) {
$document->setField('endtime', $pageRecord['endtime']);
} | 1 | <?php
namespace ApacheSolrForTypo3\Solr;
/***************************************************************
* Copyright notice
*
* (c) 2009-2015 Ingo Renner <[email protected]>
* All rights reserved
*
* This script is part of the TYPO3 project. The TYPO3 project is
* free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The GNU General Public License can be found at
* http://www.gnu.org/copyleft/gpl.html.
*
* This script is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This copyright notice MUST APPEAR in all copies of the script!
***************************************************************/
use ApacheSolrForTypo3\Solr\Access\Rootline;
use TYPO3\CMS\Core\Utility\GeneralUtility;
use TYPO3\CMS\Frontend\Controller\TypoScriptFrontendController;
/**
* Page Indexer to index TYPO3 pages used by the Index Queue.
*
* @author Ingo Renner <[email protected]>
* @author Daniel Poetzinger <[email protected]>
* @author Timo Schmidt <[email protected]>
*/
class Typo3PageIndexer
{
/**
* ID of the current page's Solr document.
*
* @var string
*/
protected static $pageSolrDocumentId = '';
/**
* The Solr document generated for the current page.
*
* @var \Apache_Solr_Document
*/
protected static $pageSolrDocument = null;
/**
* The mount point parameter used in the Frontend controller.
*
* @var string
*/
protected $mountPointParameter;
/**
* Solr server connection.
*
* @var SolrService
*/
protected $solrConnection = null;
/**
* Frontend page object (TSFE).
*
* @var TypoScriptFrontendController
*/
protected $page = null;
/**
* Content extractor to extract content from TYPO3 pages
*
* @var Typo3PageContentExtractor
*/
protected $contentExtractor = null;
/**
* URL to be indexed as the page's URL
*
* @var string
*/
protected $pageUrl = '';
/**
* The page's access rootline
*
* @var Rootline
*/
protected $pageAccessRootline = null;
/**
* Documents that have been sent to Solr
*
* @var array
*/
protected $documentsSentToSolr = array();
/**
* @var array
*/
protected $configuration;
/**
* Constructor
*
* @param TypoScriptFrontendController $page The page to index
*/
public function __construct(TypoScriptFrontendController $page)
{
$this->page = $page;
$this->pageUrl = GeneralUtility::getIndpEnv('TYPO3_REQUEST_URL');
$this->configuration = Util::getSolrConfiguration();
try {
$this->initializeSolrConnection();
} catch (\Exception $e) {
$this->log($e->getMessage() . ' Error code: ' . $e->getCode(), 3);
// TODO extract to a class "ExceptionLogger"
if ($this->configuration->getLoggingExceptions()) {
GeneralUtility::devLog('Exception while trying to index a page',
'solr', 3, array(
$e->__toString()
));
}
}
$this->contentExtractor = GeneralUtility::makeInstance(
'ApacheSolrForTypo3\\Solr\\Typo3PageContentExtractor',
$this->page->content,
$this->page->renderCharset
);
$this->pageAccessRootline = GeneralUtility::makeInstance(
'ApacheSolrForTypo3\\Solr\\Access\\Rootline',
''
);
}
/**
* Initializes the Solr server connection.
*
* @throws \Exception when no Solr connection can be established.
*/
protected function initializeSolrConnection()
{
$solr = GeneralUtility::makeInstance('ApacheSolrForTypo3\\Solr\\ConnectionManager')->getConnectionByPageId(
$this->page->id,
$this->page->sys_language_uid
);
// do not continue if no server is available
if (!$solr->ping()) {
throw new \Exception(
'No Solr instance available while trying to index a page.',
1234790825
);
}
$this->solrConnection = $solr;
}
/**
* Logs messages to devlog and TS log (admin panel)
*
* @param string $message Message to set
* @param int $errorNum Error number
* @param array $data Additional data to log
* @return void
*/
protected function log($message, $errorNum = 0, array $data = array())
{
if (is_object($GLOBALS['TT'])) {
$GLOBALS['TT']->setTSlogMessage('tx_solr: ' . $message, $errorNum);
}
if ($this->configuration->getLoggingIndexing()) {
            $logData = array();
            if (!empty($data)) {
                foreach ($data as $value) {
                    $logData[] = (array)$value;
                }
            }
            GeneralUtility::devLog($message, 'solr', $errorNum, $logData);
}
}
/**
* Gets the current page's Solr document ID.
*
* @return string|NULL The page's Solr document ID or NULL in case no document was generated yet.
*/
public static function getPageSolrDocumentId()
{
return self::$pageSolrDocumentId;
}
/**
* Gets the Solr document generated for the current page.
*
* @return \Apache_Solr_Document|NULL The page's Solr document or NULL if it has not been generated yet.
*/
public static function getPageSolrDocument()
{
return self::$pageSolrDocument;
}
/**
* Allows to provide a Solr server connection other than the one
* initialized by the constructor.
*
* @param SolrService $solrConnection Solr connection
* @throws \Exception if the Solr server cannot be reached
*/
public function setSolrConnection(SolrService $solrConnection)
{
if (!$solrConnection->ping()) {
throw new \Exception(
'Could not connect to Solr server.',
1323946472
);
}
$this->solrConnection = $solrConnection;
}
/**
* Indexes a page.
*
* @return bool TRUE after successfully indexing the page, FALSE on error
* @throws \UnexpectedValueException if a page document post processor fails to implement interface ApacheSolrForTypo3\Solr\PageDocumentPostProcessor
*/
public function indexPage()
{
$pageIndexed = false;
        $documents = array(); // this will become useful as soon as we start indexing individual records instead of whole pages
if (is_null($this->solrConnection)) {
// intended early return as it doesn't make sense to continue
// and waste processing time if the solr server isn't available
// anyways
// FIXME use an exception
return $pageIndexed;
}
$pageDocument = $this->getPageDocument();
$pageDocument = $this->substitutePageDocument($pageDocument);
if (is_array($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr']['Indexer']['indexPagePostProcessPageDocument'])) {
foreach ($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr']['Indexer']['indexPagePostProcessPageDocument'] as $classReference) {
$postProcessor = GeneralUtility::getUserObj($classReference);
if ($postProcessor instanceof PageDocumentPostProcessor) {
$postProcessor->postProcessPageDocument($pageDocument,
$this->page);
} else {
throw new \UnexpectedValueException(
get_class($pageDocument) . ' must implement interface ApacheSolrForTypo3\Solr\PageDocumentPostProcessor',
1397739154
);
}
}
}
self::$pageSolrDocument = $pageDocument;
$documents[] = $pageDocument;
$documents = $this->getAdditionalDocuments($pageDocument, $documents);
$this->processDocuments($documents);
$pageIndexed = $this->addDocumentsToSolrIndex($documents);
$this->documentsSentToSolr = $documents;
return $pageIndexed;
}
/**
* Builds the Solr document for the current page.
*
* @return \Apache_Solr_Document A document representing the page
*/
protected function getPageDocument()
{
$document = GeneralUtility::makeInstance('Apache_Solr_Document');
/* @var $document \Apache_Solr_Document */
$site = Site::getSiteByPageId($this->page->id);
$pageRecord = $this->page->page;
self::$pageSolrDocumentId = $documentId = Util::getPageDocumentId(
$this->page->id,
$this->page->type,
$this->page->sys_language_uid,
$this->getDocumentIdGroups(),
$this->getMountPointParameter()
);
$document->setField('id', $documentId);
$document->setField('site', $site->getDomain());
$document->setField('siteHash', $site->getSiteHash());
$document->setField('appKey', 'EXT:solr');
$document->setField('type', 'pages');
// system fields
$document->setField('uid', $this->page->id);
$document->setField('pid', $pageRecord['pid']);
// variantId
$document->setField('variantId', 'pages/' . $this->page->id);
$document->setField('typeNum', $this->page->type);
$document->setField('created', $pageRecord['crdate']);
$document->setField('changed', $pageRecord['SYS_LASTCHANGED']);
$rootline = $this->page->id;
$mountPointParameter = $this->getMountPointParameter();
if ($mountPointParameter !== '') {
$rootline .= ',' . $mountPointParameter;
}
$document->setField('rootline', $rootline);
// access
$document->setField('access', (string)$this->pageAccessRootline);
if ($this->page->page['endtime']) {
$document->setField('endtime', $pageRecord['endtime']);
}
// content
$document->setField('title', $this->contentExtractor->getPageTitle());
$document->setField('subTitle', $pageRecord['subtitle']);
$document->setField('navTitle', $pageRecord['nav_title']);
$document->setField('author', $pageRecord['author']);
$document->setField('description', $pageRecord['description']);
$document->setField('abstract', $pageRecord['abstract']);
$document->setField('content',
$this->contentExtractor->getIndexableContent());
$document->setField('url', $this->pageUrl);
// keywords, multi valued
$keywords = array_unique(GeneralUtility::trimExplode(
',',
$pageRecord['keywords'],
true
));
foreach ($keywords as $keyword) {
$document->addField('keywords', $keyword);
}
// content from several tags like headers, anchors, ...
$tagContent = $this->contentExtractor->getTagContent();
foreach ($tagContent as $fieldName => $fieldValue) {
$document->setField($fieldName, $fieldValue);
}
return $document;
}
/**
* Gets a comma separated list of frontend user groups to use for the
* document ID.
*
* @return string A comma separated list of frontend user groups.
*/
protected function getDocumentIdGroups()
{
$groups = $this->pageAccessRootline->getGroups();
$groups = Rootline::cleanGroupArray($groups);
if (empty($groups)) {
$groups[] = 0;
}
$groups = implode(',', $groups);
return $groups;
}
// Logging
// TODO replace by a central logger
/**
* Gets the mount point parameter that is used in the Frontend controller.
*
* @return string
*/
public function getMountPointParameter()
{
return $this->mountPointParameter;
}
// Misc
/**
* Sets the mount point parameter that is used in the Frontend controller.
*
* @param string $mountPointParameter
*/
public function setMountPointParameter($mountPointParameter)
{
$this->mountPointParameter = (string)$mountPointParameter;
}
/**
* Allows third party extensions to replace or modify the page document
* created by this indexer.
*
* @param \Apache_Solr_Document $pageDocument The page document created by this indexer.
* @return \Apache_Solr_Document An Apache Solr document representing the currently indexed page
*/
protected function substitutePageDocument(
\Apache_Solr_Document $pageDocument
) {
if (is_array($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr']['Indexer']['indexPageSubstitutePageDocument'])) {
foreach ($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr']['Indexer']['indexPageSubstitutePageDocument'] as $classReference) {
$substituteIndexer = GeneralUtility::getUserObj($classReference);
if ($substituteIndexer instanceof SubstitutePageIndexer) {
$substituteDocument = $substituteIndexer->getPageDocument($pageDocument);
if ($substituteDocument instanceof \Apache_Solr_Document) {
$pageDocument = $substituteDocument;
} else {
throw new \UnexpectedValueException(
'The document returned by ' . get_class($substituteIndexer) . ' is not a valid Apache_Solr_Document document.',
1310490952
);
}
} else {
throw new \UnexpectedValueException(
get_class($substituteIndexer) . ' must implement interface ApacheSolrForTypo3\Solr\SubstitutePageIndexer',
1310491001
);
}
}
}
return $pageDocument;
}
/**
* Allows third party extensions to provide additional documents which
* should be indexed for the current page.
*
* @param \Apache_Solr_Document $pageDocument The main document representing this page.
* @param array $existingDocuments An array of documents already created for this page.
* @return array An array of additional \Apache_Solr_Document objects to index
*/
protected function getAdditionalDocuments(
\Apache_Solr_Document $pageDocument,
array $existingDocuments
) {
$documents = $existingDocuments;
if (is_array($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr']['Indexer']['indexPageAddDocuments'])) {
foreach ($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr']['Indexer']['indexPageAddDocuments'] as $classReference) {
$additionalIndexer = GeneralUtility::getUserObj($classReference);
if ($additionalIndexer instanceof AdditionalPageIndexer) {
$additionalDocuments = $additionalIndexer->getAdditionalPageDocuments($pageDocument,
$documents);
if (is_array($additionalDocuments)) {
$documents = array_merge($documents,
$additionalDocuments);
}
} else {
throw new \UnexpectedValueException(
get_class($additionalIndexer) . ' must implement interface ApacheSolrForTypo3\Solr\AdditionalPageIndexer',
1310491024
);
}
}
}
return $documents;
}
/**
* Sends the given documents to the field processing service which takes
* care of manipulating fields as defined in the field's configuration.
*
* @param array $documents An array of documents to manipulate
*/
protected function processDocuments(array $documents)
{
$processingInstructions = $this->configuration->getIndexFieldProcessingInstructionsConfiguration();
if (count($processingInstructions) > 0) {
$service = GeneralUtility::makeInstance('ApacheSolrForTypo3\\Solr\\FieldProcessor\\Service');
$service->processDocuments($documents, $processingInstructions);
}
}
/**
* Adds the collected documents to the Solr index.
*
* @param array $documents An array of \Apache_Solr_Document objects.
* @return bool TRUE if documents were added successfully, FALSE otherwise
*/
protected function addDocumentsToSolrIndex(array $documents)
{
$documentsAdded = false;
if (!count($documents)) {
return $documentsAdded;
}
try {
$this->log('Adding ' . count($documents) . ' documents.', 0,
$documents);
// chunk adds by 20
$documentChunks = array_chunk($documents, 20);
foreach ($documentChunks as $documentChunk) {
$response = $this->solrConnection->addDocuments($documentChunk);
if ($response->getHttpStatus() != 200) {
$transportException = new \Apache_Solr_HttpTransportException($response);
throw new \RuntimeException('Solr Request failed.',
1331834983, $transportException);
}
}
$documentsAdded = true;
} catch (\Exception $e) {
$this->log($e->getMessage() . ' Error code: ' . $e->getCode(), 2);
if ($this->configuration->getLoggingExceptions()) {
GeneralUtility::devLog('Exception while adding documents',
'solr', 3, array(
$e->__toString()
));
}
}
return $documentsAdded;
}
/**
* Gets the current page's URL.
*
* @return string URL of the current page.
*/
public function getPageUrl()
{
return $this->pageUrl;
}
/**
* Sets the URL to use for the page document.
*
* @param string $url The page's URL.
*/
public function setPageUrl($url)
{
$this->pageUrl = $url;
}
/**
* Gets the page's access rootline.
*
* @return Rootline The page's access rootline
*/
public function getPageAccessRootline()
{
return $this->pageAccessRootline;
}
/**
* Sets the page's access rootline.
*
* @param Rootline $accessRootline The page's access rootline
*/
public function setPageAccessRootline(Rootline $accessRootline)
{
$this->pageAccessRootline = $accessRootline;
}
/**
* Gets the documents that have been sent to Solr
*
* @return array An array of \Apache_Solr_Document objects
*/
public function getDocumentsSentToSolr()
{
return $this->documentsSentToSolr;
}
}
| 1 | 5,872 | This should never be empty. The access field always needs a value of at least `c:0` or `r:0`. | TYPO3-Solr-ext-solr | php
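A minimal sketch of the guard the reviewer is describing, assuming the fix belongs next to the existing `setField('access', …)` call; the `c:0` fallback is taken from the comment above, not from the actual change:

```php
// Sketch only: never write an empty access field.
// 'c:0' is the minimum value the reviewer says the field expects.
$access = (string)$this->pageAccessRootline;
if ($access === '') {
    $access = 'c:0';
}
$document->setField('access', $access);
```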
@@ -6,11 +6,16 @@ import (
"encoding/json"
"github.com/influxdata/flux/ast"
+ "github.com/influxdata/flux/internal/parser"
"github.com/influxdata/flux/internal/token"
"github.com/influxdata/flux/libflux/go/libflux"
)
func parseFile(f *token.File, src []byte) (*ast.File, error) {
+ if !useRustParser() {
+ return parser.ParseFile(f, src), nil
+ }
+
astFile := libflux.Parse(string(src))
defer astFile.Free()
| 1 | // +build libflux
package parser
import (
"encoding/json"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/internal/token"
"github.com/influxdata/flux/libflux/go/libflux"
)
func parseFile(f *token.File, src []byte) (*ast.File, error) {
astFile := libflux.Parse(string(src))
defer astFile.Free()
data, err := astFile.MarshalJSON()
if err != nil {
return nil, err
}
var file ast.File
if err := json.Unmarshal(data, &file); err != nil {
return nil, err
}
file.Name = f.Name()
// The go parser will not fill in the imports if there are
// none so we remove them here to retain compatibility.
if len(file.Imports) == 0 {
file.Imports = nil
}
return &file, nil
}
| 1 | 13,212 | Another question: do we want to call `os.Getenv()` every time we parse a file? That seems like a lot. | influxdata-flux | go
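One way to address that concern is to read the environment once and cache the answer; a sketch, where the environment key is an assumption chosen for illustration (only `useRustParser()` itself is named in the diff):

```go
package parser

import (
	"os"
	"sync"
)

// Sketch: cache the environment lookup so parseFile does not call
// os.Getenv on every invocation. "FLUX_PARSER_TYPE" is an assumed key.
var (
	parserChoiceOnce sync.Once
	rustParser       bool
)

func useRustParser() bool {
	parserChoiceOnce.Do(func() {
		rustParser = os.Getenv("FLUX_PARSER_TYPE") == "rust"
	})
	return rustParser
}
```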
@@ -33,6 +33,10 @@ public interface Tuple {
*/
Seq<?> toSeq();
+ <A> Tuple append(A v);
+
+ <A> Tuple prepend(A v);
+
// -- factory methods
/** | 1 | /* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2016 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang;
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-*\
G E N E R A T O R C R A F T E D
\*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-*/
import javaslang.collection.Seq;
/**
* The base interface of all tuples.
*
* @author Daniel Dietrich
* @since 1.1.0
*/
public interface Tuple {
/**
* Returns the number of elements of this tuple.
*
* @return the number of elements.
*/
int arity();
/**
* Converts this tuple to a sequence.
*
* @return A new {@code Seq}.
*/
Seq<?> toSeq();
// -- factory methods
/**
* Creates the empty tuple.
*
* @return the empty tuple.
*/
static Tuple0 empty() {
return Tuple0.instance();
}
/**
* Creates a tuple of one element.
*
* @param <T1> type of the 1st element
* @param t1 the 1st element
* @return a tuple of one element.
*/
static <T1> Tuple1<T1> of(T1 t1) {
return new Tuple1<>(t1);
}
/**
* Creates a tuple of two elements.
*
* @param <T1> type of the 1st element
* @param <T2> type of the 2nd element
* @param t1 the 1st element
* @param t2 the 2nd element
* @return a tuple of two elements.
*/
static <T1, T2> Tuple2<T1, T2> of(T1 t1, T2 t2) {
return new Tuple2<>(t1, t2);
}
/**
* Creates a tuple of three elements.
*
* @param <T1> type of the 1st element
* @param <T2> type of the 2nd element
* @param <T3> type of the 3rd element
* @param t1 the 1st element
* @param t2 the 2nd element
* @param t3 the 3rd element
* @return a tuple of three elements.
*/
static <T1, T2, T3> Tuple3<T1, T2, T3> of(T1 t1, T2 t2, T3 t3) {
return new Tuple3<>(t1, t2, t3);
}
/**
* Creates a tuple of 4 elements.
*
* @param <T1> type of the 1st element
* @param <T2> type of the 2nd element
* @param <T3> type of the 3rd element
* @param <T4> type of the 4th element
* @param t1 the 1st element
* @param t2 the 2nd element
* @param t3 the 3rd element
* @param t4 the 4th element
* @return a tuple of 4 elements.
*/
static <T1, T2, T3, T4> Tuple4<T1, T2, T3, T4> of(T1 t1, T2 t2, T3 t3, T4 t4) {
return new Tuple4<>(t1, t2, t3, t4);
}
/**
* Creates a tuple of 5 elements.
*
* @param <T1> type of the 1st element
* @param <T2> type of the 2nd element
* @param <T3> type of the 3rd element
* @param <T4> type of the 4th element
* @param <T5> type of the 5th element
* @param t1 the 1st element
* @param t2 the 2nd element
* @param t3 the 3rd element
* @param t4 the 4th element
* @param t5 the 5th element
* @return a tuple of 5 elements.
*/
static <T1, T2, T3, T4, T5> Tuple5<T1, T2, T3, T4, T5> of(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5) {
return new Tuple5<>(t1, t2, t3, t4, t5);
}
/**
* Creates a tuple of 6 elements.
*
* @param <T1> type of the 1st element
* @param <T2> type of the 2nd element
* @param <T3> type of the 3rd element
* @param <T4> type of the 4th element
* @param <T5> type of the 5th element
* @param <T6> type of the 6th element
* @param t1 the 1st element
* @param t2 the 2nd element
* @param t3 the 3rd element
* @param t4 the 4th element
* @param t5 the 5th element
* @param t6 the 6th element
* @return a tuple of 6 elements.
*/
static <T1, T2, T3, T4, T5, T6> Tuple6<T1, T2, T3, T4, T5, T6> of(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6) {
return new Tuple6<>(t1, t2, t3, t4, t5, t6);
}
/**
* Creates a tuple of 7 elements.
*
* @param <T1> type of the 1st element
* @param <T2> type of the 2nd element
* @param <T3> type of the 3rd element
* @param <T4> type of the 4th element
* @param <T5> type of the 5th element
* @param <T6> type of the 6th element
* @param <T7> type of the 7th element
* @param t1 the 1st element
* @param t2 the 2nd element
* @param t3 the 3rd element
* @param t4 the 4th element
* @param t5 the 5th element
* @param t6 the 6th element
* @param t7 the 7th element
* @return a tuple of 7 elements.
*/
static <T1, T2, T3, T4, T5, T6, T7> Tuple7<T1, T2, T3, T4, T5, T6, T7> of(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7) {
return new Tuple7<>(t1, t2, t3, t4, t5, t6, t7);
}
/**
* Creates a tuple of 8 elements.
*
* @param <T1> type of the 1st element
* @param <T2> type of the 2nd element
* @param <T3> type of the 3rd element
* @param <T4> type of the 4th element
* @param <T5> type of the 5th element
* @param <T6> type of the 6th element
* @param <T7> type of the 7th element
* @param <T8> type of the 8th element
* @param t1 the 1st element
* @param t2 the 2nd element
* @param t3 the 3rd element
* @param t4 the 4th element
* @param t5 the 5th element
* @param t6 the 6th element
* @param t7 the 7th element
* @param t8 the 8th element
* @return a tuple of 8 elements.
*/
static <T1, T2, T3, T4, T5, T6, T7, T8> Tuple8<T1, T2, T3, T4, T5, T6, T7, T8> of(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8) {
return new Tuple8<>(t1, t2, t3, t4, t5, t6, t7, t8);
}
} | 1 | 9,085 | Minor: please rename all occurrences of `A` to `T`, and please also rename `v` to `value`. | vavr-io-vavr | java
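After the requested rename, the two new methods would read as follows (sketch of the signatures only):

```java
// Sketch: type parameter A renamed to T, parameter v renamed to value.
public interface Tuple {
    <T> Tuple append(T value);

    <T> Tuple prepend(T value);
}
```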
@@ -52,11 +52,13 @@ namespace Nethermind.Synchronization.ParallelSync
add { }
remove { }
}
-
+
+ public int FastSyncLag => (Current & SyncMode.Beam) == SyncMode.Beam ? SyncModeSelectorConstants.BeamSyncFastSyncLag : SyncModeSelectorConstants.NotBeamSyncFastSyncLag;
+
public event EventHandler<SyncModeChangedEventArgs> Changing
{
add { }
remove { }
}
}
-}
+} | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
namespace Nethermind.Synchronization.ParallelSync
{
public class StaticSelector : ISyncModeSelector
{
public StaticSelector(SyncMode syncMode)
{
Current = syncMode;
}
public static StaticSelector Beam { get; } = new StaticSelector(SyncMode.Beam);
public static StaticSelector Full { get; } = new StaticSelector(SyncMode.Full);
public static StaticSelector FastSync { get; } = new StaticSelector(SyncMode.FastSync);
public static StaticSelector FastBlocks { get; } = new StaticSelector(SyncMode.FastBlocks);
public static StaticSelector FastSyncWithFastBlocks { get; } = new StaticSelector(SyncMode.FastSync | SyncMode.FastBlocks);
public static StaticSelector StateNodesWithFastBlocks { get; } = new StaticSelector(SyncMode.StateNodes | SyncMode.FastBlocks);
public static StaticSelector FullWithFastBlocks { get; } = new StaticSelector(SyncMode.Full | SyncMode.FastBlocks);
public SyncMode Current { get; }
public event EventHandler<SyncModeChangedEventArgs> Preparing
{
add { }
remove { }
}
public event EventHandler<SyncModeChangedEventArgs> Changed
{
add { }
remove { }
}
public event EventHandler<SyncModeChangedEventArgs> Changing
{
add { }
remove { }
}
}
} | 1 | 24,794 | Do we want it to be this dynamic? My first idea was to base it on SyncConfig.BeamSync. | NethermindEth-nethermind | .cs
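A sketch of that alternative; the `_syncConfig` field does not exist in `StaticSelector` today and is assumed here purely for illustration, while the two constants are the ones named in the diff:

```csharp
// Sketch: decide the lag once from SyncConfig.BeamSync instead of
// re-inspecting the current sync mode on every read.
public int FastSyncLag => _syncConfig.BeamSync
    ? SyncModeSelectorConstants.BeamSyncFastSyncLag
    : SyncModeSelectorConstants.NotBeamSyncFastSyncLag;
```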
@@ -42,7 +42,7 @@ public class RemoteNetworkConnection implements NetworkConnection {
@Override
public ConnectionType setNetworkConnection(
ConnectionType type) {
- Map<String, ConnectionType> mode = ImmutableMap.of("type", type);
+ Map<String, Integer> mode = ImmutableMap.of("type", type.getBitMask());
return new ConnectionType(((Number) executeMethod.execute(DriverCommand.SET_NETWORK_CONNECTION,
ImmutableMap
.of("parameters", mode))) | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote.mobile;
import com.google.common.collect.ImmutableMap;
import org.openqa.selenium.mobile.NetworkConnection;
import org.openqa.selenium.remote.DriverCommand;
import org.openqa.selenium.remote.ExecuteMethod;
import java.util.Map;
public class RemoteNetworkConnection implements NetworkConnection {
private final ExecuteMethod executeMethod;
public RemoteNetworkConnection(ExecuteMethod executeMethod) {
this.executeMethod = executeMethod;
}
@Override
public ConnectionType getNetworkConnection() {
return new ConnectionType(((Number) executeMethod.execute(DriverCommand.GET_NETWORK_CONNECTION,
null)).intValue());
}
@Override
public ConnectionType setNetworkConnection(
ConnectionType type) {
Map<String, ConnectionType> mode = ImmutableMap.of("type", type);
return new ConnectionType(((Number) executeMethod.execute(DriverCommand.SET_NETWORK_CONNECTION,
ImmutableMap
.of("parameters", mode)))
.intValue());
}
}
| 1 | 13,854 | Can you change this to just `type.toString()` instead? Then you wouldn't have to expose `getBitMask` in the enum. (Alternatively you could have used `type.hashCode()`, but that doesn't feel as nice.) | SeleniumHQ-selenium | js
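The suggested variant might look like the sketch below; whether `ConnectionType#toString()` actually serializes to the value the remote end expects is an assumption here, not something verified against the Selenium sources:

```java
// Sketch of the reviewer's suggestion: serialize via toString() so that
// ConnectionType does not have to expose getBitMask().
Map<String, String> mode = ImmutableMap.of("type", type.toString());
return new ConnectionType(((Number) executeMethod.execute(
    DriverCommand.SET_NETWORK_CONNECTION,
    ImmutableMap.of("parameters", mode))).intValue());
```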
@@ -68,6 +68,13 @@ def test_plot_importance(params, breast_cancer_split, train_data):
assert ax2.patches[2].get_facecolor() == (0, .5, 0, 1.) # g
assert ax2.patches[3].get_facecolor() == (0, 0, 1., 1.) # b
+ ax3 = lgb.plot_importance(gbm0, title='t @importance_type@', xlabel='x @importance_type@', ylabel='y @importance_type@')
+ assert isinstance(ax3, matplotlib.axes.Axes)
+ assert ax3.get_title() == 't @importance_type@'
+ assert ax3.get_xlabel() == 'x split'
+ assert ax3.get_ylabel() == 'y @importance_type@'
+ assert len(ax3.patches) <= 30
+
gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True, importance_type="gain")
gbm2.fit(X_train, y_train)
| 1 | # coding: utf-8
import pytest
from sklearn.model_selection import train_test_split
import lightgbm as lgb
from lightgbm.compat import GRAPHVIZ_INSTALLED, MATPLOTLIB_INSTALLED
if MATPLOTLIB_INSTALLED:
import matplotlib
matplotlib.use('Agg')
if GRAPHVIZ_INSTALLED:
import graphviz
from .utils import load_breast_cancer
@pytest.fixture(scope="module")
def breast_cancer_split():
return train_test_split(*load_breast_cancer(return_X_y=True),
test_size=0.1, random_state=1)
@pytest.fixture(scope="module")
def train_data(breast_cancer_split):
X_train, _, y_train, _ = breast_cancer_split
return lgb.Dataset(X_train, y_train)
@pytest.fixture
def params():
return {"objective": "binary",
"verbose": -1,
"num_leaves": 3}
@pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason='matplotlib is not installed')
def test_plot_importance(params, breast_cancer_split, train_data):
X_train, _, y_train, _ = breast_cancer_split
gbm0 = lgb.train(params, train_data, num_boost_round=10)
ax0 = lgb.plot_importance(gbm0)
assert isinstance(ax0, matplotlib.axes.Axes)
assert ax0.get_title() == 'Feature importance'
assert ax0.get_xlabel() == 'Feature importance'
assert ax0.get_ylabel() == 'Features'
assert len(ax0.patches) <= 30
gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
gbm1.fit(X_train, y_train)
ax1 = lgb.plot_importance(gbm1, color='r', title='t', xlabel='x', ylabel='y')
assert isinstance(ax1, matplotlib.axes.Axes)
assert ax1.get_title() == 't'
assert ax1.get_xlabel() == 'x'
assert ax1.get_ylabel() == 'y'
assert len(ax1.patches) <= 30
for patch in ax1.patches:
assert patch.get_facecolor() == (1., 0, 0, 1.) # red
ax2 = lgb.plot_importance(gbm0, color=['r', 'y', 'g', 'b'], title=None, xlabel=None, ylabel=None)
assert isinstance(ax2, matplotlib.axes.Axes)
assert ax2.get_title() == ''
assert ax2.get_xlabel() == ''
assert ax2.get_ylabel() == ''
assert len(ax2.patches) <= 30
assert ax2.patches[0].get_facecolor() == (1., 0, 0, 1.) # r
assert ax2.patches[1].get_facecolor() == (.75, .75, 0, 1.) # y
assert ax2.patches[2].get_facecolor() == (0, .5, 0, 1.) # g
assert ax2.patches[3].get_facecolor() == (0, 0, 1., 1.) # b
gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True, importance_type="gain")
gbm2.fit(X_train, y_train)
def get_bounds_of_first_patch(axes):
return axes.patches[0].get_extents().bounds
first_bar1 = get_bounds_of_first_patch(lgb.plot_importance(gbm1))
first_bar2 = get_bounds_of_first_patch(lgb.plot_importance(gbm1, importance_type="split"))
first_bar3 = get_bounds_of_first_patch(lgb.plot_importance(gbm1, importance_type="gain"))
first_bar4 = get_bounds_of_first_patch(lgb.plot_importance(gbm2))
first_bar5 = get_bounds_of_first_patch(lgb.plot_importance(gbm2, importance_type="split"))
first_bar6 = get_bounds_of_first_patch(lgb.plot_importance(gbm2, importance_type="gain"))
assert first_bar1 == first_bar2
assert first_bar1 == first_bar5
assert first_bar3 == first_bar4
assert first_bar3 == first_bar6
assert first_bar1 != first_bar3
@pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason='matplotlib is not installed')
def test_plot_split_value_histogram(params, breast_cancer_split, train_data):
X_train, _, y_train, _ = breast_cancer_split
gbm0 = lgb.train(params, train_data, num_boost_round=10)
ax0 = lgb.plot_split_value_histogram(gbm0, 27)
assert isinstance(ax0, matplotlib.axes.Axes)
assert ax0.get_title() == 'Split value histogram for feature with index 27'
assert ax0.get_xlabel() == 'Feature split value'
assert ax0.get_ylabel() == 'Count'
assert len(ax0.patches) <= 2
gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
gbm1.fit(X_train, y_train)
ax1 = lgb.plot_split_value_histogram(gbm1, gbm1.booster_.feature_name()[27], figsize=(10, 5),
title='Histogram for feature @index/name@ @feature@',
xlabel='x', ylabel='y', color='r')
assert isinstance(ax1, matplotlib.axes.Axes)
title = f'Histogram for feature name {gbm1.booster_.feature_name()[27]}'
assert ax1.get_title() == title
assert ax1.get_xlabel() == 'x'
assert ax1.get_ylabel() == 'y'
assert len(ax1.patches) <= 2
for patch in ax1.patches:
assert patch.get_facecolor() == (1., 0, 0, 1.) # red
ax2 = lgb.plot_split_value_histogram(gbm0, 27, bins=10, color=['r', 'y', 'g', 'b'],
title=None, xlabel=None, ylabel=None)
assert isinstance(ax2, matplotlib.axes.Axes)
assert ax2.get_title() == ''
assert ax2.get_xlabel() == ''
assert ax2.get_ylabel() == ''
assert len(ax2.patches) == 10
assert ax2.patches[0].get_facecolor() == (1., 0, 0, 1.) # r
assert ax2.patches[1].get_facecolor() == (.75, .75, 0, 1.) # y
assert ax2.patches[2].get_facecolor() == (0, .5, 0, 1.) # g
assert ax2.patches[3].get_facecolor() == (0, 0, 1., 1.) # b
with pytest.raises(ValueError):
lgb.plot_split_value_histogram(gbm0, 0) # was not used in splitting
@pytest.mark.skipif(not MATPLOTLIB_INSTALLED or not GRAPHVIZ_INSTALLED,
reason='matplotlib or graphviz is not installed')
def test_plot_tree(breast_cancer_split):
X_train, _, y_train, _ = breast_cancer_split
gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
gbm.fit(X_train, y_train, verbose=False)
with pytest.raises(IndexError):
lgb.plot_tree(gbm, tree_index=83)
ax = lgb.plot_tree(gbm, tree_index=3, figsize=(15, 8), show_info=['split_gain'])
assert isinstance(ax, matplotlib.axes.Axes)
w, h = ax.axes.get_figure().get_size_inches()
assert int(w) == 15
assert int(h) == 8
@pytest.mark.skipif(not GRAPHVIZ_INSTALLED, reason='graphviz is not installed')
def test_create_tree_digraph(breast_cancer_split):
X_train, _, y_train, _ = breast_cancer_split
constraints = [-1, 1] * int(X_train.shape[1] / 2)
gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True, monotone_constraints=constraints)
gbm.fit(X_train, y_train, verbose=False)
with pytest.raises(IndexError):
lgb.create_tree_digraph(gbm, tree_index=83)
graph = lgb.create_tree_digraph(gbm, tree_index=3,
show_info=['split_gain', 'internal_value', 'internal_weight'],
name='Tree4', node_attr={'color': 'red'})
graph.render(view=False)
assert isinstance(graph, graphviz.Digraph)
assert graph.name == 'Tree4'
assert graph.filename == 'Tree4.gv'
assert len(graph.node_attr) == 1
assert graph.node_attr['color'] == 'red'
assert len(graph.graph_attr) == 0
assert len(graph.edge_attr) == 0
graph_body = ''.join(graph.body)
assert 'leaf' in graph_body
assert 'gain' in graph_body
assert 'value' in graph_body
assert 'weight' in graph_body
assert '#ffdddd' in graph_body
assert '#ddffdd' in graph_body
assert 'data' not in graph_body
assert 'count' not in graph_body
@pytest.mark.skipif(not MATPLOTLIB_INSTALLED, reason='matplotlib is not installed')
def test_plot_metrics(params, breast_cancer_split, train_data):
X_train, X_test, y_train, y_test = breast_cancer_split
test_data = lgb.Dataset(X_test, y_test, reference=train_data)
params.update({"metric": {"binary_logloss", "binary_error"}})
evals_result0 = {}
lgb.train(params, train_data,
valid_sets=[train_data, test_data],
valid_names=['v1', 'v2'],
num_boost_round=10,
evals_result=evals_result0,
verbose_eval=False)
ax0 = lgb.plot_metric(evals_result0)
assert isinstance(ax0, matplotlib.axes.Axes)
assert ax0.get_title() == 'Metric during training'
assert ax0.get_xlabel() == 'Iterations'
assert ax0.get_ylabel() in {'binary_logloss', 'binary_error'}
ax0 = lgb.plot_metric(evals_result0, metric='binary_error')
ax0 = lgb.plot_metric(evals_result0, metric='binary_logloss', dataset_names=['v2'])
evals_result1 = {}
lgb.train(params, train_data,
num_boost_round=10,
evals_result=evals_result1,
verbose_eval=False)
with pytest.raises(ValueError):
lgb.plot_metric(evals_result1)
gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
gbm2.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False)
ax2 = lgb.plot_metric(gbm2, title=None, xlabel=None, ylabel=None)
assert isinstance(ax2, matplotlib.axes.Axes)
assert ax2.get_title() == ''
assert ax2.get_xlabel() == ''
assert ax2.get_ylabel() == ''
| 1 | 31,403 | I'm confused by these tests. Shouldn't the template string `@importance_type@` have been replaced with the actual value of `importance_type`? | microsoft-LightGBM | cpp |
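For reference, this is what the substitution the reviewer expects would produce if it were applied to every label; whether `plot_importance` substitutes the placeholder in all three strings or only in the xlabel is exactly what the assertions above leave unclear:

```python
# Illustration only: applying the '@importance_type@' substitution by hand.
importance_type = 'split'
title = 't @importance_type@'.replace('@importance_type@', importance_type)
xlabel = 'x @importance_type@'.replace('@importance_type@', importance_type)
print(title, '|', xlabel)  # prints: t split | x split
```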
@@ -25,7 +25,7 @@ import (
type harness struct{}
func (h *harness) MakeDriver(ctx context.Context) (driver.Keeper, error) {
- return NewKeeper(ByteKey("very secret secret")), nil
+ return &keeper{secretKey: ByteKey("very secret secret")}, nil
}
func (h *harness) Close() {} | 1 | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package localsecrets
import (
"context"
"testing"
"gocloud.dev/secrets/driver"
"gocloud.dev/secrets/drivertest"
)
type harness struct{}
func (h *harness) MakeDriver(ctx context.Context) (driver.Keeper, error) {
return NewKeeper(ByteKey("very secret secret")), nil
}
func (h *harness) Close() {}
func newHarness(ctx context.Context, t *testing.T) (drivertest.Harness, error) {
return &harness{}, nil
}
| 1 | 13,930 | Let's keep using NewKeeper so that it stays covered by tests. | google-go-cloud | go
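The shape the reviewer wants to keep is simply the line the patch removes, i.e. constructing the driver through the exported constructor so that it is exercised by the conformance tests:

```go
// Keep going through the public constructor in the test harness so that
// NewKeeper itself stays covered by drivertest.
func (h *harness) MakeDriver(ctx context.Context) (driver.Keeper, error) {
	return NewKeeper(ByteKey("very secret secret")), nil
}
```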
@@ -70,6 +70,9 @@ import org.apache.solr.core.CoreContainer;
import org.apache.solr.handler.admin.CollectionsHandler;
import org.apache.solr.handler.component.HttpShardHandler;
import org.apache.solr.logging.MDCLoggingContext;
+import org.apache.solr.store.blob.process.BlobDeleteManager;
+import org.apache.solr.store.blob.process.BlobDeleteProcessor;
+import org.apache.solr.store.shared.SharedStoreManager;
import org.apache.solr.store.shared.metadata.SharedShardMetadataController;
import org.apache.solr.update.UpdateShardHandler;
import org.apache.zookeeper.CreateMode; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.cloud;
import static org.apache.solr.common.params.CommonParams.ID;
import java.io.Closeable;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.BiConsumer;
import org.apache.lucene.util.Version;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.ClusterStateProvider;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
import org.apache.solr.cloud.autoscaling.OverseerTriggerThread;
import org.apache.solr.cloud.overseer.ClusterStateMutator;
import org.apache.solr.cloud.overseer.CollectionMutator;
import org.apache.solr.cloud.overseer.NodeMutator;
import org.apache.solr.cloud.overseer.OverseerAction;
import org.apache.solr.cloud.overseer.ReplicaMutator;
import org.apache.solr.cloud.overseer.SliceMutator;
import org.apache.solr.cloud.overseer.ZkStateWriter;
import org.apache.solr.cloud.overseer.ZkWriteCommand;
import org.apache.solr.common.AlreadyClosedException;
import org.apache.solr.common.SolrCloseable;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ConnectionManager;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.util.IOUtils;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.ObjectReleaseTracker;
import org.apache.solr.common.util.Pair;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.CloudConfig;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.handler.admin.CollectionsHandler;
import org.apache.solr.handler.component.HttpShardHandler;
import org.apache.solr.logging.MDCLoggingContext;
import org.apache.solr.store.shared.metadata.SharedShardMetadataController;
import org.apache.solr.update.UpdateShardHandler;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.codahale.metrics.Timer;
/**
* Cluster leader. Responsible for processing state updates, node assignments, creating/deleting
* collections, shards, replicas and setting various properties.
*/
public class Overseer implements SolrCloseable {
public static final String QUEUE_OPERATION = "operation";
// System properties are used in tests to make them run fast
public static final int STATE_UPDATE_DELAY = ZkStateReader.STATE_UPDATE_DELAY;
public static final int STATE_UPDATE_BATCH_SIZE = Integer.getInteger("solr.OverseerStateUpdateBatchSize", 10000);
public static final int STATE_UPDATE_MAX_QUEUE = 20000;
public static final int NUM_RESPONSES_TO_STORE = 10000;
public static final String OVERSEER_ELECT = "/overseer_elect";
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
enum LeaderStatus {DONT_KNOW, NO, YES}
private class ClusterStateUpdater implements Runnable, Closeable {
private final ZkStateReader reader;
private final SolrZkClient zkClient;
private final String myId;
//queue where everybody can throw tasks
private final ZkDistributedQueue stateUpdateQueue;
//TODO remove in 9.0, we do not push message into this queue anymore
//Internal queue where overseer stores events that have not yet been published into cloudstate
//If Overseer dies while extracting the main queue a new overseer will start from this queue
private final ZkDistributedQueue workQueue;
// Internal map which holds the information about running tasks.
private final DistributedMap runningMap;
// Internal map which holds the information about successfully completed tasks.
private final DistributedMap completedMap;
// Internal map which holds the information about failed tasks.
private final DistributedMap failureMap;
private final Stats zkStats;
private boolean isClosed = false;
public ClusterStateUpdater(final ZkStateReader reader, final String myId, Stats zkStats) {
this.zkClient = reader.getZkClient();
this.zkStats = zkStats;
this.stateUpdateQueue = getStateUpdateQueue(zkStats);
this.workQueue = getInternalWorkQueue(zkClient, zkStats);
this.failureMap = getFailureMap(zkClient);
this.runningMap = getRunningMap(zkClient);
this.completedMap = getCompletedMap(zkClient);
this.myId = myId;
this.reader = reader;
}
public Stats getStateUpdateQueueStats() {
return stateUpdateQueue.getZkStats();
}
public Stats getWorkQueueStats() {
return workQueue.getZkStats();
}
@Override
public void run() {
MDCLoggingContext.setNode(zkController.getNodeName() );
LeaderStatus isLeader = amILeader();
while (isLeader == LeaderStatus.DONT_KNOW) {
log.debug("am_i_leader unclear {}", isLeader);
isLeader = amILeader(); // not a no, not a yes, try ask again
}
log.info("Starting to work on the main queue : {}", LeaderElector.getNodeName(myId));
try {
ZkStateWriter zkStateWriter = null;
ClusterState clusterState = null;
boolean refreshClusterState = true; // let's refresh in the first iteration
// we write updates in batch, but if an exception is thrown when writing new clusterstate,
// we do not sure which message is bad message, therefore we will re-process node one by one
int fallbackQueueSize = Integer.MAX_VALUE;
ZkDistributedQueue fallbackQueue = workQueue;
while (!this.isClosed) {
isLeader = amILeader();
if (LeaderStatus.NO == isLeader) {
break;
}
else if (LeaderStatus.YES != isLeader) {
log.debug("am_i_leader unclear {}", isLeader);
continue; // not a no, not a yes, try ask again
}
//TODO consider removing 'refreshClusterState' and simply check if clusterState is null
if (refreshClusterState) {
try {
reader.forciblyRefreshAllClusterStateSlow();
clusterState = reader.getClusterState();
zkStateWriter = new ZkStateWriter(reader, stats);
refreshClusterState = false;
// if there were any errors while processing
// the state queue, items would have been left in the
// work queue so let's process those first
byte[] data = fallbackQueue.peek();
while (fallbackQueueSize > 0 && data != null) {
final ZkNodeProps message = ZkNodeProps.load(data);
log.debug("processMessage: fallbackQueueSize: {}, message = {}", fallbackQueue.getZkStats().getQueueLength(), message);
// force flush to ZK after each message because there is no fallback if workQueue items
// are removed from workQueue but fail to be written to ZK
try {
clusterState = processQueueItem(message, clusterState, zkStateWriter, false, null);
} catch (Exception e) {
if (isBadMessage(e)) {
log.warn("Exception when process message = {}, consider as bad message and poll out from the queue", message);
fallbackQueue.poll();
}
throw e;
}
fallbackQueue.poll(); // poll-ing removes the element we got by peek-ing
data = fallbackQueue.peek();
fallbackQueueSize--;
}
// force flush at the end of the loop, if there are no pending updates, this is a no op call
clusterState = zkStateWriter.writePendingUpdates();
// the workQueue is empty now, use stateUpdateQueue as fallback queue
fallbackQueue = stateUpdateQueue;
fallbackQueueSize = 0;
} catch (AlreadyClosedException e) {
return;
} catch (KeeperException.SessionExpiredException e) {
log.warn("Solr cannot talk to ZK, exiting Overseer work queue loop", e);
return;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
} catch (Exception e) {
log.error("Exception in Overseer when process message from work queue, retrying", e);
refreshClusterState = true;
continue;
}
}
LinkedList<Pair<String, byte[]>> queue = null;
try {
// We do not need to filter any nodes here cause all processed nodes are removed once we flush clusterstate
queue = new LinkedList<>(stateUpdateQueue.peekElements(1000, 3000L, (x) -> true));
} catch (KeeperException.SessionExpiredException e) {
log.warn("Solr cannot talk to ZK, exiting Overseer main queue loop", e);
return;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
} catch (AlreadyClosedException e) {
} catch (Exception e) {
log.error("Exception in Overseer main queue loop", e);
}
try {
Set<String> processedNodes = new HashSet<>();
while (queue != null && !queue.isEmpty()) {
for (Pair<String, byte[]> head : queue) {
byte[] data = head.second();
final ZkNodeProps message = ZkNodeProps.load(data);
log.debug("processMessage: queueSize: {}, message = {} current state version: {}", stateUpdateQueue.getZkStats().getQueueLength(), message, clusterState.getZkClusterStateVersion());
processedNodes.add(head.first());
fallbackQueueSize = processedNodes.size();
// The callback always be called on this thread
clusterState = processQueueItem(message, clusterState, zkStateWriter, true, () -> {
stateUpdateQueue.remove(processedNodes);
processedNodes.clear();
});
}
if (isClosed) break;
// if an event comes in the next 100ms batch it together
queue = new LinkedList<>(stateUpdateQueue.peekElements(1000, 100, node -> !processedNodes.contains(node)));
}
fallbackQueueSize = processedNodes.size();
// we should force write all pending updates because the next iteration might sleep until there
// are more items in the main queue
clusterState = zkStateWriter.writePendingUpdates();
// clean work queue
stateUpdateQueue.remove(processedNodes);
processedNodes.clear();
} catch (KeeperException.SessionExpiredException e) {
log.warn("Solr cannot talk to ZK, exiting Overseer main queue loop", e);
return;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
} catch (AlreadyClosedException e) {
} catch (Exception e) {
log.error("Exception in Overseer main queue loop", e);
refreshClusterState = true; // it might have been a bad version error
}
}
} finally {
log.info("Overseer Loop exiting : {}", LeaderElector.getNodeName(myId));
//do this in a separate thread because any wait is interrupted in this main thread
new Thread(this::checkIfIamStillLeader, "OverseerExitThread").start();
}
}
// Return true whenever the exception thrown by ZkStateWriter is correspond
// to a invalid state or 'bad' message (in this case, we should remove that message from queue)
private boolean isBadMessage(Exception e) {
if (e instanceof KeeperException) {
KeeperException ke = (KeeperException) e;
return ke.code() == KeeperException.Code.NONODE || ke.code() == KeeperException.Code.NODEEXISTS;
}
return !(e instanceof InterruptedException);
}
private ClusterState processQueueItem(ZkNodeProps message, ClusterState clusterState, ZkStateWriter zkStateWriter, boolean enableBatching, ZkStateWriter.ZkWriteCallback callback) throws Exception {
final String operation = message.getStr(QUEUE_OPERATION);
if (operation == null) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Message missing " + QUEUE_OPERATION + ":" + message);
}
List<ZkWriteCommand> zkWriteCommands = null;
final Timer.Context timerContext = stats.time(operation);
try {
zkWriteCommands = processMessage(clusterState, message, operation);
stats.success(operation);
} catch (Exception e) {
// generally there is nothing we can do - in most cases, we have
// an issue that will fail again on retry or we cannot communicate with a
// ZooKeeper in which case another Overseer should take over
// TODO: if ordering for the message is not important, we could
// track retries and put it back on the end of the queue
log.error("Overseer could not process the current clusterstate state update message, skipping the message: " + message, e);
stats.error(operation);
} finally {
timerContext.stop();
}
if (zkWriteCommands != null) {
clusterState = zkStateWriter.enqueueUpdate(clusterState, zkWriteCommands, callback);
if (!enableBatching) {
clusterState = zkStateWriter.writePendingUpdates();
}
}
return clusterState;
}
private void checkIfIamStillLeader() {
if (zkController != null && (zkController.getCoreContainer().isShutDown() || zkController.isClosed())) {
return;//shutting down no need to go further
}
org.apache.zookeeper.data.Stat stat = new org.apache.zookeeper.data.Stat();
final String path = OVERSEER_ELECT + "/leader";
byte[] data;
try {
data = zkClient.getData(path, null, stat, true);
} catch (AlreadyClosedException e) {
return;
} catch (Exception e) {
log.warn("Error communicating with ZooKeeper", e);
return;
}
try {
Map m = (Map) Utils.fromJSON(data);
String id = (String) m.get(ID);
if(overseerCollectionConfigSetProcessor.getId().equals(id)){
try {
log.warn("I (id={}) am exiting, but I'm still the leader",
overseerCollectionConfigSetProcessor.getId());
zkClient.delete(path,stat.getVersion(),true);
} catch (KeeperException.BadVersionException e) {
//no problem ignore it some other Overseer has already taken over
} catch (Exception e) {
log.error("Could not delete my leader node "+path, e);
}
} else{
log.info("somebody else (id={}) has already taken up the overseer position", id);
}
} finally {
//if I am not shutting down, Then I need to rejoin election
try {
if (zkController != null && !zkController.getCoreContainer().isShutDown()) {
zkController.rejoinOverseerElection(null, false);
}
} catch (Exception e) {
log.warn("Unable to rejoinElection ",e);
}
}
}
private List<ZkWriteCommand> processMessage(ClusterState clusterState,
final ZkNodeProps message, final String operation) {
CollectionParams.CollectionAction collectionAction = CollectionParams.CollectionAction.get(operation);
if (collectionAction != null) {
switch (collectionAction) {
case CREATE:
return Collections.singletonList(new ClusterStateMutator(getSolrCloudManager()).createCollection(clusterState, message));
case DELETE:
return Collections.singletonList(new ClusterStateMutator(getSolrCloudManager()).deleteCollection(clusterState, message));
case CREATESHARD:
return Collections.singletonList(new CollectionMutator(getSolrCloudManager()).createShard(clusterState, message));
case DELETESHARD:
return Collections.singletonList(new CollectionMutator(getSolrCloudManager()).deleteShard(clusterState, message));
case ADDREPLICA:
return Collections.singletonList(new SliceMutator(getSolrCloudManager()).addReplica(clusterState, message));
case ADDREPLICAPROP:
return Collections.singletonList(new ReplicaMutator(getSolrCloudManager()).addReplicaProperty(clusterState, message));
case DELETEREPLICAPROP:
return Collections.singletonList(new ReplicaMutator(getSolrCloudManager()).deleteReplicaProperty(clusterState, message));
case BALANCESHARDUNIQUE:
ExclusiveSliceProperty dProp = new ExclusiveSliceProperty(clusterState, message);
if (dProp.balanceProperty()) {
String collName = message.getStr(ZkStateReader.COLLECTION_PROP);
return Collections.singletonList(new ZkWriteCommand(collName, dProp.getDocCollection()));
}
break;
case MODIFYCOLLECTION:
CollectionsHandler.verifyRuleParams(zkController.getCoreContainer() ,message.getProperties());
return Collections.singletonList(new CollectionMutator(getSolrCloudManager()).modifyCollection(clusterState,message));
case MIGRATESTATEFORMAT:
return Collections.singletonList(new ClusterStateMutator(getSolrCloudManager()).migrateStateFormat(clusterState, message));
default:
throw new RuntimeException("unknown operation:" + operation
+ " contents:" + message.getProperties());
}
} else {
OverseerAction overseerAction = OverseerAction.get(operation);
if (overseerAction == null) {
throw new RuntimeException("unknown operation:" + operation + " contents:" + message.getProperties());
}
switch (overseerAction) {
case STATE:
return Collections.singletonList(new ReplicaMutator(getSolrCloudManager()).setState(clusterState, message));
case LEADER:
return Collections.singletonList(new SliceMutator(getSolrCloudManager()).setShardLeader(clusterState, message));
case DELETECORE:
return Collections.singletonList(new SliceMutator(getSolrCloudManager()).removeReplica(clusterState, message));
case ADDROUTINGRULE:
return Collections.singletonList(new SliceMutator(getSolrCloudManager()).addRoutingRule(clusterState, message));
case REMOVEROUTINGRULE:
return Collections.singletonList(new SliceMutator(getSolrCloudManager()).removeRoutingRule(clusterState, message));
case UPDATESHARDSTATE:
return Collections.singletonList(new SliceMutator(getSolrCloudManager()).updateShardState(clusterState, message));
case QUIT:
if (myId.equals(message.get(ID))) {
log.info("Quit command received {} {}", message, LeaderElector.getNodeName(myId));
overseerCollectionConfigSetProcessor.close();
close();
} else {
log.warn("Overseer received wrong QUIT message {}", message);
}
break;
case DOWNNODE:
return new NodeMutator().downNode(clusterState, message);
default:
throw new RuntimeException("unknown operation:" + operation + " contents:" + message.getProperties());
}
}
return Collections.singletonList(ZkStateWriter.NO_OP);
}
private LeaderStatus amILeader() {
Timer.Context timerContext = stats.time("am_i_leader");
boolean success = true;
String propsId = null;
try {
ZkNodeProps props = ZkNodeProps.load(zkClient.getData(
OVERSEER_ELECT + "/leader", null, null, true));
propsId = props.getStr(ID);
if (myId.equals(propsId)) {
return LeaderStatus.YES;
}
} catch (KeeperException e) {
success = false;
if (e.code() == KeeperException.Code.CONNECTIONLOSS) {
log.error("", e);
return LeaderStatus.DONT_KNOW;
} else if (e.code() != KeeperException.Code.SESSIONEXPIRED) {
log.warn("", e);
} else {
log.debug("", e);
}
} catch (InterruptedException e) {
success = false;
Thread.currentThread().interrupt();
} catch (AlreadyClosedException e) {
success = false;
} catch (Exception e) {
success = false;
log.warn("Unexpected exception", e);
} finally {
timerContext.stop();
if (success) {
stats.success("am_i_leader");
} else {
stats.error("am_i_leader");
}
}
log.info("According to ZK I (id={}) am no longer a leader. propsId={}", myId, propsId);
return LeaderStatus.NO;
}
@Override
public void close() {
this.isClosed = true;
}
}
public static class OverseerThread extends Thread implements Closeable {
protected volatile boolean isClosed;
private Closeable thread;
public OverseerThread(ThreadGroup tg, Closeable thread) {
super(tg, (Runnable) thread);
this.thread = thread;
}
public OverseerThread(ThreadGroup ccTg, Closeable thread, String name) {
super(ccTg, (Runnable) thread, name);
this.thread = thread;
}
@Override
public void close() throws IOException {
thread.close();
this.isClosed = true;
}
public Closeable getThread() {
return thread;
}
public boolean isClosed() {
return this.isClosed;
}
}
private OverseerThread ccThread;
private OverseerThread updaterThread;
private OverseerThread triggerThread;
private final ZkStateReader reader;
private final HttpShardHandler shardHandler;
private final UpdateShardHandler updateShardHandler;
private final String adminPath;
private OverseerCollectionConfigSetProcessor overseerCollectionConfigSetProcessor;
private ZkController zkController;
private Stats stats;
private String id;
private volatile boolean closed;
private volatile boolean systemCollCompatCheck = true;
private CloudConfig config;
// overseer not responsible for closing reader
public Overseer(HttpShardHandler shardHandler,
UpdateShardHandler updateShardHandler, String adminPath,
final ZkStateReader reader, ZkController zkController, CloudConfig config)
throws KeeperException, InterruptedException {
this.reader = reader;
this.shardHandler = shardHandler;
this.updateShardHandler = updateShardHandler;
this.adminPath = adminPath;
this.zkController = zkController;
this.stats = new Stats();
this.config = config;
}
public synchronized void start(String id) {
MDCLoggingContext.setNode(zkController == null ?
null :
zkController.getNodeName());
this.id = id;
closed = false;
doClose();
stats = new Stats();
log.info("Overseer (id=" + id + ") starting");
createOverseerNode(reader.getZkClient());
//launch cluster state updater thread
ThreadGroup tg = new ThreadGroup("Overseer state updater.");
updaterThread = new OverseerThread(tg, new ClusterStateUpdater(reader, id, stats), "OverseerStateUpdate-" + id);
updaterThread.setDaemon(true);
ThreadGroup ccTg = new ThreadGroup("Overseer collection creation process.");
OverseerNodePrioritizer overseerPrioritizer = new OverseerNodePrioritizer(reader, getStateUpdateQueue(), adminPath, shardHandler.getShardHandlerFactory(), updateShardHandler.getDefaultHttpClient());
overseerCollectionConfigSetProcessor = new OverseerCollectionConfigSetProcessor(reader, id, shardHandler, adminPath, stats, Overseer.this, overseerPrioritizer);
ccThread = new OverseerThread(ccTg, overseerCollectionConfigSetProcessor, "OverseerCollectionConfigSetProcessor-" + id);
ccThread.setDaemon(true);
ThreadGroup triggerThreadGroup = new ThreadGroup("Overseer autoscaling triggers");
OverseerTriggerThread trigger = new OverseerTriggerThread(zkController.getCoreContainer().getResourceLoader(),
zkController.getSolrCloudManager(), config);
triggerThread = new OverseerThread(triggerThreadGroup, trigger, "OverseerAutoScalingTriggerThread-" + id);
updaterThread.start();
ccThread.start();
triggerThread.start();
systemCollectionCompatCheck(new BiConsumer<String, Object>() {
boolean firstPair = true;
@Override
public void accept(String s, Object o) {
if (firstPair) {
log.warn("WARNING: Collection '.system' may need re-indexing due to compatibility issues listed below. See REINDEXCOLLECTION documentation for more details.");
firstPair = false;
}
log.warn("WARNING: *\t{}:\t{}", s, o);
}
});
assert ObjectReleaseTracker.track(this);
}
public void systemCollectionCompatCheck(final BiConsumer<String, Object> consumer) {
ClusterState clusterState = zkController.getClusterState();
if (clusterState == null) {
log.warn("Unable to check back-compat of .system collection - can't obtain ClusterState.");
return;
}
DocCollection coll = clusterState.getCollectionOrNull(CollectionAdminParams.SYSTEM_COLL);
if (coll == null) {
return;
}
// check that all shard leaders are active
boolean allActive = true;
for (Slice s : coll.getActiveSlices()) {
if (s.getLeader() == null || !s.getLeader().isActive(clusterState.getLiveNodes())) {
allActive = false;
break;
}
}
if (allActive) {
doCompatCheck(consumer);
} else {
// wait for all leaders to become active and then check
zkController.zkStateReader.registerCollectionStateWatcher(CollectionAdminParams.SYSTEM_COLL, (liveNodes, state) -> {
boolean active = true;
if (state == null || liveNodes.isEmpty()) {
return true;
}
for (Slice s : state.getActiveSlices()) {
if (s.getLeader() == null || !s.getLeader().isActive(liveNodes)) {
active = false;
break;
}
}
if (active) {
doCompatCheck(consumer);
}
return active;
});
}
}
private void doCompatCheck(BiConsumer<String, Object> consumer) {
if (systemCollCompatCheck) {
systemCollCompatCheck = false;
} else {
return;
}
try (CloudSolrClient client = new CloudSolrClient.Builder(Collections.singletonList(getZkController().getZkServerAddress()), Optional.empty())
.withSocketTimeout(30000).withConnectionTimeout(15000)
.withHttpClient(updateShardHandler.getDefaultHttpClient()).build()) {
CollectionAdminRequest.ColStatus req = CollectionAdminRequest.collectionStatus(CollectionAdminParams.SYSTEM_COLL)
.setWithSegments(true)
.setWithFieldInfo(true);
CollectionAdminResponse rsp = req.process(client);
NamedList<Object> status = (NamedList<Object>)rsp.getResponse().get(CollectionAdminParams.SYSTEM_COLL);
Collection<String> nonCompliant = (Collection<String>)status.get("schemaNonCompliant");
if (!nonCompliant.contains("(NONE)")) {
consumer.accept("indexFieldsNotMatchingSchema", nonCompliant);
}
Set<Integer> segmentCreatedMajorVersions = new HashSet<>();
Set<String> segmentVersions = new HashSet<>();
int currentMajorVersion = Version.LATEST.major;
String currentVersion = Version.LATEST.toString();
segmentVersions.add(currentVersion);
segmentCreatedMajorVersions.add(currentMajorVersion);
NamedList<Object> shards = (NamedList<Object>)status.get("shards");
for (Map.Entry<String, Object> entry : shards) {
NamedList<Object> leader = (NamedList<Object>)((NamedList<Object>)entry.getValue()).get("leader");
if (leader == null) {
continue;
}
NamedList<Object> segInfos = (NamedList<Object>)leader.get("segInfos");
if (segInfos == null) {
continue;
}
NamedList<Object> infos = (NamedList<Object>)segInfos.get("info");
if (((Number)infos.get("numSegments")).intValue() > 0) {
segmentVersions.add(infos.get("minSegmentLuceneVersion").toString());
}
if (infos.get("commitLuceneVersion") != null) {
segmentVersions.add(infos.get("commitLuceneVersion").toString());
}
NamedList<Object> segmentInfos = (NamedList<Object>)segInfos.get("segments");
segmentInfos.forEach((k, v) -> {
NamedList<Object> segment = (NamedList<Object>)v;
segmentVersions.add(segment.get("version").toString());
if (segment.get("minVersion") != null) {
segmentVersions.add(segment.get("version").toString());
}
if (segment.get("createdVersionMajor") != null) {
segmentCreatedMajorVersions.add(((Number)segment.get("createdVersionMajor")).intValue());
}
});
}
if (segmentVersions.size() > 1) {
consumer.accept("differentSegmentVersions", segmentVersions);
consumer.accept("currentLuceneVersion", currentVersion);
}
if (segmentCreatedMajorVersions.size() > 1) {
consumer.accept("differentMajorSegmentVersions", segmentCreatedMajorVersions);
consumer.accept("currentLuceneMajorVersion", currentMajorVersion);
}
} catch (SolrServerException | IOException e) {
log.warn("Unable to perform back-compat check of .system collection", e);
}
}
public Stats getStats() {
return stats;
}
ZkController getZkController(){
return zkController;
}
public CoreContainer getCoreContainer() {
return zkController.getCoreContainer();
}
public SolrCloudManager getSolrCloudManager() {
return zkController.getSolrCloudManager();
}
public SharedShardMetadataController getShardSharedMetadataController() {
return getCoreContainer().getSharedStoreManager().getSharedShardMetadataController();
}
/**
* For tests.
*
* @lucene.internal
* @return state updater thread
*/
public synchronized OverseerThread getUpdaterThread() {
return updaterThread;
}
/**
* For tests.
* @lucene.internal
* @return trigger thread
*/
public synchronized OverseerThread getTriggerThread() {
return triggerThread;
}
public synchronized void close() {
if (this.id != null) {
log.info("Overseer (id=" + id + ") closing");
}
this.closed = true;
doClose();
assert ObjectReleaseTracker.release(this);
}
@Override
public boolean isClosed() {
return closed;
}
private void doClose() {
if (updaterThread != null) {
IOUtils.closeQuietly(updaterThread);
updaterThread.interrupt();
}
if (ccThread != null) {
IOUtils.closeQuietly(ccThread);
ccThread.interrupt();
}
if (triggerThread != null) {
IOUtils.closeQuietly(triggerThread);
triggerThread.interrupt();
}
if (updaterThread != null) {
try {
updaterThread.join();
} catch (InterruptedException e) {}
}
if (ccThread != null) {
try {
ccThread.join();
} catch (InterruptedException e) {}
}
if (triggerThread != null) {
try {
triggerThread.join();
} catch (InterruptedException e) {}
}
updaterThread = null;
ccThread = null;
triggerThread = null;
}
/**
* Get queue that can be used to send messages to Overseer.
* <p>
* Any and all modifications to the cluster state must be sent to
* the overseer via this queue. The complete list of overseer actions
* supported by this queue are documented inside the {@link OverseerAction} enum.
* <p>
* Performance statistics on the returned queue
* are <em>not</em> tracked by the Overseer Stats API,
* see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
* Therefore, this method should be used only by clients for writing to the overseer queue.
* <p>
* This method will create the /overseer znode in ZooKeeper if it does not exist already.
*
* @return a {@link ZkDistributedQueue} object
*/
ZkDistributedQueue getStateUpdateQueue() {
return getStateUpdateQueue(new Stats());
}
/**
* The overseer uses the returned queue to read any operations submitted by clients.
* This method should not be used directly by anyone other than the Overseer itself.
* This method will create the /overseer znode in ZooKeeper if it does not exist already.
*
* @param zkStats a {@link Stats} object which tracks statistics for all zookeeper operations performed by this queue
* @return a {@link ZkDistributedQueue} object
*/
ZkDistributedQueue getStateUpdateQueue(Stats zkStats) {
return new ZkDistributedQueue(reader.getZkClient(), "/overseer/queue", zkStats, STATE_UPDATE_MAX_QUEUE, new ConnectionManager.IsClosed(){
public boolean isClosed() {
return Overseer.this.isClosed() || zkController.getCoreContainer().isShutDown();
}
});
}
/**
* Internal overseer work queue. This should not be used outside of Overseer.
* <p>
* This queue is used to store overseer operations that have been removed from the
* state update queue but are being executed as part of a batch. Once
* the result of the batch is persisted to zookeeper, these items are removed from the
* work queue. If the overseer dies while processing a batch then a new overseer always
* operates from the work queue first and only then starts processing operations from the
* state update queue.
* This method will create the /overseer znode in ZooKeeper if it does not exist already.
*
* @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
* @param zkStats a {@link Stats} object which tracks statistics for all zookeeper operations performed by this queue
* @return a {@link ZkDistributedQueue} object
*/
static ZkDistributedQueue getInternalWorkQueue(final SolrZkClient zkClient, Stats zkStats) {
return new ZkDistributedQueue(zkClient, "/overseer/queue-work", zkStats);
}
  /* Internal map of currently running tasks, not to be used outside of the Overseer */
static DistributedMap getRunningMap(final SolrZkClient zkClient) {
return new DistributedMap(zkClient, "/overseer/collection-map-running");
}
/* Size-limited map for successfully completed tasks*/
static DistributedMap getCompletedMap(final SolrZkClient zkClient) {
return new SizeLimitedDistributedMap(zkClient, "/overseer/collection-map-completed", NUM_RESPONSES_TO_STORE, (child) -> getAsyncIdsMap(zkClient).remove(child));
}
/* Map for failed tasks, not to be used outside of the Overseer */
static DistributedMap getFailureMap(final SolrZkClient zkClient) {
return new SizeLimitedDistributedMap(zkClient, "/overseer/collection-map-failure", NUM_RESPONSES_TO_STORE, (child) -> getAsyncIdsMap(zkClient).remove(child));
}
/* Map of async IDs currently in use*/
static DistributedMap getAsyncIdsMap(final SolrZkClient zkClient) {
return new DistributedMap(zkClient, "/overseer/async_ids");
}
/**
* Get queue that can be used to submit collection API tasks to the Overseer.
* <p>
* This queue is used internally by the {@link CollectionsHandler} to submit collection API
* tasks which are executed by the {@link OverseerCollectionMessageHandler}. The actions supported
* by this queue are listed in the {@link org.apache.solr.common.params.CollectionParams.CollectionAction}
* enum.
* <p>
* Performance statistics on the returned queue
* are <em>not</em> tracked by the Overseer Stats API,
* see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
*
* @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
* @return a {@link ZkDistributedQueue} object
*/
OverseerTaskQueue getCollectionQueue(final SolrZkClient zkClient) {
return getCollectionQueue(zkClient, new Stats());
}
/**
   * Get queue that can be used to read collection API tasks submitted to the Overseer.
* <p>
* This queue is used internally by the {@link OverseerCollectionMessageHandler} to read collection API
* tasks submitted by the {@link CollectionsHandler}. The actions supported
* by this queue are listed in the {@link org.apache.solr.common.params.CollectionParams.CollectionAction}
* enum.
* <p>
* Performance statistics on the returned queue are tracked by the Overseer Stats API,
* see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
*
* @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
* @return a {@link ZkDistributedQueue} object
*/
OverseerTaskQueue getCollectionQueue(final SolrZkClient zkClient, Stats zkStats) {
return new OverseerTaskQueue(zkClient, "/overseer/collection-queue-work", zkStats);
}
/**
* Get queue that can be used to submit configset API tasks to the Overseer.
* <p>
* This queue is used internally by the {@link org.apache.solr.handler.admin.ConfigSetsHandler} to submit
* tasks which are executed by the {@link OverseerConfigSetMessageHandler}. The actions supported
* by this queue are listed in the {@link org.apache.solr.common.params.ConfigSetParams.ConfigSetAction}
* enum.
* <p>
* Performance statistics on the returned queue
* are <em>not</em> tracked by the Overseer Stats API,
* see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
*
* @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
* @return a {@link ZkDistributedQueue} object
*/
OverseerTaskQueue getConfigSetQueue(final SolrZkClient zkClient) {
return getConfigSetQueue(zkClient, new Stats());
}
/**
   * Get queue that can be used to read configset API tasks submitted to the Overseer.
* <p>
* This queue is used internally by the {@link OverseerConfigSetMessageHandler} to read configset API
* tasks submitted by the {@link org.apache.solr.handler.admin.ConfigSetsHandler}. The actions supported
* by this queue are listed in the {@link org.apache.solr.common.params.ConfigSetParams.ConfigSetAction}
* enum.
* <p>
* Performance statistics on the returned queue are tracked by the Overseer Stats API,
* see {@link org.apache.solr.common.params.CollectionParams.CollectionAction#OVERSEERSTATUS}.
* <p>
* For now, this internally returns the same queue as {@link #getCollectionQueue(SolrZkClient, Stats)}.
* It is the responsibility of the client to ensure that configset API actions are prefixed with
* {@link OverseerConfigSetMessageHandler#CONFIGSETS_ACTION_PREFIX} so that it is processed by
* {@link OverseerConfigSetMessageHandler}.
*
* @param zkClient the {@link SolrZkClient} to be used for reading/writing to the queue
* @return a {@link ZkDistributedQueue} object
*/
OverseerTaskQueue getConfigSetQueue(final SolrZkClient zkClient, Stats zkStats) {
// For now, we use the same queue as the collection queue, but ensure
// that the actions are prefixed with a unique string.
return getCollectionQueue(zkClient, zkStats);
}
private void createOverseerNode(final SolrZkClient zkClient) {
try {
zkClient.create("/overseer", new byte[0], CreateMode.PERSISTENT, true);
} catch (KeeperException.NodeExistsException e) {
//ok
} catch (InterruptedException e) {
log.error("Could not create Overseer node", e);
Thread.currentThread().interrupt();
throw new RuntimeException(e);
} catch (KeeperException e) {
log.error("Could not create Overseer node", e);
throw new RuntimeException(e);
}
}
public static boolean isLegacy(ZkStateReader stateReader) {
String legacyProperty = stateReader.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "false");
return "true".equals(legacyProperty);
}
public static boolean isLegacy(ClusterStateProvider clusterStateProvider) {
String legacyProperty = clusterStateProvider.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "false");
return "true".equals(legacyProperty);
}
public ZkStateReader getZkStateReader() {
return reader;
}
public void offerStateUpdate(byte[] data) throws KeeperException, InterruptedException {
if (zkController.getZkClient().isClosed()) {
throw new AlreadyClosedException();
}
getStateUpdateQueue().offer(data);
}
}
| 1 | 32,207 | I only see new imports. Is there any functional change in this file? | apache-lucene-solr | java |
@@ -1141,6 +1141,18 @@ func (bc *blockchain) pickAndRunActions(ctx context.Context, actionMap map[strin
executedActions = append(executedActions, grant)
}
+ if raCtx.BlockHeight == 1 {
+ tsf, err := bc.createMemorialTransfer(raCtx.Producer.String(), raCtx.ActionGasLimit)
+ receipt, err = ws.RunAction(raCtx, tsf)
+ if err != nil {
+ return hash.ZeroHash256, nil, nil, err
+ }
+ if receipt != nil {
+ receipts = append(receipts, receipt)
+ }
+ executedActions = append(executedActions, tsf)
+ }
+
blockMtc.WithLabelValues("gasConsumed").Set(float64(bc.config.Genesis.BlockGasLimit - raCtx.GasLimit))
return ws.UpdateBlockLevelInfo(raCtx.BlockHeight), receipts, executedActions, nil | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blockchain
import (
"context"
"math/big"
"os"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/facebookgo/clock"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/execution/evm"
"github.com/iotexproject/iotex-core/action/protocol/poll"
"github.com/iotexproject/iotex-core/action/protocol/rewarding"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/action/protocol/vote"
"github.com/iotexproject/iotex-core/actpool/actioniterator"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/crypto"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/lifecycle"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/prometheustimer"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/pkg/util/fileutil"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/state/factory"
)
var (
blockMtc = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "iotex_block_metrics",
Help: "Block metrics.",
},
[]string{"type"},
)
errDelegatesNotExist = errors.New("delegates cannot be found")
)
func init() {
prometheus.MustRegister(blockMtc)
}
// Blockchain represents the blockchain data structure and hosts the APIs to access it
type Blockchain interface {
lifecycle.StartStopper
// Balance returns balance of an account
Balance(addr string) (*big.Int, error)
// Nonce returns the nonce if the account exists
Nonce(addr string) (uint64, error)
// CreateState adds a new account with initial balance to the factory
CreateState(addr string, init *big.Int) (*state.Account, error)
// CandidatesByHeight returns the candidate list by a given height
CandidatesByHeight(height uint64) ([]*state.Candidate, error)
// ProductivityByEpoch returns the number of produced blocks per delegate in an epoch
ProductivityByEpoch(epochNum uint64) (uint64, map[string]uint64, error)
// For exposing blockchain states
// GetHeightByHash returns Block's height by hash
GetHeightByHash(h hash.Hash256) (uint64, error)
// GetHashByHeight returns Block's hash by height
GetHashByHeight(height uint64) (hash.Hash256, error)
// GetBlockByHeight returns Block by height
GetBlockByHeight(height uint64) (*block.Block, error)
// GetBlockByHash returns Block by hash
GetBlockByHash(h hash.Hash256) (*block.Block, error)
	// BlockHeaderByHeight returns block header by height
	BlockHeaderByHeight(height uint64) (*block.Header, error)
	// BlockHeaderByHash returns block header by hash
	BlockHeaderByHash(h hash.Hash256) (*block.Header, error)
	// BlockFooterByHeight returns block footer by height
	BlockFooterByHeight(height uint64) (*block.Footer, error)
	// BlockFooterByHash returns block footer by hash
	BlockFooterByHash(h hash.Hash256) (*block.Footer, error)
// GetTotalActions returns the total number of actions
GetTotalActions() (uint64, error)
// GetReceiptByActionHash returns the receipt by action hash
GetReceiptByActionHash(h hash.Hash256) (*action.Receipt, error)
// GetActionsFromAddress returns actions from address
GetActionsFromAddress(address string) ([]hash.Hash256, error)
// GetActionsToAddress returns actions to address
GetActionsToAddress(address string) ([]hash.Hash256, error)
// GetActionCountByAddress returns action count by address
GetActionCountByAddress(address string) (uint64, error)
// GetActionByActionHash returns action by action hash
GetActionByActionHash(h hash.Hash256) (action.SealedEnvelope, error)
// GetBlockHashByActionHash returns Block hash by action hash
GetBlockHashByActionHash(h hash.Hash256) (hash.Hash256, error)
// GetFactory returns the state factory
GetFactory() factory.Factory
// GetChainID returns the chain ID
ChainID() uint32
	// ChainAddress returns chain address on parent chain; the root chain returns empty.
ChainAddress() string
// TipHash returns tip block's hash
TipHash() hash.Hash256
// TipHeight returns tip block's height
TipHeight() uint64
// StateByAddr returns account of a given address
StateByAddr(address string) (*state.Account, error)
// RecoverChainAndState recovers the chain to target height and refresh state db if necessary
RecoverChainAndState(targetHeight uint64) error
// GenesisTimestamp returns the timestamp of genesis
GenesisTimestamp() int64
// For block operations
// MintNewBlock creates a new block with given actions
// Note: the coinbase transfer will be added to the given transfers when minting a new block
MintNewBlock(
actionMap map[string][]action.SealedEnvelope,
timestamp time.Time,
) (*block.Block, error)
// CommitBlock validates and appends a block to the chain
CommitBlock(blk *block.Block) error
// ValidateBlock validates a new block before adding it to the blockchain
ValidateBlock(blk *block.Block) error
// For action operations
// Validator returns the current validator object
Validator() Validator
// SetValidator sets the current validator object
SetValidator(val Validator)
// For smart contract operations
// ExecuteContractRead runs a read-only smart contract operation, this is done off the network since it does not
// cause any state change
ExecuteContractRead(caller address.Address, ex *action.Execution) ([]byte, *action.Receipt, error)
	// AddSubscriber makes you listen to every single produced block
	AddSubscriber(BlockCreationSubscriber) error
	// RemoveSubscriber stops you from listening to produced blocks
	RemoveSubscriber(BlockCreationSubscriber) error
}
// blockchain implements the Blockchain interface
type blockchain struct {
mu sync.RWMutex // mutex to protect utk, tipHeight and tipHash
dao *blockDAO
config config.Config
tipHeight uint64
tipHash hash.Hash256
validator Validator
lifecycle lifecycle.Lifecycle
clk clock.Clock
blocklistener []BlockCreationSubscriber
timerFactory *prometheustimer.TimerFactory
// used by account-based model
sf factory.Factory
registry *protocol.Registry
enableExperimentalActions bool
}
// Option sets blockchain construction parameter
type Option func(*blockchain, config.Config) error
// DefaultStateFactoryOption sets blockchain's sf from config
func DefaultStateFactoryOption() Option {
return func(bc *blockchain, cfg config.Config) (err error) {
if cfg.Chain.EnableTrielessStateDB {
bc.sf, err = factory.NewStateDB(cfg, factory.DefaultStateDBOption())
} else {
bc.sf, err = factory.NewFactory(cfg, factory.DefaultTrieOption())
}
if err != nil {
return errors.Wrapf(err, "Failed to create state factory")
}
return nil
}
}
// PrecreatedStateFactoryOption sets blockchain's state.Factory to sf
func PrecreatedStateFactoryOption(sf factory.Factory) Option {
return func(bc *blockchain, conf config.Config) error {
bc.sf = sf
return nil
}
}
// InMemStateFactoryOption sets blockchain's factory.Factory as in memory sf
func InMemStateFactoryOption() Option {
return func(bc *blockchain, cfg config.Config) error {
sf, err := factory.NewFactory(cfg, factory.InMemTrieOption())
if err != nil {
return errors.Wrapf(err, "Failed to create state factory")
}
bc.sf = sf
return nil
}
}
// PrecreatedDaoOption sets blockchain's dao
func PrecreatedDaoOption(dao *blockDAO) Option {
return func(bc *blockchain, conf config.Config) error {
bc.dao = dao
return nil
}
}
// BoltDBDaoOption sets blockchain's dao with BoltDB from config.Chain.ChainDBPath
func BoltDBDaoOption() Option {
return func(bc *blockchain, cfg config.Config) error {
cfg.DB.DbPath = cfg.Chain.ChainDBPath // TODO: remove this after moving TrieDBPath from cfg.Chain to cfg.DB
_, gateway := cfg.Plugins[config.GatewayPlugin]
bc.dao = newBlockDAO(
db.NewOnDiskDB(cfg.DB),
gateway && !cfg.Chain.EnableAsyncIndexWrite,
cfg.Chain.CompressBlock,
cfg.Chain.MaxCacheSize,
)
return nil
}
}
// InMemDaoOption sets blockchain's dao with MemKVStore
func InMemDaoOption() Option {
return func(bc *blockchain, cfg config.Config) error {
_, gateway := cfg.Plugins[config.GatewayPlugin]
bc.dao = newBlockDAO(
db.NewMemKVStore(),
gateway && !cfg.Chain.EnableAsyncIndexWrite,
cfg.Chain.CompressBlock,
cfg.Chain.MaxCacheSize,
)
return nil
}
}
// ClockOption overrides the default clock
func ClockOption(clk clock.Clock) Option {
return func(bc *blockchain, conf config.Config) error {
bc.clk = clk
return nil
}
}
// RegistryOption sets the blockchain with the protocol registry
func RegistryOption(registry *protocol.Registry) Option {
return func(bc *blockchain, conf config.Config) error {
bc.registry = registry
return nil
}
}
// EnableExperimentalActions enables the blockchain to process experimental actions
func EnableExperimentalActions() Option {
return func(bc *blockchain, conf config.Config) error {
bc.enableExperimentalActions = true
return nil
}
}
// NewBlockchain creates a new blockchain and DB instance
func NewBlockchain(cfg config.Config, opts ...Option) Blockchain {
// create the Blockchain
chain := &blockchain{
config: cfg,
clk: clock.New(),
}
for _, opt := range opts {
if err := opt(chain, cfg); err != nil {
log.S().Panicf("Failed to execute blockchain creation option %p: %v", opt, err)
}
}
timerFactory, err := prometheustimer.New(
"iotex_blockchain_perf",
"Performance of blockchain module",
[]string{"topic", "chainID"},
[]string{"default", strconv.FormatUint(uint64(cfg.Chain.ID), 10)},
)
if err != nil {
log.L().Panic("Failed to generate prometheus timer factory.", zap.Error(err))
}
chain.timerFactory = timerFactory
// Set block validator
if err != nil {
log.L().Panic("Failed to get block producer address.", zap.Error(err))
}
chain.validator = &validator{
sf: chain.sf,
validatorAddr: cfg.ProducerAddress().String(),
enableExperimentalActions: chain.enableExperimentalActions,
}
if chain.dao != nil {
chain.lifecycle.Add(chain.dao)
}
if chain.sf != nil {
chain.lifecycle.Add(chain.sf)
}
return chain
}
func (bc *blockchain) ChainID() uint32 {
return atomic.LoadUint32(&bc.config.Chain.ID)
}
func (bc *blockchain) ChainAddress() string {
return bc.config.Chain.Address
}
// Start starts the blockchain
func (bc *blockchain) Start(ctx context.Context) (err error) {
bc.mu.Lock()
defer bc.mu.Unlock()
if err = bc.lifecycle.OnStart(ctx); err != nil {
return err
}
// get blockchain tip height
if bc.tipHeight, err = bc.dao.getBlockchainHeight(); err != nil {
return err
}
if bc.tipHeight == 0 {
return bc.startEmptyBlockchain()
}
// get blockchain tip hash
if bc.tipHash, err = bc.dao.getBlockHash(bc.tipHeight); err != nil {
return err
}
return bc.startExistingBlockchain()
}
// Stop stops the blockchain.
func (bc *blockchain) Stop(ctx context.Context) error {
bc.mu.Lock()
defer bc.mu.Unlock()
return bc.lifecycle.OnStop(ctx)
}
// Balance returns balance of address
func (bc *blockchain) Balance(addr string) (*big.Int, error) {
return bc.sf.Balance(addr)
}
// Nonce returns the nonce if the account exists
func (bc *blockchain) Nonce(addr string) (uint64, error) {
return bc.sf.Nonce(addr)
}
// CandidatesByHeight returns the candidate list by a given height
func (bc *blockchain) CandidatesByHeight(height uint64) ([]*state.Candidate, error) {
return bc.candidatesByHeight(height)
}
// ProductivityByEpoch returns the map of the number of blocks produced per delegate in an epoch
func (bc *blockchain) ProductivityByEpoch(epochNum uint64) (uint64, map[string]uint64, error) {
p, ok := bc.registry.Find(rolldpos.ProtocolID)
if !ok {
return 0, nil, errors.New("rolldpos protocol is not registered")
}
rp, ok := p.(*rolldpos.Protocol)
if !ok {
return 0, nil, errors.New("fail to cast rolldpos protocol")
}
var isCurrentEpoch bool
currentEpochNum := rp.GetEpochNum(bc.tipHeight)
if epochNum > currentEpochNum {
return 0, nil, errors.New("epoch number is larger than current epoch number")
}
if epochNum == currentEpochNum {
isCurrentEpoch = true
}
epochStartHeight := rp.GetEpochHeight(epochNum)
var epochEndHeight uint64
if isCurrentEpoch {
epochEndHeight = bc.tipHeight
} else {
epochEndHeight = rp.GetEpochLastBlockHeight(epochNum)
}
numBlks := epochEndHeight - epochStartHeight + 1
p, ok = bc.registry.Find(poll.ProtocolID)
if !ok {
return 0, nil, errors.New("poll protocol is not registered")
}
ctx := protocol.WithRunActionsCtx(context.Background(), protocol.RunActionsCtx{
BlockHeight: bc.tipHeight,
Registry: bc.registry,
})
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return 0, nil, err
}
s, err := p.ReadState(ctx, ws, []byte("ActiveBlockProducersByEpoch"),
byteutil.Uint64ToBytes(epochNum))
if err != nil {
return 0, nil, status.Error(codes.NotFound, err.Error())
}
var activeConsensusBlockProducers state.CandidateList
if err := activeConsensusBlockProducers.Deserialize(s); err != nil {
return 0, nil, err
}
produce := make(map[string]uint64)
for _, bp := range activeConsensusBlockProducers {
produce[bp.Address] = 0
}
for i := uint64(0); i < numBlks; i++ {
blk, err := bc.blockHeaderByHeight(epochStartHeight + i)
if err != nil {
return 0, nil, err
}
produce[blk.ProducerAddress()]++
}
return numBlks, produce, nil
}
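ProductivityByEpoch only needs the epoch's first and last block heights from the rolldpos protocol before it tallies producer addresses. As a rough illustration of that height arithmetic, here is a standalone Go sketch that assumes a fixed epoch size of numDelegates * numSubEpochs blocks starting at height 1 (an assumption for illustration, not the actual rolldpos.Protocol API):

package main

import "fmt"

// epochBounds sketches how an epoch number could map to block heights,
// assuming every epoch holds numDelegates*numSubEpochs blocks and the
// chain starts at height 1 (an assumption, not the real rolldpos API).
func epochBounds(epochNum, numDelegates, numSubEpochs uint64) (start, last uint64) {
	blocksPerEpoch := numDelegates * numSubEpochs
	start = (epochNum-1)*blocksPerEpoch + 1
	last = epochNum * blocksPerEpoch
	return start, last
}

func main() {
	// With 24 delegates and 15 sub-epochs, epoch 2 spans heights 361..720,
	// so a productivity scan over that epoch walks 360 block headers.
	start, last := epochBounds(2, 24, 15)
	fmt.Println(start, last, last-start+1) // 361 720 360
}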
// GetHeightByHash returns block's height by hash
func (bc *blockchain) GetHeightByHash(h hash.Hash256) (uint64, error) {
return bc.dao.getBlockHeight(h)
}
// GetHashByHeight returns block's hash by height
func (bc *blockchain) GetHashByHeight(height uint64) (hash.Hash256, error) {
return bc.dao.getBlockHash(height)
}
// GetBlockByHeight returns block from the blockchain hash by height
func (bc *blockchain) GetBlockByHeight(height uint64) (*block.Block, error) {
blk, err := bc.getBlockByHeight(height)
if blk == nil || err != nil {
return blk, err
}
blk.HeaderLogger(log.L()).Debug("Get block.")
return blk, err
}
// GetBlockByHash returns block from the blockchain hash by hash
func (bc *blockchain) GetBlockByHash(h hash.Hash256) (*block.Block, error) {
return bc.dao.getBlock(h)
}
func (bc *blockchain) BlockHeaderByHeight(height uint64) (*block.Header, error) {
return bc.blockHeaderByHeight(height)
}
func (bc *blockchain) BlockHeaderByHash(h hash.Hash256) (*block.Header, error) {
return bc.dao.Header(h)
}
func (bc *blockchain) BlockFooterByHeight(height uint64) (*block.Footer, error) {
return bc.blockFooterByHeight(height)
}
func (bc *blockchain) BlockFooterByHash(h hash.Hash256) (*block.Footer, error) {
return bc.dao.Footer(h)
}
// GetTotalActions returns the total number of actions
func (bc *blockchain) GetTotalActions() (uint64, error) {
return bc.dao.getTotalActions()
}
// GetReceiptByActionHash returns the receipt by action hash
func (bc *blockchain) GetReceiptByActionHash(h hash.Hash256) (*action.Receipt, error) {
return bc.dao.getReceiptByActionHash(h)
}
// GetActionsFromAddress returns actions from address
func (bc *blockchain) GetActionsFromAddress(addrStr string) ([]hash.Hash256, error) {
addr, err := address.FromString(addrStr)
if err != nil {
return nil, err
}
return getActionsBySenderAddress(bc.dao.kvstore, hash.BytesToHash160(addr.Bytes()))
}
// GetActionToAddress returns action to address
func (bc *blockchain) GetActionsToAddress(addrStr string) ([]hash.Hash256, error) {
addr, err := address.FromString(addrStr)
if err != nil {
return nil, err
}
return getActionsByRecipientAddress(bc.dao.kvstore, hash.BytesToHash160(addr.Bytes()))
}
// GetActionCountByAddress returns action count by address
func (bc *blockchain) GetActionCountByAddress(addrStr string) (uint64, error) {
addr, err := address.FromString(addrStr)
if err != nil {
return 0, err
}
fromCount, err := getActionCountBySenderAddress(bc.dao.kvstore, hash.BytesToHash160(addr.Bytes()))
if err != nil {
return 0, err
}
toCount, err := getActionCountByRecipientAddress(bc.dao.kvstore, hash.BytesToHash160(addr.Bytes()))
if err != nil {
return 0, err
}
return fromCount + toCount, nil
}
func (bc *blockchain) getActionByActionHashHelper(h hash.Hash256) (hash.Hash256, error) {
return getBlockHashByActionHash(bc.dao.kvstore, h)
}
// GetActionByActionHash returns action by action hash
func (bc *blockchain) GetActionByActionHash(h hash.Hash256) (action.SealedEnvelope, error) {
blkHash, err := bc.getActionByActionHashHelper(h)
if err != nil {
return action.SealedEnvelope{}, err
}
blk, err := bc.dao.getBlock(blkHash)
if err != nil {
return action.SealedEnvelope{}, err
}
for _, act := range blk.Actions {
if act.Hash() == h {
return act, nil
}
}
return action.SealedEnvelope{}, errors.Errorf("block %x does not have transfer %x", blkHash, h)
}
// GetBlockHashByActionHash returns Block hash by action hash
func (bc *blockchain) GetBlockHashByActionHash(h hash.Hash256) (hash.Hash256, error) {
return getBlockHashByActionHash(bc.dao.kvstore, h)
}
// GetFactory returns the state factory
func (bc *blockchain) GetFactory() factory.Factory {
return bc.sf
}
// TipHash returns tip block's hash
func (bc *blockchain) TipHash() hash.Hash256 {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.tipHash
}
// TipHeight returns tip block's height
func (bc *blockchain) TipHeight() uint64 {
return atomic.LoadUint64(&bc.tipHeight)
}
// ValidateBlock validates a new block before adding it to the blockchain
func (bc *blockchain) ValidateBlock(blk *block.Block) error {
bc.mu.RLock()
defer bc.mu.RUnlock()
timer := bc.timerFactory.NewTimer("ValidateBlock")
defer timer.End()
return bc.validateBlock(blk)
}
func (bc *blockchain) MintNewBlock(
actionMap map[string][]action.SealedEnvelope,
timestamp time.Time,
) (*block.Block, error) {
bc.mu.RLock()
defer bc.mu.RUnlock()
mintNewBlockTimer := bc.timerFactory.NewTimer("MintNewBlock")
defer mintNewBlockTimer.End()
newblockHeight := bc.tipHeight + 1
// run execution and update state trie root hash
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return nil, errors.Wrap(err, "Failed to obtain working set from state factory")
}
gasLimitForContext := bc.config.Genesis.BlockGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
BlockHeight: newblockHeight,
BlockTimeStamp: timestamp,
Producer: bc.config.ProducerAddress(),
GasLimit: gasLimitForContext,
ActionGasLimit: bc.config.Genesis.ActionGasLimit,
Registry: bc.registry,
})
_, rc, actions, err := bc.pickAndRunActions(ctx, actionMap, ws)
if err != nil {
return nil, errors.Wrapf(err, "Failed to update state changes in new block %d", newblockHeight)
}
blockMtc.WithLabelValues("numActions").Set(float64(len(actions)))
sk := bc.config.ProducerPrivateKey()
ra := block.NewRunnableActionsBuilder().
SetHeight(newblockHeight).
SetTimeStamp(timestamp).
AddActions(actions...).
Build(sk.PublicKey())
prevBlkHash := bc.tipHash
	// The first block's previous block hash points to the digest of the genesis config. This guarantees that all
	// nodes can verify they start from the same genesis
if newblockHeight == 1 {
prevBlkHash = bc.config.Genesis.Hash()
}
blk, err := block.NewBuilder(ra).
SetPrevBlockHash(prevBlkHash).
SetDeltaStateDigest(ws.Digest()).
SetReceipts(rc).
SetReceiptRoot(calculateReceiptRoot(rc)).
SignAndBuild(sk)
if err != nil {
return nil, errors.Wrapf(err, "failed to create block")
}
blk.WorkingSet = ws
return &blk, nil
}
// CommitBlock validates and appends a block to the chain
func (bc *blockchain) CommitBlock(blk *block.Block) error {
bc.mu.Lock()
defer bc.mu.Unlock()
timer := bc.timerFactory.NewTimer("CommitBlock")
defer timer.End()
return bc.commitBlock(blk)
}
// StateByAddr returns the account of an address
func (bc *blockchain) StateByAddr(address string) (*state.Account, error) {
if bc.sf != nil {
s, err := bc.sf.AccountState(address)
if err != nil {
log.L().Warn("Failed to get account.", zap.String("address", address), zap.Error(err))
return nil, errors.New("account does not exist")
}
return s, nil
}
return nil, errors.New("state factory is nil")
}
// SetValidator sets the current validator object
func (bc *blockchain) SetValidator(val Validator) {
bc.mu.Lock()
defer bc.mu.Unlock()
bc.validator = val
}
// Validator gets the current validator object
func (bc *blockchain) Validator() Validator {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.validator
}
func (bc *blockchain) AddSubscriber(s BlockCreationSubscriber) error {
bc.mu.Lock()
defer bc.mu.Unlock()
log.L().Info("Add a subscriber.")
if s == nil {
		return errors.New("subscriber cannot be nil")
}
bc.blocklistener = append(bc.blocklistener, s)
return nil
}
func (bc *blockchain) RemoveSubscriber(s BlockCreationSubscriber) error {
bc.mu.Lock()
defer bc.mu.Unlock()
for i, sub := range bc.blocklistener {
if sub == s {
bc.blocklistener = append(bc.blocklistener[:i], bc.blocklistener[i+1:]...)
log.L().Info("Successfully unsubscribe block creation.")
return nil
}
}
return errors.New("cannot find subscription")
}
//======================================
// internal functions
//=====================================
// ExecuteContractRead runs a read-only smart contract operation, this is done off the network since it does not
// cause any state change
func (bc *blockchain) ExecuteContractRead(caller address.Address, ex *action.Execution) ([]byte, *action.Receipt, error) {
// use latest block as carrier to run the offline execution
// the block itself is not used
h := bc.TipHeight()
header, err := bc.BlockHeaderByHeight(h)
if err != nil {
return nil, nil, errors.Wrap(err, "failed to get block in ExecuteContractRead")
}
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return nil, nil, errors.Wrap(err, "failed to obtain working set from state factory")
}
producer, err := address.FromString(header.ProducerAddress())
if err != nil {
return nil, nil, err
}
gasLimit := bc.config.Genesis.BlockGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(), protocol.RunActionsCtx{
BlockHeight: header.Height(),
BlockTimeStamp: header.Timestamp(),
Producer: producer,
Caller: caller,
GasLimit: gasLimit,
ActionGasLimit: bc.config.Genesis.ActionGasLimit,
GasPrice: big.NewInt(0),
IntrinsicGas: 0,
})
return evm.ExecuteContract(
ctx,
ws,
ex,
bc,
)
}
// CreateState adds a new account with initial balance to the factory
func (bc *blockchain) CreateState(addr string, init *big.Int) (*state.Account, error) {
if bc.sf == nil {
return nil, errors.New("empty state factory")
}
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return nil, errors.Wrapf(err, "failed to create clean working set")
}
account, err := accountutil.LoadOrCreateAccount(ws, addr, init)
if err != nil {
return nil, errors.Wrapf(err, "failed to create new account %s", addr)
}
gasLimit := bc.config.Genesis.BlockGasLimit
callerAddr, err := address.FromString(addr)
if err != nil {
return nil, err
}
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
GasLimit: gasLimit,
ActionGasLimit: bc.config.Genesis.ActionGasLimit,
Caller: callerAddr,
ActionHash: hash.ZeroHash256,
Nonce: 0,
Registry: bc.registry,
})
if _, err = ws.RunActions(ctx, 0, nil); err != nil {
return nil, errors.Wrap(err, "failed to run the account creation")
}
if err = bc.sf.Commit(ws); err != nil {
return nil, errors.Wrap(err, "failed to commit the account creation")
}
return account, nil
}
// RecoverChainAndState recovers the chain to target height and refresh state db if necessary
func (bc *blockchain) RecoverChainAndState(targetHeight uint64) error {
var buildStateFromScratch bool
stateHeight, err := bc.sf.Height()
if err != nil {
buildStateFromScratch = true
}
if targetHeight > 0 {
if err := bc.recoverToHeight(targetHeight); err != nil {
return errors.Wrapf(err, "failed to recover blockchain to target height %d", targetHeight)
}
if stateHeight > bc.tipHeight {
buildStateFromScratch = true
}
}
if buildStateFromScratch {
return bc.refreshStateDB()
}
return nil
}
func (bc *blockchain) GenesisTimestamp() int64 {
return bc.config.Genesis.Timestamp
}
//======================================
// private functions
//=====================================
func (bc *blockchain) protocol(id string) (protocol.Protocol, bool) {
if bc.registry == nil {
return nil, false
}
return bc.registry.Find(id)
}
func (bc *blockchain) mustGetRollDPoSProtocol() *rolldpos.Protocol {
p, ok := bc.protocol(rolldpos.ProtocolID)
if !ok {
log.L().Panic("protocol rolldpos has not been registered")
}
rp, ok := p.(*rolldpos.Protocol)
if !ok {
log.L().Panic("failed to cast to rolldpos protocol")
}
return rp
}
func (bc *blockchain) candidatesByHeight(height uint64) (state.CandidateList, error) {
if bc.config.Genesis.EnableGravityChainVoting {
rp := bc.mustGetRollDPoSProtocol()
return bc.sf.CandidatesByHeight(rp.GetEpochHeight(rp.GetEpochNum(height)))
}
for {
candidates, err := bc.sf.CandidatesByHeight(height)
if err == nil {
return candidates, nil
}
if height == 0 {
return nil, err
}
height--
}
}
func (bc *blockchain) getBlockByHeight(height uint64) (*block.Block, error) {
hash, err := bc.dao.getBlockHash(height)
if err != nil {
return nil, err
}
return bc.dao.getBlock(hash)
}
func (bc *blockchain) blockHeaderByHeight(height uint64) (*block.Header, error) {
hash, err := bc.dao.getBlockHash(height)
if err != nil {
return nil, err
}
return bc.dao.Header(hash)
}
func (bc *blockchain) blockFooterByHeight(height uint64) (*block.Footer, error) {
hash, err := bc.dao.getBlockHash(height)
if err != nil {
return nil, err
}
return bc.dao.Footer(hash)
}
func (bc *blockchain) startEmptyBlockchain() error {
var ws factory.WorkingSet
var err error
if ws, err = bc.sf.NewWorkingSet(); err != nil {
return errors.Wrap(err, "failed to obtain working set from state factory")
}
if !bc.config.Chain.EmptyGenesis {
// Initialize the states before any actions happen on the blockchain
if err := bc.createGenesisStates(ws); err != nil {
return err
}
_ = ws.UpdateBlockLevelInfo(0)
}
// add Genesis states
if err := bc.sf.Commit(ws); err != nil {
return errors.Wrap(err, "failed to commit Genesis states")
}
return nil
}
func (bc *blockchain) startExistingBlockchain() error {
if bc.sf == nil {
return errors.New("statefactory cannot be nil")
}
stateHeight, err := bc.sf.Height()
if err != nil {
return err
}
if stateHeight > bc.tipHeight {
return errors.New("factory is higher than blockchain")
}
for i := stateHeight + 1; i <= bc.tipHeight; i++ {
blk, err := bc.getBlockByHeight(i)
if err != nil {
return err
}
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return errors.Wrap(err, "failed to obtain working set from state factory")
}
if _, err := bc.runActions(blk.RunnableActions(), ws); err != nil {
return err
}
if err := bc.sf.Commit(ws); err != nil {
return err
}
}
stateHeight, err = bc.sf.Height()
if err != nil {
return errors.Wrap(err, "failed to get factory's height")
}
log.L().Info("Restarting blockchain.",
zap.Uint64("chainHeight",
bc.tipHeight),
zap.Uint64("factoryHeight", stateHeight))
return nil
}
func (bc *blockchain) validateBlock(blk *block.Block) error {
validateTimer := bc.timerFactory.NewTimer("validate")
prevBlkHash := bc.tipHash
if blk.Height() == 1 {
prevBlkHash = bc.config.Genesis.Hash()
}
err := bc.validator.Validate(blk, bc.tipHeight, prevBlkHash)
validateTimer.End()
if err != nil {
return errors.Wrapf(err, "error when validating block %d", blk.Height())
}
// run actions and update state factory
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return errors.Wrap(err, "Failed to obtain working set from state factory")
}
runTimer := bc.timerFactory.NewTimer("runActions")
receipts, err := bc.runActions(blk.RunnableActions(), ws)
runTimer.End()
if err != nil {
log.L().Panic("Failed to update state.", zap.Uint64("tipHeight", bc.tipHeight), zap.Error(err))
}
if err = blk.VerifyDeltaStateDigest(ws.Digest()); err != nil {
return err
}
if err = blk.VerifyReceiptRoot(calculateReceiptRoot(receipts)); err != nil {
return errors.Wrap(err, "Failed to verify receipt root")
}
blk.Receipts = receipts
// attach working set to be committed to state factory
blk.WorkingSet = ws
return nil
}
// commitBlock commits a block to the chain
func (bc *blockchain) commitBlock(blk *block.Block) error {
	// Check if it already exists, and return early
blkHash, err := bc.dao.getBlockHash(blk.Height())
if blkHash != hash.ZeroHash256 {
log.L().Debug("Block already exists.", zap.Uint64("height", blk.Height()))
return nil
}
	// If it's a real db io error, return early with the error
if errors.Cause(err) != db.ErrNotExist {
return err
}
// write block into DB
putTimer := bc.timerFactory.NewTimer("putBlock")
err = bc.dao.putBlock(blk)
putTimer.End()
if err != nil {
return err
}
// update tip hash and height
atomic.StoreUint64(&bc.tipHeight, blk.Height())
bc.tipHash = blk.HashBlock()
if bc.sf != nil {
sfTimer := bc.timerFactory.NewTimer("sf.Commit")
err := bc.sf.Commit(blk.WorkingSet)
sfTimer.End()
// detach working set so it can be freed by GC
blk.WorkingSet = nil
if err != nil {
log.L().Panic("Error when committing states.", zap.Error(err))
}
// write smart contract receipt into DB
receiptTimer := bc.timerFactory.NewTimer("putReceipt")
err = bc.dao.putReceipts(blk.Height(), blk.Receipts)
receiptTimer.End()
if err != nil {
return errors.Wrapf(err, "failed to put smart contract receipts into DB on height %d", blk.Height())
}
}
blk.HeaderLogger(log.L()).Info("Committed a block.", log.Hex("tipHash", bc.tipHash[:]))
// emit block to all block subscribers
bc.emitToSubscribers(blk)
return nil
}
func (bc *blockchain) runActions(
acts block.RunnableActions,
ws factory.WorkingSet,
) ([]*action.Receipt, error) {
if bc.sf == nil {
return nil, errors.New("statefactory cannot be nil")
}
gasLimit := bc.config.Genesis.BlockGasLimit
// update state factory
producer, err := address.FromBytes(acts.BlockProducerPubKey().Hash())
if err != nil {
return nil, err
}
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
BlockHeight: acts.BlockHeight(),
BlockTimeStamp: acts.BlockTimeStamp(),
Producer: producer,
GasLimit: gasLimit,
ActionGasLimit: bc.config.Genesis.ActionGasLimit,
Registry: bc.registry,
})
return ws.RunActions(ctx, acts.BlockHeight(), acts.Actions())
}
func (bc *blockchain) pickAndRunActions(ctx context.Context, actionMap map[string][]action.SealedEnvelope,
ws factory.WorkingSet) (hash.Hash256, []*action.Receipt, []action.SealedEnvelope, error) {
if bc.sf == nil {
return hash.ZeroHash256, nil, nil, errors.New("statefactory cannot be nil")
}
receipts := make([]*action.Receipt, 0)
executedActions := make([]action.SealedEnvelope, 0)
raCtx := protocol.MustGetRunActionsCtx(ctx)
// initial action iterator
actionIterator := actioniterator.NewActionIterator(actionMap)
for {
nextAction, ok := actionIterator.Next()
if !ok {
break
}
receipt, err := ws.RunAction(raCtx, nextAction)
if err != nil {
if errors.Cause(err) == action.ErrHitGasLimit {
				// hit block gas limit, we should not process actions belonging to this user anymore since we
				// need a monotonically increasing nonce. But we can continue processing other actions
				// that belong to other users
actionIterator.PopAccount()
continue
}
return hash.ZeroHash256, nil, nil, errors.Wrapf(err, "Failed to update state changes for selp %x", nextAction.Hash())
}
if receipt != nil {
raCtx.GasLimit -= receipt.GasConsumed
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, nextAction)
		// To avoid looping over all actions in the actpool, we stop processing actions when the remaining gas
		// falls below a certain threshold
if raCtx.GasLimit < bc.config.Chain.AllowedBlockGasResidue {
break
}
}
rp := bc.mustGetRollDPoSProtocol()
epochNum := rp.GetEpochNum(raCtx.BlockHeight)
lastBlkHeight := rp.GetEpochLastBlockHeight(epochNum)
// generate delegates for next round
skip, putPollResult, err := bc.createPutPollResultAction(raCtx.BlockHeight)
switch errors.Cause(err) {
case nil:
if !skip {
receipt, err := ws.RunAction(raCtx, putPollResult)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
if receipt != nil {
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, putPollResult)
}
case errDelegatesNotExist:
if raCtx.BlockHeight == lastBlkHeight {
// TODO (zhi): if some bp by pass this condition, we need to reject block in validation step
return hash.ZeroHash256, nil, nil, errors.Wrapf(
err,
"failed to prepare delegates for next epoch %d",
epochNum+1,
)
}
default:
return hash.ZeroHash256, nil, nil, err
}
// Process grant block reward action
grant, err := bc.createGrantRewardAction(action.BlockReward, raCtx.BlockHeight)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
receipt, err := ws.RunAction(raCtx, grant)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
if receipt != nil {
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, grant)
// Process grant epoch reward action if the block is the last one in an epoch
if raCtx.BlockHeight == lastBlkHeight {
grant, err = bc.createGrantRewardAction(action.EpochReward, raCtx.BlockHeight)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
receipt, err = ws.RunAction(raCtx, grant)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
if receipt != nil {
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, grant)
}
blockMtc.WithLabelValues("gasConsumed").Set(float64(bc.config.Genesis.BlockGasLimit - raCtx.GasLimit))
return ws.UpdateBlockLevelInfo(raCtx.BlockHeight), receipts, executedActions, nil
}
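The selection loop above is essentially a gas-budget cutoff: when an action hits ErrHitGasLimit the rest of that sender's nonce-ordered actions are skipped via PopAccount, and the whole loop stops once the remaining gas falls below AllowedBlockGasResidue. The following standalone sketch mirrors that control flow with made-up types and a plain slice instead of the real actioniterator:

package main

import "fmt"

type act struct {
	sender string
	gas    uint64
}

// pickActions sketches the budget-driven selection: actions are consumed in
// order, a sender is dropped for the rest of the block once one of its
// actions no longer fits (the nonce sequence must stay gapless), and the
// loop ends when the leftover budget drops below the residue threshold.
func pickActions(queue []act, budget, residue uint64) []act {
	skipped := make(map[string]bool)
	var picked []act
	for _, a := range queue {
		if skipped[a.sender] {
			continue // later nonces of this sender would leave a gap
		}
		if a.gas > budget {
			skipped[a.sender] = true // analogous to ErrHitGasLimit + PopAccount
			continue
		}
		budget -= a.gas
		picked = append(picked, a)
		if budget < residue {
			break
		}
	}
	return picked
}

func main() {
	queue := []act{{"alice", 40}, {"bob", 30}, {"alice", 50}, {"carol", 20}}
	fmt.Println(pickActions(queue, 100, 10)) // [{alice 40} {bob 30} {carol 20}]
}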
func (bc *blockchain) createPutPollResultAction(height uint64) (skip bool, se action.SealedEnvelope, err error) {
skip = true
if !bc.config.Genesis.EnableGravityChainVoting {
return
}
pl, ok := bc.protocol(poll.ProtocolID)
if !ok {
log.L().Panic("protocol poll has not been registered")
}
pp, ok := pl.(poll.Protocol)
if !ok {
log.L().Panic("Failed to cast to poll.Protocol")
}
rp := bc.mustGetRollDPoSProtocol()
epochNum := rp.GetEpochNum(height)
epochHeight := rp.GetEpochHeight(epochNum)
nextEpochHeight := rp.GetEpochHeight(epochNum + 1)
if height < epochHeight+(nextEpochHeight-epochHeight)/2 {
return
}
log.L().Debug(
"createPutPollResultAction",
zap.Uint64("height", height),
zap.Uint64("epochNum", epochNum),
zap.Uint64("epochHeight", epochHeight),
zap.Uint64("nextEpochHeight", nextEpochHeight),
)
_, err = bc.candidatesByHeight(nextEpochHeight)
switch errors.Cause(err) {
case nil:
return
case state.ErrStateNotExist:
skip = false
default:
return
}
l, err := pp.DelegatesByHeight(epochHeight)
switch errors.Cause(err) {
case nil:
if len(l) == 0 {
err = errors.Wrapf(
errDelegatesNotExist,
"failed to fetch delegates by epoch height %d, empty list",
epochHeight,
)
return
}
case db.ErrNotExist:
err = errors.Wrapf(
errDelegatesNotExist,
"failed to fetch delegates by epoch height %d, original error %v",
epochHeight,
err,
)
return
default:
return
}
sk := bc.config.ProducerPrivateKey()
nonce := uint64(0)
pollAction := action.NewPutPollResult(nonce, nextEpochHeight, l)
builder := action.EnvelopeBuilder{}
se, err = action.Sign(builder.SetNonce(nonce).SetAction(pollAction).Build(), sk)
return skip, se, err
}
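createPutPollResultAction only considers injecting a PutPollResult action in the second half of an epoch, i.e. once the height reaches epochHeight + (nextEpochHeight-epochHeight)/2, and even then only if the next epoch's candidates are not already in state. A tiny standalone sketch of just the midpoint gate, with made-up heights:

package main

import "fmt"

// pastEpochMidpoint mirrors the height gate checked before a PutPollResult
// action is even considered: only the second half of the epoch qualifies.
func pastEpochMidpoint(height, epochHeight, nextEpochHeight uint64) bool {
	return height >= epochHeight+(nextEpochHeight-epochHeight)/2
}

func main() {
	// For an epoch spanning heights 361..720 (the next epoch starts at 721),
	// the gate opens at 361 + (721-361)/2 = 541.
	fmt.Println(pastEpochMidpoint(540, 361, 721)) // false
	fmt.Println(pastEpochMidpoint(541, 361, 721)) // true
}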
func (bc *blockchain) emitToSubscribers(blk *block.Block) {
if bc.blocklistener == nil {
return
}
for _, s := range bc.blocklistener {
go func(bcs BlockCreationSubscriber, b *block.Block) {
if err := bcs.HandleBlock(b); err != nil {
log.L().Error("Failed to handle new block.", zap.Error(err))
}
}(s, blk)
}
}
// RecoverToHeight recovers the blockchain to target height
func (bc *blockchain) recoverToHeight(targetHeight uint64) error {
for bc.tipHeight > targetHeight {
if err := bc.dao.deleteTipBlock(); err != nil {
return err
}
bc.tipHeight--
}
return nil
}
// RefreshStateDB deletes the existing state DB and creates a new one with state changes from genesis block
func (bc *blockchain) refreshStateDB() error {
// Delete existing state DB and reinitialize it
if fileutil.FileExists(bc.config.Chain.TrieDBPath) && os.Remove(bc.config.Chain.TrieDBPath) != nil {
return errors.New("failed to delete existing state DB")
}
if err := DefaultStateFactoryOption()(bc, bc.config); err != nil {
return errors.Wrap(err, "failed to reinitialize state DB")
}
for _, p := range bc.registry.All() {
bc.sf.AddActionHandlers(p)
}
if err := bc.sf.Start(context.Background()); err != nil {
return errors.Wrap(err, "failed to start state factory")
}
if err := bc.startEmptyBlockchain(); err != nil {
return err
}
if err := bc.sf.Stop(context.Background()); err != nil {
return errors.Wrap(err, "failed to stop state factory")
}
return nil
}
func (bc *blockchain) createGrantRewardAction(rewardType int, height uint64) (action.SealedEnvelope, error) {
gb := action.GrantRewardBuilder{}
grant := gb.SetRewardType(rewardType).SetHeight(height).Build()
eb := action.EnvelopeBuilder{}
envelope := eb.SetNonce(0).
SetGasPrice(big.NewInt(0)).
SetGasLimit(grant.GasLimit()).
SetAction(&grant).
Build()
sk := bc.config.ProducerPrivateKey()
return action.Sign(envelope, sk)
}
func (bc *blockchain) createGenesisStates(ws factory.WorkingSet) error {
if bc.registry == nil {
		// TODO: return nil to avoid test cases failing due to a missing rewarding protocol
return nil
}
ctx := protocol.WithRunActionsCtx(context.Background(), protocol.RunActionsCtx{
BlockHeight: 0,
BlockTimeStamp: time.Unix(bc.config.Genesis.Timestamp, 0),
GasLimit: 0,
ActionGasLimit: bc.config.Genesis.ActionGasLimit,
Producer: nil,
Caller: nil,
ActionHash: hash.ZeroHash256,
Nonce: 0,
Registry: bc.registry,
})
if err := bc.createAccountGenesisStates(ctx, ws); err != nil {
return err
}
if err := bc.createPollGenesisStates(ctx, ws); err != nil {
return err
}
return bc.createRewardingGenesisStates(ctx, ws)
}
func (bc *blockchain) createAccountGenesisStates(ctx context.Context, ws factory.WorkingSet) error {
p, ok := bc.registry.Find(account.ProtocolID)
if !ok {
return nil
}
ap, ok := p.(*account.Protocol)
if !ok {
return errors.Errorf("error when casting protocol")
}
addrs, balances := bc.config.Genesis.InitBalances()
return ap.Initialize(ctx, ws, addrs, balances)
}
func (bc *blockchain) createRewardingGenesisStates(ctx context.Context, ws factory.WorkingSet) error {
p, ok := bc.registry.Find(rewarding.ProtocolID)
if !ok {
return nil
}
rp, ok := p.(*rewarding.Protocol)
if !ok {
return errors.Errorf("error when casting protocol")
}
return rp.Initialize(
ctx,
ws,
bc.config.Genesis.InitBalance(),
bc.config.Genesis.BlockReward(),
bc.config.Genesis.EpochReward(),
bc.config.Genesis.NumDelegatesForEpochReward,
bc.config.Genesis.ExemptAddrsFromEpochReward(),
bc.config.Genesis.FoundationBonus(),
bc.config.Genesis.NumDelegatesForFoundationBonus,
bc.config.Genesis.FoundationBonusLastEpoch,
bc.config.Genesis.ProductivityThreshold,
)
}
func (bc *blockchain) createPollGenesisStates(ctx context.Context, ws factory.WorkingSet) error {
if bc.config.Genesis.EnableGravityChainVoting {
p, ok := bc.protocol(poll.ProtocolID)
if !ok {
return errors.Errorf("protocol %s is not found", poll.ProtocolID)
}
pp, ok := p.(poll.Protocol)
if !ok {
return errors.Errorf("error when casting poll protocol")
}
return pp.Initialize(
ctx,
ws,
)
}
p, ok := bc.protocol(vote.ProtocolID)
if !ok {
return errors.Errorf("protocol %s is not found", vote.ProtocolID)
}
vp, ok := p.(*vote.Protocol)
if !ok {
return errors.Errorf("error when casting vote protocol")
}
addrs := make([]address.Address, 0)
for _, d := range bc.config.Genesis.Delegates {
addrs = append(addrs, d.OperatorAddr())
}
return vp.Initialize(ctx, ws, addrs)
}
func calculateReceiptRoot(receipts []*action.Receipt) hash.Hash256 {
h := make([]hash.Hash256, 0, len(receipts))
for _, receipt := range receipts {
h = append(h, receipt.Hash())
}
if len(h) == 0 {
return hash.ZeroHash256
}
res := crypto.NewMerkleTree(h).HashTree()
return res
}
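The `ineffassign` finding quoted in the review for this record points at the height-1 branch added by the patch at the top: the error returned by createMemorialTransfer is immediately overwritten by the RunAction assignment, so a failed transfer construction would go unnoticed. Below is a standalone sketch of the pattern and one way to fix it, using stand-in functions rather than the real blockchain methods:

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for bc.createMemorialTransfer and ws.RunAction.
func createTransfer() (string, error) { return "", errors.New("transfer construction failed") }
func runAction(tsf string) (string, error) { return "receipt", nil }

func main() {
	// Ineffectual assignment: err from createTransfer is reassigned before it
	// is ever read, so the construction error above is silently dropped.
	tsf, err := createTransfer()
	receipt, err := runAction(tsf)
	fmt.Println(receipt, err) // receipt <nil> -- the first error is lost

	// Checked version: handle each error before the variable is reused.
	tsf, err = createTransfer()
	if err != nil {
		fmt.Println("caught:", err) // caught: transfer construction failed
		return
	}
	if receipt, err = runAction(tsf); err != nil {
		fmt.Println("caught:", err)
	}
	fmt.Println(receipt)
}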
| 1 | 17,401 | ineffectual assignment to `err` (from `ineffassign`) | iotexproject-iotex-core | go |
@@ -198,6 +198,10 @@ class Driver extends webdriver.WebDriver {
* @return {!Driver} A new driver instance.
*/
static createSession(options, service = getDefaultService()) {
+ if (!service) {
+ service = getDefaultService();
+ }
+
let client = service.start().then(url => new http.HttpClient(url));
let executor = new http.Executor(client);
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview Defines a {@linkplain Driver WebDriver} client for
* Microsoft's Edge web browser. Before using this module,
* you must download and install the latest
* [MicrosoftEdgeDriver](http://go.microsoft.com/fwlink/?LinkId=619687) server.
* Ensure that the MicrosoftEdgeDriver is on your
* [PATH](http://en.wikipedia.org/wiki/PATH_%28variable%29).
*
* There are three primary classes exported by this module:
*
* 1. {@linkplain ServiceBuilder}: configures the
* {@link ./remote.DriverService remote.DriverService}
* that manages the [MicrosoftEdgeDriver] child process.
*
* 2. {@linkplain Options}: defines configuration options for each new
* MicrosoftEdgeDriver session, such as which
* {@linkplain Options#setProxy proxy} to use when starting the browser.
*
* 3. {@linkplain Driver}: the WebDriver client; each new instance will control
* a unique browser session.
*
* __Customizing the MicrosoftEdgeDriver Server__ <a id="custom-server"></a>
*
* By default, every MicrosoftEdge session will use a single driver service,
* which is started the first time a {@link Driver} instance is created and
* terminated when this process exits. The default service will inherit its
* environment from the current process.
* You may obtain a handle to this default service using
* {@link #getDefaultService getDefaultService()} and change its configuration
* with {@link #setDefaultService setDefaultService()}.
*
* You may also create a {@link Driver} with its own driver service. This is
* useful if you need to capture the server's log output for a specific session:
*
* var edge = require('selenium-webdriver/edge');
*
* var service = new edge.ServiceBuilder()
* .setPort(55555)
* .build();
*
* var options = new edge.Options();
* // configure browser options ...
*
* var driver = edge.Driver.createSession(options, service);
*
* Users should only instantiate the {@link Driver} class directly when they
* need a custom driver service configuration (as shown above). For normal
* operation, users should start MicrosoftEdge using the
* {@link ./builder.Builder selenium-webdriver.Builder}.
*
* [MicrosoftEdgeDriver]: https://msdn.microsoft.com/en-us/library/mt188085(v=vs.85).aspx
*/
'use strict';
const fs = require('fs');
const util = require('util');
const http = require('./http');
const io = require('./io');
const portprober = require('./net/portprober');
const promise = require('./lib/promise');
const remote = require('./remote');
const Symbols = require('./lib/symbols');
const webdriver = require('./lib/webdriver');
const {Browser, Capabilities} = require('./lib/capabilities');
const EDGEDRIVER_EXE = 'MicrosoftWebDriver.exe';
/**
* _Synchronously_ attempts to locate the edge driver executable on the current
* system.
*
* @return {?string} the located executable, or `null`.
*/
function locateSynchronously() {
return process.platform === 'win32'
? io.findInPath(EDGEDRIVER_EXE, true) : null;
}
/**
* Class for managing MicrosoftEdgeDriver specific options.
*/
class Options extends Capabilities {
/**
* @param {(Capabilities|Map<string, ?>|Object)=} other Another set of
* capabilities to initialize this instance from.
*/
constructor(other = undefined) {
super(other);
this.setBrowserName(Browser.EDGE);
}
}
/**
* Creates {@link remote.DriverService} instances that manage a
* MicrosoftEdgeDriver server in a child process.
*/
class ServiceBuilder extends remote.DriverService.Builder {
/**
* @param {string=} opt_exe Path to the server executable to use. If omitted,
* the builder will attempt to locate the MicrosoftEdgeDriver on the current
* PATH.
* @throws {Error} If provided executable does not exist, or the
* MicrosoftEdgeDriver cannot be found on the PATH.
*/
constructor(opt_exe) {
let exe = opt_exe || locateSynchronously();
if (!exe) {
throw Error(
'The ' + EDGEDRIVER_EXE + ' could not be found on the current PATH. ' +
'Please download the latest version of the MicrosoftEdgeDriver from ' +
'https://www.microsoft.com/en-us/download/details.aspx?id=48212 and ' +
'ensure it can be found on your PATH.');
}
super(exe);
// Binding to the loopback address will fail if not running with
// administrator privileges. Since we cannot test for that in script
// (or can we?), force the DriverService to use "localhost".
this.setHostname('localhost');
}
/**
* Enables verbose logging.
* @return {!ServiceBuilder} A self reference.
*/
enableVerboseLogging() {
return this.addArguments('--verbose');
}
}
/** @type {remote.DriverService} */
var defaultService = null;
/**
* Sets the default service to use for new MicrosoftEdgeDriver instances.
* @param {!remote.DriverService} service The service to use.
* @throws {Error} If the default service is currently running.
*/
function setDefaultService(service) {
if (defaultService && defaultService.isRunning()) {
throw Error(
'The previously configured EdgeDriver service is still running. ' +
'You must shut it down before you may adjust its configuration.');
}
defaultService = service;
}
/**
* Returns the default MicrosoftEdgeDriver service. If such a service has
* not been configured, one will be constructed using the default configuration
 * for a MicrosoftEdgeDriver executable found on the system PATH.
* @return {!remote.DriverService} The default MicrosoftEdgeDriver service.
*/
function getDefaultService() {
if (!defaultService) {
defaultService = new ServiceBuilder().build();
}
return defaultService;
}
/**
* Creates a new WebDriver client for Microsoft's Edge.
*/
class Driver extends webdriver.WebDriver {
/**
* Creates a new browser session for Microsoft's Edge browser.
*
* @param {(Capabilities|Options)=} options The configuration options.
   * @param {remote.DriverService=} service The service to use; will use
* the {@linkplain #getDefaultService default service} by default.
* @return {!Driver} A new driver instance.
*/
static createSession(options, service = getDefaultService()) {
let client = service.start().then(url => new http.HttpClient(url));
let executor = new http.Executor(client);
options = options || new Options();
return /** @type {!Driver} */(super.createSession(
executor, options, () => service.kill()));
}
/**
* This function is a no-op as file detectors are not supported by this
* implementation.
* @override
*/
setFileDetector() {}
}
// PUBLIC API
exports.Driver = Driver;
exports.Options = Options;
exports.ServiceBuilder = ServiceBuilder;
exports.getDefaultService = getDefaultService;
exports.setDefaultService = setDefaultService;
exports.locateSynchronously = locateSynchronously;
| 1 | 15,395 | Would you mind removing the default parameter above? (I doubt I'll ever use defaults again since you still have to protect against callers explicitly passing `null` or `undefined`) | SeleniumHQ-selenium | py |
@@ -163,6 +163,18 @@ def FindMolChiralCenters(mol, force=True, includeUnassigned=False, includeCIP=Tr
centers.append((idx, code))
return centers
+import warnings
+def WrapLogs():
+ warnings.warn("Deprecated: use the rdkit.rdBase.LogTo*() functions instead.", DeprecationWarning)
+ rdBase.LogToPythonStderr()
+
+def LogWarningMsg(msg):
+ warnings.warn("Deprecated: use rdkit.rdBase.LogWarningMsg() instead.", DeprecationWarning)
+ rdBase.LogWarningMsg(msg)
+
+def LogErrorMsg(msg):
+ warnings.warn("Deprecated: use rdkit.rdBase.LogErrorMsg() instead.", DeprecationWarning)
+ rdBase.LogErrorMsg(msg)
#------------------------------------
# | 1 | #
# Copyright (C) 2000-2017 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" A module for molecules and stuff
see Chem/index.html in the doc tree for documentation
"""
from rdkit import rdBase
from rdkit import RDConfig
from rdkit import DataStructs
from rdkit.Geometry import rdGeometry
from rdkit.Chem import rdchem
_HasSubstructMatchStr = rdchem._HasSubstructMatchStr
from rdkit.Chem.rdchem import *
from rdkit.Chem.rdmolfiles import *
from rdkit.Chem.rdmolops import *
from rdkit.Chem.rdCIPLabeler import *
from rdkit.Chem.inchi import *
try:
# This is an optional component of the build
from rdkit.Chem.rdMolInterchange import *
except ImportError:
pass
# Coordgen needs to know where its template file is.
# The default install puts it in RDDataDir
try:
from rdkit.Chem import rdCoordGen
except ImportError:
pass
else:
templDir = RDConfig.RDDataDir
if templDir[-1] != '/':
templDir += '/'
rdCoordGen.SetDefaultTemplateFileDir(templDir)
def QuickSmartsMatch(smi, sma, unique=True, display=False):
m = MolFromSmiles(smi)
p = MolFromSmarts(sma)
res = m.GetSubstructMatches(p, unique)
if display:
pass
return res
def CanonSmiles(smi, useChiral=1):
m = MolFromSmiles(smi)
return MolToSmiles(m, useChiral)
def SupplierFromFilename(fileN, delim='', **kwargs):
ext = fileN.split('.')[-1].lower()
if ext == 'sdf':
suppl = SDMolSupplier(fileN, **kwargs)
elif ext == 'csv':
if not delim:
delim = ','
suppl = SmilesMolSupplier(fileN, delimiter=delim, **kwargs)
elif ext == 'txt':
if not delim:
delim = '\t'
suppl = SmilesMolSupplier(fileN, delimiter=delim, **kwargs)
elif ext == 'tdt':
suppl = TDTMolSupplier(fileN, delimiter=delim, **kwargs)
else:
raise ValueError("unrecognized extension: %s" % ext)
return suppl
def FindMolChiralCenters(mol, force=True, includeUnassigned=False, includeCIP=True,
useLegacyImplementation=True):
"""
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles('[C@H](Cl)(F)Br')
>>> FindMolChiralCenters(mol)
[(0, 'R')]
>>> mol = Chem.MolFromSmiles('[C@@H](Cl)(F)Br')
>>> FindMolChiralCenters(mol)
[(0, 'S')]
>>> FindMolChiralCenters(Chem.MolFromSmiles('CCC'))
[]
By default unassigned stereo centers are not reported:
>>> mol = Chem.MolFromSmiles('C[C@H](F)C(F)(Cl)Br')
>>> FindMolChiralCenters(mol,force=True)
[(1, 'S')]
but this can be changed:
>>> FindMolChiralCenters(mol,force=True,includeUnassigned=True)
[(1, 'S'), (3, '?')]
The handling of unassigned stereocenters for dependent stereochemistry is not correct
using the legacy implementation:
>>> Chem.FindMolChiralCenters(Chem.MolFromSmiles('C1CC(C)C(C)C(C)C1'),includeUnassigned=True)
[(2, '?'), (6, '?')]
>>> Chem.FindMolChiralCenters(Chem.MolFromSmiles('C1C[C@H](C)C(C)[C@H](C)C1'),includeUnassigned=True)
[(2, 'S'), (4, '?'), (6, 'R')]
But works with the new implementation:
>>> Chem.FindMolChiralCenters(Chem.MolFromSmiles('C1CC(C)C(C)C(C)C1'),includeUnassigned=True, useLegacyImplementation=False)
[(2, '?'), (4, '?'), (6, '?')]
Note that the new implementation also gets the correct descriptors for para-stereochemistry:
>>> Chem.FindMolChiralCenters(Chem.MolFromSmiles('C1C[C@H](C)[C@H](C)[C@H](C)C1'),useLegacyImplementation=False)
[(2, 'S'), (4, 's'), (6, 'R')]
With the new implementation, if you don't care about the CIP labels of stereocenters, you can save
some time by disabling those:
>>> Chem.FindMolChiralCenters(Chem.MolFromSmiles('C1C[C@H](C)[C@H](C)[C@H](C)C1'), includeCIP=False, useLegacyImplementation=False)
[(2, 'Tet_CCW'), (4, 'Tet_CCW'), (6, 'Tet_CCW')]
"""
if useLegacyImplementation:
AssignStereochemistry(mol, force=force, flagPossibleStereoCenters=includeUnassigned)
centers = []
for atom in mol.GetAtoms():
if atom.HasProp('_CIPCode'):
centers.append((atom.GetIdx(), atom.GetProp('_CIPCode')))
elif includeUnassigned and atom.HasProp('_ChiralityPossible'):
centers.append((atom.GetIdx(), '?'))
else:
centers = []
itms = FindPotentialStereo(mol)
if includeCIP:
atomsToLabel = []
bondsToLabel = []
for si in itms:
if si.type == StereoType.Atom_Tetrahedral:
atomsToLabel.append(si.centeredOn)
elif si.type == StereoType.Bond_Double:
bondsToLabel.append(si.centeredOn)
AssignCIPLabels(mol, atomsToLabel=atomsToLabel, bondsToLabel=bondsToLabel)
for si in itms:
if si.type == StereoType.Atom_Tetrahedral and (includeUnassigned
or si.specified == StereoSpecified.Specified):
idx = si.centeredOn
atm = mol.GetAtomWithIdx(idx)
if includeCIP and atm.HasProp("_CIPCode"):
code = atm.GetProp("_CIPCode")
else:
if si.specified:
code = str(si.descriptor)
else:
code = '?'
atm.SetIntProp('_ChiralityPossible',1)
centers.append((idx, code))
return centers
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest, sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed, tried = _test()
sys.exit(failed)
| 1 | 23,890 | Make sure the old logging functions still work, but add deprecation warnings (unfortunately Python suppresses deprecation warnings by default, so I'm not sure if anyone'll notice). | rdkit-rdkit | cpp |
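The remark about suppressed warnings refers to Python's default warning filters, which hide `DeprecationWarning` raised from library code unless the caller opts in. A small sketch of that behavior — `legacy_wrap_logs` is a hypothetical stand-in for a deprecated shim like the ones in the patch, not the RDKit API:

```python
import warnings


def legacy_wrap_logs():
    # Hypothetical deprecated shim; stacklevel=2 attributes the warning to
    # the caller rather than to this wrapper.
    warnings.warn("Deprecated: use the newer logging functions instead.",
                  DeprecationWarning, stacklevel=2)


if __name__ == "__main__":
    # When a shim like this lives in an imported module, the default filters
    # swallow the DeprecationWarning; opting in makes it visible (equivalently,
    # run with `python -W default::DeprecationWarning`).
    warnings.simplefilter("always", DeprecationWarning)
    legacy_wrap_logs()
```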
@@ -252,14 +252,17 @@ public class ManagementAuthorizationTest extends AuthorizationTest {
public void testTelemetryEnabledAsCamundaAdmin() {
// given
+ disableAuthorization();
+ managementService.enableTelemetry(true);
+ enableAuthorization();
identityService.setAuthentication(userId, Collections.singletonList(Groups.CAMUNDA_ADMIN));
// when
- managementService.enableTelemetry(true);
+ managementService.enableTelemetry(false);
// then
String telemetryPropertyValue = TelemetryHelper.fetchConfigurationProperty(processEngineConfiguration).getValue();
- assertThat(Boolean.parseBoolean(telemetryPropertyValue)).isTrue();
+ assertThat(Boolean.parseBoolean(telemetryPropertyValue)).isFalse();
}
} | 1 | /*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.test.api.authorization;
import java.util.Collections;
import java.util.Map;
import org.camunda.bpm.engine.AuthorizationException;
import org.camunda.bpm.engine.authorization.Groups;
import org.camunda.bpm.engine.impl.persistence.entity.ProcessDefinitionEntity;
import org.camunda.bpm.engine.management.TableMetaData;
import org.camunda.bpm.engine.management.TablePage;
import org.camunda.bpm.engine.test.util.TelemetryHelper;
import static org.assertj.core.api.Assertions.assertThat;
/**
* @author Roman Smirnov
*
*/
public class ManagementAuthorizationTest extends AuthorizationTest {
private static final String REQUIRED_ADMIN_AUTH_EXCEPTION = "ENGINE-03029 Required admin authenticated group or user.";
// get table count //////////////////////////////////////////////
public void testGetTableCountWithoutAuthorization() {
// given
try {
// when
managementService.getTableCount();
fail("Exception expected: It should not be possible to get the table count");
} catch (AuthorizationException e) {
// then
String message = e.getMessage();
assertTextPresent(REQUIRED_ADMIN_AUTH_EXCEPTION, message);
}
}
public void testGetTableCountAsCamundaAdmin() {
// given
identityService.setAuthentication(userId, Collections.singletonList(Groups.CAMUNDA_ADMIN));
// when
Map<String, Long> tableCount = managementService.getTableCount();
// then
assertFalse(tableCount.isEmpty());
}
// get table name //////////////////////////////////////////////
public void testGetTableNameWithoutAuthorization() {
// given
try {
// when
managementService.getTableName(ProcessDefinitionEntity.class);
fail("Exception expected: It should not be possible to get the table name");
} catch (AuthorizationException e) {
// then
String message = e.getMessage();
assertTextPresent(REQUIRED_ADMIN_AUTH_EXCEPTION, message);
}
}
public void testGetTableNameAsCamundaAdmin() {
// given
identityService.setAuthentication(userId, Collections.singletonList(Groups.CAMUNDA_ADMIN));
String tablePrefix = processEngineConfiguration.getDatabaseTablePrefix();
// when
String tableName = managementService.getTableName(ProcessDefinitionEntity.class);
// then
assertEquals(tablePrefix + "ACT_RE_PROCDEF", tableName);
}
// get table meta data //////////////////////////////////////////////
public void testGetTableMetaDataWithoutAuthorization() {
// given
try {
// when
managementService.getTableMetaData("ACT_RE_PROCDEF");
fail("Exception expected: It should not be possible to get the table meta data");
} catch (AuthorizationException e) {
// then
String message = e.getMessage();
assertTextPresent(REQUIRED_ADMIN_AUTH_EXCEPTION, message);
}
}
public void testGetTableMetaDataAsCamundaAdmin() {
// given
identityService.setAuthentication(userId, Collections.singletonList(Groups.CAMUNDA_ADMIN));
// when
TableMetaData tableMetaData = managementService.getTableMetaData("ACT_RE_PROCDEF");
// then
assertNotNull(tableMetaData);
}
// table page query //////////////////////////////////
public void testTablePageQueryWithoutAuthorization() {
// given
try {
// when
managementService.createTablePageQuery().tableName("ACT_RE_PROCDEF").listPage(0, Integer.MAX_VALUE);
fail("Exception expected: It should not be possible to get a table page");
} catch (AuthorizationException e) {
// then
String message = e.getMessage();
assertTextPresent(REQUIRED_ADMIN_AUTH_EXCEPTION, message);
}
}
public void testTablePageQueryAsCamundaAdmin() {
// given
identityService.setAuthentication(userId, Collections.singletonList(Groups.CAMUNDA_ADMIN));
String tablePrefix = processEngineConfiguration.getDatabaseTablePrefix();
// when
TablePage page = managementService.createTablePageQuery().tableName(tablePrefix + "ACT_RE_PROCDEF").listPage(0, Integer.MAX_VALUE);
// then
assertNotNull(page);
}
// get history level /////////////////////////////////
public void testGetHistoryLevelWithoutAuthorization() {
//given
try {
// when
managementService.getHistoryLevel();
fail("Exception expected: It should not be possible to get the history level");
} catch (AuthorizationException e) {
// then
String message = e.getMessage();
assertTextPresent(REQUIRED_ADMIN_AUTH_EXCEPTION, message);
}
}
public void testGetHistoryLevelAsCamundaAdmin() {
//given
identityService.setAuthentication(userId, Collections.singletonList(Groups.CAMUNDA_ADMIN));
// when
int historyLevel = managementService.getHistoryLevel();
// then
assertEquals(processEngineConfiguration.getHistoryLevel().getId(), historyLevel);
}
// database schema upgrade ///////////////////////////
public void testDataSchemaUpgradeWithoutAuthorization() {
// given
try {
// when
managementService.databaseSchemaUpgrade(null, null, null);
fail("Exception expected: It should not be possible to upgrade the database schema");
} catch (AuthorizationException e) {
// then
String message = e.getMessage();
assertTextPresent(REQUIRED_ADMIN_AUTH_EXCEPTION, message);
}
}
// get properties & set/delete property ///////////////////////////
public void testGetPropertiesWithoutAuthorization() {
// given
try {
// when
managementService.getProperties();
fail("Exception expected: It should not be possible to get properties");
} catch (AuthorizationException e) {
// then
String message = e.getMessage();
assertTextPresent(REQUIRED_ADMIN_AUTH_EXCEPTION, message);
}
}
public void testSetPropertyWithoutAuthorization() {
// given
try {
// when
managementService.setProperty("aPropertyKey", "aPropertyValue");
fail("Exception expected: It should not be possible to set a property");
} catch (AuthorizationException e) {
// then
String message = e.getMessage();
assertTextPresent(REQUIRED_ADMIN_AUTH_EXCEPTION, message);
}
}
public void testDeletePropertyWithoutAuthorization() {
// given
try {
// when
managementService.deleteProperty("aPropertyName");
fail("Exception expected: It should not be possible to delete a property");
} catch (AuthorizationException e) {
// then
String message = e.getMessage();
assertTextPresent(REQUIRED_ADMIN_AUTH_EXCEPTION, message);
}
}
// configure telemetry /////////////////////////////////////
public void testTelemetryEnabledWithoutAutorization() {
// given
try {
// when
managementService.enableTelemetry(false);
} catch (AuthorizationException e) {
// then
String message = e.getMessage();
assertTextPresent(REQUIRED_ADMIN_AUTH_EXCEPTION, message);
}
}
public void testTelemetryEnabledAsCamundaAdmin() {
// given
identityService.setAuthentication(userId, Collections.singletonList(Groups.CAMUNDA_ADMIN));
// when
managementService.enableTelemetry(true);
// then
String telemetryPropertyValue = TelemetryHelper.fetchConfigurationProperty(processEngineConfiguration).getValue();
assertThat(Boolean.parseBoolean(telemetryPropertyValue)).isTrue();
}
}
| 1 | 10,536 | Not directly related to the topic of this ticket: I think this API design is a bit confusing. To disable telemetry, I would write `managementService.enableTelemetry(false)`, which is not intuitive to read. Maybe `toggleTelemetry` instead of `enableTelemetry` would be clearer. | camunda-camunda-bpm-platform | java |
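A tiny sketch of the naming concern in the comment above — purely illustrative Python, not the Camunda API: a boolean "enable" method reads oddly when used to disable, while a neutral verb reads the same in both directions.

```python
class ManagementService:
    # Illustrative only.
    def __init__(self):
        self._telemetry_enabled = False

    def enable_telemetry(self, enabled: bool) -> None:
        # Shape under discussion: enable_telemetry(False) reads awkwardly.
        self._telemetry_enabled = enabled

    def toggle_telemetry(self, enabled: bool) -> None:
        # The suggested alternative name: neutral for both directions.
        self._telemetry_enabled = enabled


svc = ManagementService()
svc.enable_telemetry(False)   # "enable ... False" is the confusing reading
svc.toggle_telemetry(False)   # reads consistently whether enabling or disabling
```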
@@ -163,9 +163,11 @@ namespace Microsoft.Sarif.Viewer
HelpLink = rule?.HelpUri?.ToString()
};
- CaptureAnnotatedCodeLocationCollections(result.Stacks, AnnotatedCodeLocationKind.Stack, sarifError);
+ IEnumerable<IEnumerable<AnnotatedCodeLocation>> stackLocations = CreateAnnotationsFromStacks(result.Stacks);
+
+ CaptureAnnotatedCodeLocationCollections(stackLocations, AnnotatedCodeLocationKind.Stack, sarifError);
CaptureAnnotatedCodeLocationCollections(result.CodeFlows, AnnotatedCodeLocationKind.CodeFlow, sarifError);
- CaptureAnnotatedCodeLocations(result.RelatedLocations, AnnotatedCodeLocationKind.Stack, sarifError);
+ CaptureAnnotatedCodeLocations(result.RelatedLocations, AnnotatedCodeLocationKind.Stack, sarifError);
if (region != null)
{ | 1 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using Microsoft.CodeAnalysis.Sarif;
using Microsoft.CodeAnalysis.Sarif.Driver;
using Microsoft.CodeAnalysis.Sarif.Driver.Sdk;
using Microsoft.CodeAnalysis.Sarif.Readers;
using Microsoft.CodeAnalysis.Sarif.Writers;
using Newtonsoft.Json;
namespace Microsoft.Sarif.Viewer
{
public class ErrorListService
{
public static readonly ErrorListService Instance = new ErrorListService();
public static void ProcessLogFile(string filePath, ToolFormat toolFormat = ToolFormat.None)
{
SarifLog log;
JsonSerializerSettings settings = new JsonSerializerSettings()
{
ContractResolver = SarifContractResolver.Instance,
};
string logText;
if (toolFormat == ToolFormat.None)
{
logText = File.ReadAllText(filePath);
}
else if (toolFormat == ToolFormat.PREfast)
{
logText = ToolFormatConverter.ConvertPREfastToStandardFormat(filePath);
}
else
{
// We have conversion to do
var converter = new ToolFormatConverter();
var sb = new StringBuilder();
using (var input = new MemoryStream(File.ReadAllBytes(filePath)))
{
var outputTextWriter = new StringWriter(sb);
var outputJson = new JsonTextWriter(outputTextWriter);
var output = new ResultLogJsonWriter(outputJson);
input.Seek(0, SeekOrigin.Begin);
converter.ConvertToStandardFormat(toolFormat, input, output);
// This is serving as a flush mechanism
output.Dispose();
logText = sb.ToString();
}
}
log = JsonConvert.DeserializeObject<SarifLog>(logText, settings);
ProcessSarifLog(log);
}
private static void ProcessSarifLog(SarifLog sarifLog)
{
foreach (Run run in sarifLog.Runs)
{
Instance.WriteRunToErrorList(run);
}
SarifTableDataSource.Instance.BringToFront();
}
private ErrorListService()
{
this.documentToLineIndexMap = new Dictionary<string, NewLineIndex>();
}
private Dictionary<string, NewLineIndex> documentToLineIndexMap;
private IRule GetRule(Run runLog, string ruleId)
{
if (runLog.Rules == null)
{
return null;
}
foreach (Rule rule in runLog.Rules.Values)
{
if (rule.Id == ruleId) { return rule; }
}
throw new InvalidOperationException();
}
private void WriteRunToErrorList(Run runLog)
{
List<SarifError> sarifErrors = new List<SarifError>();
// Prefer optional fullName, fall back to required Name property
string toolName = runLog.Tool.FullName ?? runLog.Tool.Name;
foreach (Result result in runLog.Results)
{
string category, document, shortMessage, fullMessage;
Region region;
NewLineIndex newLineIndex;
IRule rule = null;
category = null;
if (result.Properties != null)
{
result.Properties.TryGetValue("category", out category);
}
foreach (Location location in result.Locations)
{
region = null;
PhysicalLocation physicalLocation = null;
if (location.ResultFile != null)
{
physicalLocation = location.ResultFile;
document = physicalLocation.Uri.LocalPath;
region = physicalLocation.Region;
}
else if (location.AnalysisTarget != null)
{
physicalLocation = location.AnalysisTarget;
document = physicalLocation.Uri.LocalPath;
region = physicalLocation.Region;
}
else
{
document = location.FullyQualifiedLogicalName;
}
rule = GetRule(runLog, result.RuleId);
shortMessage = result.GetMessageText(rule, concise: true);
fullMessage = result.GetMessageText(rule, concise: false);
if (shortMessage == fullMessage)
{
fullMessage = null;
}
SarifError sarifError = new SarifError(document)
{
Region = region,
RuleId = result.RuleId,
RuleName = rule?.Name,
Kind = result.Kind,
Category = category,
ShortMessage = shortMessage,
FullMessage = fullMessage,
Tool = toolName,
HelpLink = rule?.HelpUri?.ToString()
};
CaptureAnnotatedCodeLocationCollections(result.Stacks, AnnotatedCodeLocationKind.Stack, sarifError);
CaptureAnnotatedCodeLocationCollections(result.CodeFlows, AnnotatedCodeLocationKind.CodeFlow, sarifError);
CaptureAnnotatedCodeLocations(result.RelatedLocations, AnnotatedCodeLocationKind.Stack, sarifError);
if (region != null)
{
sarifError.ColumnNumber = region.StartColumn - 1;
sarifError.LineNumber = region.StartLine - 1;
}
sarifErrors.Add(sarifError);
}
// TODO zap this on implementing todo above
if (result.RelatedLocations != null)
{
foreach (AnnotatedCodeLocation annotation in result.RelatedLocations)
{
PhysicalLocation physicalLocation = annotation.PhysicalLocation;
region = physicalLocation.Region;
shortMessage = annotation.Message;
document = annotation.PhysicalLocation.Uri.LocalPath;
if (!this.documentToLineIndexMap.TryGetValue(document, out newLineIndex))
{
this.documentToLineIndexMap[document] = newLineIndex = new NewLineIndex(File.ReadAllText(document));
}
if (region != null)
{
region.Populate(newLineIndex);
}
SarifError sarifError = new SarifError(document)
{
Region = region,
RuleId = result.RuleId,
RuleName = rule?.Name,
Kind = ResultKind.Note,
Category = "Related Location", // or should we prefer original result category?
ShortMessage = shortMessage,
FullMessage = null,
Tool = toolName
};
if (region != null)
{
sarifError.ColumnNumber = region.StartColumn - 1;
sarifError.LineNumber = region.StartLine - 1;
}
sarifErrors.Add(sarifError);
}
}
CodeAnalysisResultManager.Instance.SarifErrors = sarifErrors;
SarifTableDataSource.Instance.AddErrors(sarifErrors);
}
}
private static void CaptureAnnotatedCodeLocationCollections(
IEnumerable<IEnumerable<AnnotatedCodeLocation>> codeLocationCollections,
AnnotatedCodeLocationKind annotatedCodeLocationKind,
SarifError sarifError)
{
if (codeLocationCollections == null)
{
return;
}
foreach (IEnumerable<AnnotatedCodeLocation> codeLocations in codeLocationCollections)
{
CaptureAnnotatedCodeLocations(codeLocations, annotatedCodeLocationKind, sarifError);
}
}
private static void CaptureAnnotatedCodeLocations(IEnumerable<AnnotatedCodeLocation> codeLocations, AnnotatedCodeLocationKind annotatedCodeLocationKind, SarifError sarifError)
{
if (codeLocations == null)
{
return;
}
int annotationCollectionCount = 0;
foreach (AnnotatedCodeLocation codeLocation in codeLocations)
{
PhysicalLocation plc = codeLocation.PhysicalLocation;
sarifError.Annotations.Add(new AnnotatedCodeLocationModel()
{
Index = annotationCollectionCount,
Kind = annotatedCodeLocationKind,
Region = plc.Region,
FilePath = plc.Uri.LocalPath,
Message = codeLocation.Message
});
annotationCollectionCount++;
}
}
private static bool IsError(ResultKind kind)
{
return
kind == ResultKind.ConfigurationError ||
kind == ResultKind.Error ||
kind == ResultKind.InternalError;
}
}
| 1 | 10,491 | Again, I suggest returning `AnnotatedCodeLocation[][]`. | microsoft-sarif-sdk | .cs |
@@ -16,11 +16,14 @@ package ec2
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/golang/glog"
"github.com/pkg/errors"
"sigs.k8s.io/cluster-api-provider-aws/cloud/aws/providerconfig/v1alpha1"
)
func (s *Service) reconcileInternetGateways(in *v1alpha1.Network) error {
+ glog.V(2).Infof("Reconciling internet gateways")
+
igs, err := s.describeVpcInternetGateways(&in.VPC)
if IsNotFound(err) {
ig, err := s.createInternetGateway(&in.VPC) | 1 | // Copyright © 2018 The Kubernetes Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ec2
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
"sigs.k8s.io/cluster-api-provider-aws/cloud/aws/providerconfig/v1alpha1"
)
func (s *Service) reconcileInternetGateways(in *v1alpha1.Network) error {
igs, err := s.describeVpcInternetGateways(&in.VPC)
if IsNotFound(err) {
ig, err := s.createInternetGateway(&in.VPC)
if err != nil {
return nil
}
igs = []*ec2.InternetGateway{ig}
} else if err != nil {
return err
}
in.InternetGatewayID = igs[0].InternetGatewayId
return nil
}
func (s *Service) createInternetGateway(vpc *v1alpha1.VPC) (*ec2.InternetGateway, error) {
ig, err := s.EC2.CreateInternetGateway(&ec2.CreateInternetGatewayInput{})
if err != nil {
return nil, errors.Wrap(err, "failed to create internet gateway")
}
_, err = s.EC2.AttachInternetGateway(&ec2.AttachInternetGatewayInput{
InternetGatewayId: ig.InternetGateway.InternetGatewayId,
VpcId: aws.String(vpc.ID),
})
if err != nil {
return nil, errors.Wrapf(err, "failed to attach internet gateway %q to vpc %q", *ig.InternetGateway.InternetGatewayId, vpc.ID)
}
return ig.InternetGateway, nil
}
func (s *Service) describeVpcInternetGateways(vpc *v1alpha1.VPC) ([]*ec2.InternetGateway, error) {
out, err := s.EC2.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{
Filters: []*ec2.Filter{
{
Name: aws.String("attachment.vpc-id"),
Values: []*string{aws.String(vpc.ID)},
},
},
})
if err != nil {
return nil, errors.Wrapf(err, "failed to describe nat gateways in vpc %q", vpc.ID)
}
if len(out.InternetGateways) == 0 {
return nil, NewNotFound(errors.Errorf("no nat gateways found in vpc %q", vpc.ID))
}
return out.InternetGateways, nil
}
| 1 | 6,287 | We should probably require increased verbosity to output anything below the cluster itself, to avoid spamming the logs. It would also be good to give additional context as to what we are attempting to reconcile, since the controller has multiple workers. | kubernetes-sigs-cluster-api-provider-aws | go |
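The comment above is about Go's glog verbosity levels; an analogous Python sketch using the standard logging module (names are hypothetical) shows both halves of the suggestion: gate per-resource reconcile chatter behind a higher verbosity, and include which cluster the worker is reconciling so interleaved logs stay attributable.

```python
import logging

logger = logging.getLogger("reconciler")


def reconcile_internet_gateways(cluster_name: str) -> None:
    # DEBUG here plays the role of glog.V(2): hidden at normal verbosity,
    # and the cluster name gives the context the comment asks for.
    logger.debug("Reconciling internet gateways for cluster %s", cluster_name)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)      # normal verbosity: line is hidden
    reconcile_internet_gateways("cluster-a")
    logging.getLogger().setLevel(logging.DEBUG)  # raised verbosity: line appears
    reconcile_internet_gateways("cluster-b")
```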
@@ -32,5 +32,7 @@ type OptionsNetwork struct {
EtherClientRPC string
EtherPaymentsAddress string
+ EnableDNS bool
+
QualityOracle string
} | 1 | /*
* Copyright (C) 2018 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package node
// OptionsNetwork describes possible parameters of network configuration
type OptionsNetwork struct {
Testnet bool
Localnet bool
ExperimentIdentityCheck bool
ExperimentNATPunching bool
MysteriumAPIAddress string
AccessPolicyEndpointAddress string
BrokerAddress string
EtherClientRPC string
EtherPaymentsAddress string
QualityOracle string
}
| 1 | 14,890 | Not needed anymore | mysteriumnetwork-node | go |
@@ -80,6 +80,17 @@ namespace Datadog.Trace.ClrProfiler
Log.Error(ex, "ByRef instrumentation cannot be enabled: ");
}
+ try
+ {
+ Log.Debug("Enabling calltarget state by ref.");
+ NativeMethods.EnableCallTargetStateByRef();
+ Log.Information("CallTarget State ByRef enabled.");
+ }
+ catch (Exception ex)
+ {
+ Log.Error(ex, "CallTarget state ByRef cannot be enabled: ");
+ }
+
try
{
Log.Debug("Sending CallTarget integration definitions to native library."); | 1 | // <copyright file="Instrumentation.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Threading;
using Datadog.Trace.AppSec;
using Datadog.Trace.Ci;
using Datadog.Trace.Configuration;
using Datadog.Trace.DiagnosticListeners;
using Datadog.Trace.Logging;
using Datadog.Trace.ServiceFabric;
namespace Datadog.Trace.ClrProfiler
{
/// <summary>
/// Provides access to the profiler CLSID and whether it is attached to the process.
/// </summary>
[Browsable(false)]
[EditorBrowsable(EditorBrowsableState.Never)]
public static class Instrumentation
{
/// <summary>
/// Indicates whether we're initializing Instrumentation for the first time
/// </summary>
private static int _firstInitialization = 1;
/// <summary>
/// Gets the CLSID for the Datadog .NET profiler
/// </summary>
public static readonly string ProfilerClsid = "{846F5F1C-F9AE-4B07-969E-05C26BC060D8}";
private static readonly IDatadogLogger Log = DatadogLogging.GetLoggerFor(typeof(Instrumentation));
/// <summary>
/// Gets a value indicating whether Datadog's profiler is attached to the current process.
/// </summary>
/// <value>
/// <c>true</c> if the profiler is currently attached; <c>false</c> otherwise.
/// </value>
public static bool ProfilerAttached
{
get
{
try
{
return NativeMethods.IsProfilerAttached();
}
catch (DllNotFoundException)
{
return false;
}
}
}
/// <summary>
/// Initializes global instrumentation values.
/// </summary>
public static void Initialize()
{
if (Interlocked.Exchange(ref _firstInitialization, 0) != 1)
{
// Initialize() was already called before
return;
}
Log.Debug("Initialization started.");
try
{
Log.Debug("Enabling by ref instrumentation.");
NativeMethods.EnableByRefInstrumentation();
Log.Information("ByRef instrumentation enabled.");
}
catch (Exception ex)
{
Log.Error(ex, "ByRef instrumentation cannot be enabled: ");
}
try
{
Log.Debug("Sending CallTarget integration definitions to native library.");
var payload = InstrumentationDefinitions.GetAllDefinitions();
NativeMethods.InitializeProfiler(payload.DefinitionsId, payload.Definitions);
foreach (var def in payload.Definitions)
{
def.Dispose();
}
Log.Information<int>("The profiler has been initialized with {count} definitions.", payload.Definitions.Length);
var asm = typeof(Instrumentation).Assembly;
Log.Information($"[Assembly metadata] Location: {asm.Location}, CodeBase: {asm.CodeBase}, GAC: {asm.GlobalAssemblyCache}, HostContext: {asm.HostContext}, SecurityRuleSet: {asm.SecurityRuleSet}");
}
catch (Exception ex)
{
Log.Error(ex, ex.Message);
}
try
{
// ensure global instance is created if it's not already
if (CIVisibility.Enabled)
{
CIVisibility.Initialize();
}
else
{
Log.Debug("Initializing tracer singleton instance.");
_ = Tracer.Instance;
}
}
catch (Exception ex)
{
Log.Error(ex, ex.Message);
}
try
{
Log.Debug("Initializing security singleton instance.");
_ = Security.Instance;
}
catch (Exception ex)
{
Log.Error(ex, ex.Message);
}
#if !NETFRAMEWORK
try
{
if (GlobalSettings.Source.DiagnosticSourceEnabled)
{
// check if DiagnosticSource is available before trying to use it
var type = Type.GetType("System.Diagnostics.DiagnosticSource, System.Diagnostics.DiagnosticSource", throwOnError: false);
if (type == null)
{
Log.Warning("DiagnosticSource type could not be loaded. Skipping diagnostic observers.");
}
else
{
// don't call this method unless DiagnosticSource is available
StartDiagnosticManager();
}
}
}
catch
{
// ignore
}
// we only support Service Fabric Service Remoting instrumentation on .NET Core (including .NET 5+)
if (FrameworkDescription.Instance.IsCoreClr())
{
Log.Information("Initializing ServiceFabric instrumentation");
try
{
ServiceRemotingClient.StartTracing();
}
catch
{
// ignore
}
try
{
ServiceRemotingService.StartTracing();
}
catch
{
// ignore
}
}
#endif
Log.Debug("Initialization finished.");
}
#if !NETFRAMEWORK
private static void StartDiagnosticManager()
{
var observers = new List<DiagnosticObserver>();
if (!PlatformHelpers.AzureAppServices.Metadata.IsFunctionsApp)
{
// Not adding the `AspNetCoreDiagnosticObserver` is particularly important for Azure Functions.
// The AspNetCoreDiagnosticObserver will be loaded in a separate Assembly Load Context, breaking the connection of AsyncLocal
// This is because user code is loaded within the functions host in a separate context
observers.Add(new AspNetCoreDiagnosticObserver());
}
var diagnosticManager = new DiagnosticManager(observers);
diagnosticManager.Start();
DiagnosticManager.Instance = diagnosticManager;
}
#endif
}
}
| 1 | 25,091 | Am I right in thinking this completely avoids the situation where `enable_calltarget_state_by_ref` is `true`, but the managed integrations don't expect a by-ref argument? For example, if there's an exception here, that seems like a fatal problem, as we would have a mismatch with the JIT rewriting. We should disable instrumentation entirely in that case, shouldn't we? I.e. if this (or the existing `EnableByRefInstrumentation`) call fails, should we bail out of this method entirely, since we can't safely instrument? | DataDog-dd-trace-dotnet | .cs |
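The bail-out the reviewer asks about can be sketched as an early return during initialization — illustrative Python, with `native` standing in hypothetically for the native-library interface rather than the actual tracer API:

```python
import logging

log = logging.getLogger("instrumentation")


def initialize(native) -> bool:
    # If switching to by-ref calling conventions fails, the managed
    # integrations and the JIT rewriting would disagree about signatures,
    # so skip registering any integration definitions at all.
    try:
        native.enable_by_ref_instrumentation()
        native.enable_calltarget_state_by_ref()
    except Exception:
        log.exception("By-ref setup failed; instrumentation disabled entirely.")
        return False

    native.initialize_profiler()   # only reached when by-ref setup succeeded
    return True
```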
@@ -40,6 +40,15 @@ const (
package {{ .Package }}
+type ResourceTypes int
+
+const (
+ Unknown ResourceTypes = iota
+ {{- range .KnownTypes }}
+ {{ . }}
+ {{- end }}
+)
+
// Instance describes a single resource annotation
type Instance struct {
// The name of the annotation. | 1 | // Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// A simple program that consumes a YAML file describing Kubernetes resource annotations and produces as output
// a Go source file providing references to those annotations, and an HTML documentation file describing those
// annotations (for use on istio.io)
package main
import (
"bytes"
"flag"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"text/template"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
)
const (
outputTemplate = `
// GENERATED FILE -- DO NOT EDIT
package {{ .Package }}
// Instance describes a single resource annotation
type Instance struct {
// The name of the annotation.
Name string
// Description of the annotation.
Description string
// Hide the existence of this annotation when outputting usage information.
Hidden bool
// Mark this annotation as deprecated when generating usage information.
Deprecated bool
}
var (
{{ range .Annotations }}
{{.VariableName}} = Instance {
Name: "{{ .Name }}",
Description: {{ wordWrap .Description 24 }},
Hidden: {{ .Hidden }},
Deprecated: {{ .Deprecated }},
}
{{ end }}
)`
htmlOutputTemplate = `---
title: Resource Annotations
description: Resource annotations used by Istio.
location: https://istio.io/docs/reference/config/annotations.html
weight: 29
---
<p>
This page presents the various <a href="https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/">resource annotations</a> that
Istio supports to control its behavior.
</p>
<table class="annotations">
<thead>
<tr>
<th>Annotation Name</th>
<th>Resource Types</th>
<th>Description</th>
</tr>
</thead>
<tbody>
{{ range .Annotations }}
{{ if not .Hidden }}
{{ if .Deprecated }}
<tr class="deprecated">
{{ else }}
<tr>
{{ end }}
<td><code>{{ .Name }}</code></td>
<td>{{ .Resources }}</td>
<td>{{ .Description }}</td>
</tr>
{{ end }}
{{ end }}
</tbody>
</table>
`
)
var (
input string
output string
htmlOutput string
nameSeparator = regexp.MustCompile(`[._\-]`)
rootCmd = cobra.Command{
Use: "annotations_prep",
Short: "Generates a Go source file and HTML file containing annotations.",
Long: "Generates a Go source file and HTML file containing annotation definitions based on an input YAML file.",
Run: func(cmd *cobra.Command, args []string) {
yamlContent, err := ioutil.ReadFile(input)
if err != nil {
log.Fatalf("unable to read input file: %v", err)
}
// Unmarshal the file.
var cfg Configuration
if err = yaml.Unmarshal(yamlContent, &cfg); err != nil {
log.Fatalf("error parsing input file: %v", err)
}
// Generate variable names if not provided in the yaml.
for i := range cfg.Annotations {
if cfg.Annotations[i].Name == "" {
log.Fatalf("annotation %d in input file missing name", i)
}
if cfg.Annotations[i].VariableName == "" {
cfg.Annotations[i].VariableName = generateVariableName(cfg.Annotations[i].Name)
}
}
// sort by name
sort.Slice(cfg.Annotations, func(i, j int) bool {
return strings.Compare(cfg.Annotations[i].Name, cfg.Annotations[j].Name) < 0
})
// Create the output file template.
t, err := template.New("annoTemplate").Funcs(template.FuncMap{
"wordWrap": wordWrap,
}).Parse(outputTemplate)
if err != nil {
log.Fatalf("failed parsing annotation template: %v", err)
}
// Generate the Go source.
var goSource bytes.Buffer
if err := t.Execute(&goSource, map[string]interface{}{
"Package": getPackage(),
"Annotations": cfg.Annotations,
}); err != nil {
log.Fatalf("failed generating output Go source code %s: %v", output, err)
}
if err := ioutil.WriteFile(output, goSource.Bytes(), 0666); err != nil {
log.Fatalf("Failed writing to output file %s: %v", output, err)
}
if htmlOutput != "" {
// Create the HTML output file template.
t, err = template.New("htmlOutputTemplate").Funcs(template.FuncMap{
"wordWrap": wordWrap,
}).Parse(htmlOutputTemplate)
if err != nil {
log.Fatalf("failed parsing HTML output template: %v", err)
}
// Generate the HTML file.
var htmlFile bytes.Buffer
if err := t.Execute(&htmlFile, map[string]interface{}{
"Package": getPackage(),
"Annotations": cfg.Annotations,
}); err != nil {
log.Fatalf("failed generating output HTML file %s: %v", output, err)
}
if err := ioutil.WriteFile(htmlOutput, htmlFile.Bytes(), 0666); err != nil {
log.Fatalf("Failed writing to output file %s: %v", htmlOutput, err)
}
}
},
}
)
func init() {
rootCmd.PersistentFlags().StringVar(&input, "input", "",
"Input YAML file to be parsed.")
rootCmd.PersistentFlags().StringVar(&output, "output", "",
"Output Go file to be generated.")
rootCmd.PersistentFlags().StringVar(&htmlOutput, "html_output", "",
"Output HTML file to be generated.")
flag.CommandLine.VisitAll(func(gf *flag.Flag) {
rootCmd.PersistentFlags().AddGoFlag(gf)
})
}
func main() {
if err := rootCmd.Execute(); err != nil {
os.Exit(-1)
}
}
type AnnotationVariable struct {
Instance
VariableName string `json:"variableName"`
}
type Configuration struct {
Annotations []AnnotationVariable `json:"annotations"`
}
// Instance describes a single resource annotation
type Instance struct {
// The name of the annotation.
Name string `json:"name"`
// Description of the annotation.
Description string `json:"description"`
// Hide the existence of this annotation when outputting usage information.
Hidden bool `json:"hidden"`
// Mark this annotation as deprecated when generating usage information.
Deprecated bool `json:"deprecated"`
// Indicates the types of resources this annotation can be applied to.
Resources []string `json:"resources"`
}
func getPackage() string {
return filepath.Base(filepath.Dir(output))
}
func generateVariableName(annoName string) string {
// Split the annotation name to separate the namespace/name portions.
parts := strings.Split(annoName, "/")
ns := parts[0]
name := parts[1]
// First, process the namespace portion ...
// Strip .istio.io from the namespace portion of the annotation name.
ns = strings.TrimSuffix(ns, ".istio.io")
// Separate the words by spaces and capitalize each word.
ns = strings.ReplaceAll(ns, ".", " ")
ns = strings.Title(ns)
// Reverse the namespace words so that they increase in specificity from left to right.
nsParts := strings.Split(ns, " ")
ns = ""
for i := len(nsParts) - 1; i >= 0; i-- {
ns += nsParts[i]
}
// Now, process the name portion ...
// Separate the words with spaces and capitalize each word.
name = nameSeparator.ReplaceAllString(name, " ")
name = strings.Title(name)
// Remove the spaces to generate a camel case variable name.
name = strings.ReplaceAll(name, " ", "")
// Concatenate the names together.
return ns + name
}
func wordWrap(in string, indent int) string {
words := strings.Split(in, " ")
maxLineLength := 80
out := ""
line := ""
for len(words) > 0 {
// Take the next word.
word := words[0]
words = words[1:]
if indent+len(line)+len(word) > maxLineLength {
// Need to word wrap - emit the current line.
out += "\"" + line + " \""
line = ""
// Wrap to the start of the next line.
out += "+\n"
// Indent to the start position of the next line.
for i := 0; i < indent; i++ {
out += " "
}
}
// Add the word to the current line.
if len(line) > 0 {
line += " "
}
line += word
}
// Emit the final line
out += "\"" + line + "\""
return out
}
| 1 | 7,535 | nit: spaces seem off | istio-tools | go |
@@ -20,7 +20,7 @@
#include "PBFParser.h"
-PBFParser::PBFParser(const char * fileName) : externalMemory(NULL){
+PBFParser::PBFParser(ExtractorCallbacks* em, ScriptingEnvironment& se, const char * fileName) : BaseParser( em, se ) {
GOOGLE_PROTOBUF_VERIFY_VERSION;
//TODO: What is the bottleneck here? Filling the queue or reading the stuff from disk?
//NOTE: With Lua scripting, it is parsing the stuff. I/O is virtually for free. | 1 | /*
open source routing machine
Copyright (C) Dennis Luxen, others 2010
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU AFFERO General Public License as published by
the Free Software Foundation; either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
or see http://www.gnu.org/licenses/agpl.txt.
*/
#include "PBFParser.h"
PBFParser::PBFParser(const char * fileName) : externalMemory(NULL){
GOOGLE_PROTOBUF_VERIFY_VERSION;
//TODO: What is the bottleneck here? Filling the queue or reading the stuff from disk?
//NOTE: With Lua scripting, it is parsing the stuff. I/O is virtually for free.
threadDataQueue = boost::make_shared<ConcurrentQueue<_ThreadData*> >( 2500 ); /* Max 2500 items in queue, hardcoded. */
input.open(fileName, std::ios::in | std::ios::binary);
if (!input) {
std::cerr << fileName << ": File not found." << std::endl;
}
#ifndef NDEBUG
blockCount = 0;
groupCount = 0;
#endif
}
void PBFParser::RegisterCallbacks(ExtractorCallbacks * em) {
externalMemory = em;
}
void PBFParser::RegisterScriptingEnvironment(ScriptingEnvironment & _se) {
scriptingEnvironment = _se;
if(lua_function_exists(scriptingEnvironment.getLuaStateForThreadID(0), "get_exceptions" )) {
//get list of turn restriction exceptions
try {
luabind::call_function<void>(
scriptingEnvironment.getLuaStateForThreadID(0),
"get_exceptions",
boost::ref(restriction_exceptions_vector)
);
INFO("Found " << restriction_exceptions_vector.size() << " exceptions to turn restriction");
BOOST_FOREACH(std::string & str, restriction_exceptions_vector) {
INFO("ignoring: " << str);
}
} catch (const luabind::error &er) {
lua_State* Ler=er.state();
report_errors(Ler, -1);
ERR(er.what());
}
} else {
INFO("Found no exceptions to turn restrictions");
}
}
PBFParser::~PBFParser() {
if(input.is_open())
input.close();
// Clean up any leftover ThreadData objects in the queue
_ThreadData* td;
while (threadDataQueue->try_pop(td)) {
delete td;
}
google::protobuf::ShutdownProtobufLibrary();
#ifndef NDEBUG
DEBUG("parsed " << blockCount << " blocks from pbf with " << groupCount << " groups");
#endif
}
inline bool PBFParser::Init() {
_ThreadData initData;
/** read Header */
if(!readPBFBlobHeader(input, &initData)) {
return false;
}
if(readBlob(input, &initData)) {
if(!initData.PBFHeaderBlock.ParseFromArray(&(initData.charBuffer[0]), initData.charBuffer.size() ) ) {
std::cerr << "[error] Header not parseable!" << std::endl;
return false;
}
for(int i = 0, featureSize = initData.PBFHeaderBlock.required_features_size(); i < featureSize; ++i) {
const std::string& feature = initData.PBFHeaderBlock.required_features( i );
bool supported = false;
if ( "OsmSchema-V0.6" == feature )
supported = true;
else if ( "DenseNodes" == feature )
supported = true;
if ( !supported ) {
std::cerr << "[error] required feature not supported: " << feature.data() << std::endl;
return false;
}
}
} else {
std::cerr << "[error] blob not loaded!" << std::endl;
}
return true;
}
inline void PBFParser::ReadData() {
bool keepRunning = true;
do {
_ThreadData *threadData = new _ThreadData();
keepRunning = readNextBlock(input, threadData);
if (keepRunning)
threadDataQueue->push(threadData);
else {
threadDataQueue->push(NULL); // No more data to read, parse stops when NULL encountered
delete threadData;
}
} while(keepRunning);
}
inline void PBFParser::ParseData() {
while (true) {
_ThreadData *threadData;
threadDataQueue->wait_and_pop(threadData);
if (threadData == NULL) {
INFO("Parse Data Thread Finished");
threadDataQueue->push(NULL); // Signal end of data for other threads
break;
}
loadBlock(threadData);
for(int i = 0, groupSize = threadData->PBFprimitiveBlock.primitivegroup_size(); i < groupSize; ++i) {
threadData->currentGroupID = i;
loadGroup(threadData);
if(threadData->entityTypeIndicator == TypeNode)
parseNode(threadData);
if(threadData->entityTypeIndicator == TypeWay)
parseWay(threadData);
if(threadData->entityTypeIndicator == TypeRelation)
parseRelation(threadData);
if(threadData->entityTypeIndicator == TypeDenseNode)
parseDenseNode(threadData);
}
delete threadData;
threadData = NULL;
}
}
inline bool PBFParser::Parse() {
// Start the read and parse threads
boost::thread readThread(boost::bind(&PBFParser::ReadData, this));
//Open several parse threads that are synchronized before call to
boost::thread parseThread(boost::bind(&PBFParser::ParseData, this));
// Wait for the threads to finish
readThread.join();
parseThread.join();
return true;
}
inline void PBFParser::parseDenseNode(_ThreadData * threadData) {
const OSMPBF::DenseNodes& dense = threadData->PBFprimitiveBlock.primitivegroup( threadData->currentGroupID ).dense();
int denseTagIndex = 0;
int m_lastDenseID = 0;
int m_lastDenseLatitude = 0;
int m_lastDenseLongitude = 0;
ImportNode n;
std::vector<ImportNode> nodesToParse;
for(int i = 0, idSize = dense.id_size(); i < idSize; ++i) {
n.Clear();
m_lastDenseID += dense.id( i );
m_lastDenseLatitude += dense.lat( i );
m_lastDenseLongitude += dense.lon( i );
n.id = m_lastDenseID;
n.lat = 100000*( ( double ) m_lastDenseLatitude * threadData->PBFprimitiveBlock.granularity() +threadData-> PBFprimitiveBlock.lat_offset() ) / NANO;
n.lon = 100000*( ( double ) m_lastDenseLongitude * threadData->PBFprimitiveBlock.granularity() + threadData->PBFprimitiveBlock.lon_offset() ) / NANO;
while (denseTagIndex < dense.keys_vals_size()) {
const int tagValue = dense.keys_vals( denseTagIndex );
if(tagValue == 0) {
++denseTagIndex;
break;
}
const int keyValue = dense.keys_vals ( denseTagIndex+1 );
const std::string & key = threadData->PBFprimitiveBlock.stringtable().s(tagValue).data();
const std::string & value = threadData->PBFprimitiveBlock.stringtable().s(keyValue).data();
n.keyVals.Add(key, value);
denseTagIndex += 2;
}
nodesToParse.push_back(n);
}
unsigned endi_nodes = nodesToParse.size();
#pragma omp parallel for schedule ( guided )
for(unsigned i = 0; i < endi_nodes; ++i) {
ImportNode &n = nodesToParse[i];
/** Pass the unpacked node to the LUA call back **/
try {
luabind::call_function<int>(
scriptingEnvironment.getLuaStateForThreadID(omp_get_thread_num()),
"node_function",
boost::ref(n)
);
} catch (const luabind::error &er) {
lua_State* Ler=er.state();
report_errors(Ler, -1);
ERR(er.what());
}
// catch (...) {
// ERR("Unknown error occurred during PBF dense node parsing!");
// }
}
BOOST_FOREACH(ImportNode &n, nodesToParse) {
if(!externalMemory->nodeFunction(n))
std::cerr << "[PBFParser] dense node not parsed" << std::endl;
}
}
inline void PBFParser::parseNode(_ThreadData * ) {
ERR("Parsing of simple nodes not supported. PBF should use dense nodes");
}
inline void PBFParser::parseRelation(_ThreadData * threadData) {
//TODO: leave early, if relation is not a restriction
//TODO: reuse rawRestriction container
const OSMPBF::PrimitiveGroup& group = threadData->PBFprimitiveBlock.primitivegroup( threadData->currentGroupID );
for(int i = 0; i < group.relations_size(); ++i ) {
std::string exception_of_restriction_tag;
const OSMPBF::Relation& inputRelation = threadData->PBFprimitiveBlock.primitivegroup( threadData->currentGroupID ).relations(i);
bool isRestriction = false;
bool isOnlyRestriction = false;
for(int k = 0, endOfKeys = inputRelation.keys_size(); k < endOfKeys; ++k) {
const std::string & key = threadData->PBFprimitiveBlock.stringtable().s(inputRelation.keys(k));
const std::string & val = threadData->PBFprimitiveBlock.stringtable().s(inputRelation.vals(k));
if ("type" == key) {
if( "restriction" == val)
isRestriction = true;
else
break;
}
if ("restriction" == key) {
if(val.find("only_") == 0)
isOnlyRestriction = true;
}
if ("except" == key) {
exception_of_restriction_tag = val;
}
}
//Check if restriction shall be ignored
if(isRestriction && ("" != exception_of_restriction_tag) ) {
//Be warned, this is quadratic work here, but we assume that
//only a few exceptions are actually defined.
std::vector<std::string> tokenized_exception_tags_of_restriction;
boost::algorithm::split_regex(tokenized_exception_tags_of_restriction, exception_of_restriction_tag, boost::regex("[;][ ]*"));
BOOST_FOREACH(std::string & str, tokenized_exception_tags_of_restriction) {
if(restriction_exceptions_vector.end() != std::find(restriction_exceptions_vector.begin(), restriction_exceptions_vector.end(), str)) {
isRestriction = false;
break; //BOOST_FOREACH
}
}
}
if(isRestriction) {
int64_t lastRef = 0;
_RawRestrictionContainer currentRestrictionContainer(isOnlyRestriction);
for(int rolesIndex = 0; rolesIndex < inputRelation.roles_sid_size(); ++rolesIndex) {
std::string role(threadData->PBFprimitiveBlock.stringtable().s( inputRelation.roles_sid( rolesIndex ) ).data());
lastRef += inputRelation.memids(rolesIndex);
if(!("from" == role || "to" == role || "via" == role)) {
continue;
}
switch(inputRelation.types(rolesIndex)) {
case 0: //node
if("from" == role || "to" == role) //Only via should be a node
continue;
assert("via" == role);
if(UINT_MAX != currentRestrictionContainer.viaNode)
currentRestrictionContainer.viaNode = UINT_MAX;
assert(UINT_MAX == currentRestrictionContainer.viaNode);
currentRestrictionContainer.restriction.viaNode = lastRef;
break;
case 1: //way
assert("from" == role || "to" == role || "via" == role);
if("from" == role) {
currentRestrictionContainer.fromWay = lastRef;
}
if ("to" == role) {
currentRestrictionContainer.toWay = lastRef;
}
if ("via" == role) {
assert(currentRestrictionContainer.restriction.toNode == UINT_MAX);
currentRestrictionContainer.viaNode = lastRef;
}
break;
case 2: //relation, not used. relations relating to relations are evil.
continue;
assert(false);
break;
default: //should not happen
//cout << "unknown";
assert(false);
break;
}
}
if(!externalMemory->restrictionFunction(currentRestrictionContainer))
std::cerr << "[PBFParser] relation not parsed" << std::endl;
}
}
}
inline void PBFParser::parseWay(_ThreadData * threadData) {
ExtractionWay w;
std::vector<ExtractionWay> waysToParse(threadData->PBFprimitiveBlock.primitivegroup( threadData->currentGroupID ).ways_size());
for(int i = 0, ways_size = threadData->PBFprimitiveBlock.primitivegroup( threadData->currentGroupID ).ways_size(); i < ways_size; ++i) {
w.Clear();
const OSMPBF::Way& inputWay = threadData->PBFprimitiveBlock.primitivegroup( threadData->currentGroupID ).ways( i );
w.id = inputWay.id();
unsigned pathNode(0);
for(int i = 0; i < inputWay.refs_size(); ++i) {
pathNode += inputWay.refs(i);
w.path.push_back(pathNode);
}
assert(inputWay.keys_size() == inputWay.vals_size());
for(int i = 0; i < inputWay.keys_size(); ++i) {
const std::string & key = threadData->PBFprimitiveBlock.stringtable().s(inputWay.keys(i));
const std::string & val = threadData->PBFprimitiveBlock.stringtable().s(inputWay.vals(i));
w.keyVals.Add(key, val);
}
waysToParse.push_back(w);
}
unsigned endi_ways = waysToParse.size();
#pragma omp parallel for schedule ( guided )
for(unsigned i = 0; i < endi_ways; ++i) {
ExtractionWay & w = waysToParse[i];
/** Pass the unpacked way to the LUA call back **/
try {
luabind::call_function<int>(
scriptingEnvironment.getLuaStateForThreadID(omp_get_thread_num()),
"way_function",
boost::ref(w),
w.path.size()
);
} catch (const luabind::error &er) {
lua_State* Ler=er.state();
report_errors(Ler, -1);
ERR(er.what());
}
// catch (...) {
// ERR("Unknown error!");
// }
}
BOOST_FOREACH(ExtractionWay & w, waysToParse) {
if(!externalMemory->wayFunction(w)) {
std::cerr << "[PBFParser] way not parsed" << std::endl;
}
}
}
inline void PBFParser::loadGroup(_ThreadData * threadData) {
#ifndef NDEBUG
++groupCount;
#endif
const OSMPBF::PrimitiveGroup& group = threadData->PBFprimitiveBlock.primitivegroup( threadData->currentGroupID );
threadData->entityTypeIndicator = 0;
if ( group.nodes_size() != 0 ) {
threadData->entityTypeIndicator = TypeNode;
}
if ( group.ways_size() != 0 ) {
threadData->entityTypeIndicator = TypeWay;
}
if ( group.relations_size() != 0 ) {
threadData->entityTypeIndicator = TypeRelation;
}
if ( group.has_dense() ) {
threadData->entityTypeIndicator = TypeDenseNode;
assert( group.dense().id_size() != 0 );
}
assert( threadData->entityTypeIndicator != 0 );
}
inline void PBFParser::loadBlock(_ThreadData * threadData) {
#ifndef NDEBUG
++blockCount;
#endif
threadData->currentGroupID = 0;
threadData->currentEntityID = 0;
}
inline bool PBFParser::readPBFBlobHeader(std::fstream& stream, _ThreadData * threadData) {
int size(0);
stream.read((char *)&size, sizeof(int));
size = swapEndian(size);
if(stream.eof()) {
return false;
}
if ( size > MAX_BLOB_HEADER_SIZE || size < 0 ) {
return false;
}
char *data = new char[size];
stream.read(data, size*sizeof(data[0]));
bool dataSuccessfullyParsed = (threadData->PBFBlobHeader).ParseFromArray( data, size);
delete[] data;
return dataSuccessfullyParsed;
}
inline bool PBFParser::unpackZLIB(std::fstream &, _ThreadData * threadData) {
unsigned rawSize = threadData->PBFBlob.raw_size();
char* unpackedDataArray = new char[rawSize];
z_stream compressedDataStream;
compressedDataStream.next_in = ( unsigned char* ) threadData->PBFBlob.zlib_data().data();
compressedDataStream.avail_in = threadData->PBFBlob.zlib_data().size();
compressedDataStream.next_out = ( unsigned char* ) unpackedDataArray;
compressedDataStream.avail_out = rawSize;
compressedDataStream.zalloc = Z_NULL;
compressedDataStream.zfree = Z_NULL;
compressedDataStream.opaque = Z_NULL;
int ret = inflateInit( &compressedDataStream );
if ( ret != Z_OK ) {
std::cerr << "[error] failed to init zlib stream" << std::endl;
delete[] unpackedDataArray;
return false;
}
ret = inflate( &compressedDataStream, Z_FINISH );
if ( ret != Z_STREAM_END ) {
std::cerr << "[error] failed to inflate zlib stream" << std::endl;
std::cerr << "[error] Error type: " << ret << std::endl;
delete[] unpackedDataArray;
return false;
}
ret = inflateEnd( &compressedDataStream );
if ( ret != Z_OK ) {
std::cerr << "[error] failed to deinit zlib stream" << std::endl;
delete[] unpackedDataArray;
return false;
}
threadData->charBuffer.clear(); threadData->charBuffer.resize(rawSize);
std::copy(unpackedDataArray, unpackedDataArray + rawSize, threadData->charBuffer.begin());
delete[] unpackedDataArray;
return true;
}
inline bool PBFParser::unpackLZMA(std::fstream &, _ThreadData * ) {
return false;
}
inline bool PBFParser::readBlob(std::fstream& stream, _ThreadData * threadData) {
if(stream.eof())
return false;
const int size = threadData->PBFBlobHeader.datasize();
if ( size < 0 || size > MAX_BLOB_SIZE ) {
std::cerr << "[error] invalid Blob size:" << size << std::endl;
return false;
}
char* data = new char[size];
stream.read(data, sizeof(data[0])*size);
if ( !threadData->PBFBlob.ParseFromArray( data, size ) ) {
std::cerr << "[error] failed to parse blob" << std::endl;
delete[] data;
return false;
}
if ( threadData->PBFBlob.has_raw() ) {
const std::string& data = threadData->PBFBlob.raw();
threadData->charBuffer.clear();
threadData->charBuffer.resize( data.size() );
std::copy(data.begin(), data.end(), threadData->charBuffer.begin());
} else if ( threadData->PBFBlob.has_zlib_data() ) {
if ( !unpackZLIB(stream, threadData) ) {
std::cerr << "[error] zlib data encountered that could not be unpacked" << std::endl;
delete[] data;
return false;
}
} else if ( threadData->PBFBlob.has_lzma_data() ) {
if ( !unpackLZMA(stream, threadData) )
std::cerr << "[error] lzma data encountered that could not be unpacked" << std::endl;
delete[] data;
return false;
} else {
std::cerr << "[error] Blob contains no data" << std::endl;
delete[] data;
return false;
}
delete[] data;
return true;
}
bool PBFParser::readNextBlock(std::fstream& stream, _ThreadData * threadData) {
if(stream.eof()) {
return false;
}
if ( !readPBFBlobHeader(stream, threadData) ){
return false;
}
if ( threadData->PBFBlobHeader.type() != "OSMData" ) {
return false;
}
if ( !readBlob(stream, threadData) ) {
return false;
}
if ( !threadData->PBFprimitiveBlock.ParseFromArray( &(threadData->charBuffer[0]), threadData-> charBuffer.size() ) ) {
ERR("failed to parse PrimitiveBlock");
return false;
}
return true;
}
| 1 | 12,312 | Shouldn't the `em` parameter be called `ec`? | Project-OSRM-osrm-backend | cpp |
@@ -132,7 +132,7 @@ func validateTrustedOperators(o *Options) error {
if o.TrustedKeys == nil {
o.TrustedKeys = make([]string, 0, 4)
}
- o.TrustedKeys = append(o.TrustedKeys, opc.Issuer)
+ o.TrustedKeys = append(o.TrustedKeys, opc.Subject)
o.TrustedKeys = append(o.TrustedKeys, opc.SigningKeys...)
}
for _, key := range o.TrustedKeys { | 1 | // Copyright 2018-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"fmt"
"io/ioutil"
"net"
"regexp"
"strings"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nkeys"
)
var nscDecoratedRe = regexp.MustCompile(`\s*(?:(?:[-]{3,}[^\n]*[-]{3,}\n)(.+)(?:\n\s*[-]{3,}[^\n]*[-]{3,}[\n]*))`)
// All JWTs once encoded start with this
const jwtPrefix = "eyJ"
// ReadOperatorJWT will read a jwt file for an operator claim. This can be a decorated file.
func ReadOperatorJWT(jwtfile string) (*jwt.OperatorClaims, error) {
contents, err := ioutil.ReadFile(jwtfile)
if err != nil {
// Check to see if the JWT has been inlined.
if !strings.HasPrefix(jwtfile, jwtPrefix) {
return nil, err
}
// We may have an inline jwt here.
contents = []byte(jwtfile)
}
defer wipeSlice(contents)
var claim string
items := nscDecoratedRe.FindAllSubmatch(contents, -1)
if len(items) == 0 {
claim = string(contents)
} else {
// First result should be the JWT.
// We copy here so that if the file contained a seed file too we wipe appropriately.
raw := items[0][1]
tmp := make([]byte, len(raw))
copy(tmp, raw)
claim = string(tmp)
}
opc, err := jwt.DecodeOperatorClaims(claim)
if err != nil {
return nil, err
}
return opc, nil
}
// Just wipe slice with 'x', for clearing contents of nkey seed file.
func wipeSlice(buf []byte) {
for i := range buf {
buf[i] = 'x'
}
}
// validateTrustedOperators will check that we do not have conflicts with
// assigned trusted keys and trusted operators. If operators are defined we
// will expand the trusted keys in options.
func validateTrustedOperators(o *Options) error {
if len(o.TrustedOperators) == 0 {
return nil
}
if o.AllowNewAccounts {
return fmt.Errorf("operators do not allow dynamic creation of new accounts")
}
if o.AccountResolver == nil {
return fmt.Errorf("operators require an account resolver to be configured")
}
if len(o.Accounts) > 0 {
return fmt.Errorf("operators do not allow Accounts to be configured directly")
}
if len(o.Users) > 0 || len(o.Nkeys) > 0 {
return fmt.Errorf("operators do not allow users to be configured directly")
}
if len(o.TrustedOperators) > 0 && len(o.TrustedKeys) > 0 {
return fmt.Errorf("conflicting options for 'TrustedKeys' and 'TrustedOperators'")
}
if o.SystemAccount != "" {
foundSys := false
foundNonEmpty := false
for _, op := range o.TrustedOperators {
if op.SystemAccount != "" {
foundNonEmpty = true
}
if op.SystemAccount == o.SystemAccount {
foundSys = true
break
}
}
if foundNonEmpty && !foundSys {
return fmt.Errorf("system_account in config and operator JWT must be identical")
}
}
srvMajor, srvMinor, srvUpdate, _ := jwt.ParseServerVersion(strings.Split(VERSION, "-")[0])
for _, opc := range o.TrustedOperators {
if major, minor, update, err := jwt.ParseServerVersion(opc.AssertServerVersion); err != nil {
return fmt.Errorf("operator %s expects version %s got error instead: %s",
opc.Subject, opc.AssertServerVersion, err)
} else if major > srvMajor {
return fmt.Errorf("operator %s expected major version %d > server major version %d",
opc.Subject, major, srvMajor)
} else if srvMajor > major {
} else if minor > srvMinor {
return fmt.Errorf("operator %s expected minor version %d > server minor version %d",
opc.Subject, minor, srvMinor)
} else if srvMinor > minor {
} else if update > srvUpdate {
return fmt.Errorf("operator %s expected update version %d > server update version %d",
opc.Subject, update, srvUpdate)
}
}
// If we have operators, fill in the trusted keys.
// FIXME(dlc) - We had TrustedKeys before TrustedOperators. The jwt.OperatorClaims
// has a DidSign(). Use that longer term. For now we can expand in place.
for _, opc := range o.TrustedOperators {
if o.TrustedKeys == nil {
o.TrustedKeys = make([]string, 0, 4)
}
o.TrustedKeys = append(o.TrustedKeys, opc.Issuer)
o.TrustedKeys = append(o.TrustedKeys, opc.SigningKeys...)
}
for _, key := range o.TrustedKeys {
if !nkeys.IsValidPublicOperatorKey(key) {
return fmt.Errorf("trusted Keys %q are required to be a valid public operator nkey", key)
}
}
return nil
}
func validateSrc(claims *jwt.UserClaims, host string) bool {
if claims == nil {
return false
} else if len(claims.Src) == 0 {
return true
} else if host == "" {
return false
}
ip := net.ParseIP(host)
if ip == nil {
return false
}
for _, cidr := range claims.Src {
if _, net, err := net.ParseCIDR(cidr); err != nil {
return false // should not happen as this jwt is invalid
} else if net.Contains(ip) {
return true
}
}
return false
}
func validateTimes(claims *jwt.UserClaims) (bool, time.Duration) {
if claims == nil {
return false, time.Duration(0)
} else if len(claims.Times) == 0 {
return true, time.Duration(0)
}
now := time.Now()
loc := time.Local
if claims.Locale != "" {
var err error
if loc, err = time.LoadLocation(claims.Locale); err != nil {
return false, time.Duration(0) // parsing not expected to fail at this point
}
now = now.In(loc)
}
for _, timeRange := range claims.Times {
y, m, d := now.Date()
m = m - 1
d = d - 1
start, err := time.ParseInLocation("15:04:05", timeRange.Start, loc)
if err != nil {
return false, time.Duration(0) // parsing not expected to fail at this point
}
end, err := time.ParseInLocation("15:04:05", timeRange.End, loc)
if err != nil {
return false, time.Duration(0) // parsing not expected to fail at this point
}
if start.After(end) {
start = start.AddDate(y, int(m), d)
d++ // the intent is to be the next day
} else {
start = start.AddDate(y, int(m), d)
}
if start.Before(now) {
end = end.AddDate(y, int(m), d)
if end.After(now) {
return true, end.Sub(now)
}
}
}
return false, time.Duration(0)
}
| 1 | 11,556 | Please change this to use o.DidSign(o), as it implements this logic without performing any of these checks, which duplicate something the JWT library can already do correctly. If the JWT deserialized properly, and DidSign returns true, the JWT was signed by one of the listed operator keys. | nats-io-nats-server | go |
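A rough sketch of the direction the reviewer points at, assuming the OperatorClaims.DidSign(claims) method that the file's own FIXME comment already mentions; the helper name and the claims argument are illustrative, not code from the server:

// signedByTrustedOperator reports whether any configured trusted operator
// signed the given claim, instead of hand-expanding operator subjects and
// signing keys into o.TrustedKeys.
func signedByTrustedOperator(o *Options, claims jwt.Claims) bool {
	for _, opc := range o.TrustedOperators {
		if opc.DidSign(claims) {
			return true
		}
	}
	return false
}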
@@ -0,0 +1,19 @@
+package de.danoeh.antennapod.core.util.comparator;
+
+import java.util.Comparator;
+
+import de.danoeh.antennapod.core.feed.FeedItem;
+import de.danoeh.antennapod.core.feed.SearchResult;
+
+public class InReverseChronologicalOrder implements Comparator<SearchResult> {
+ /**
+     * Compare items so that they sort in reverse chronological order (newest first).
+ */
+ @Override
+ public int compare(SearchResult o1, SearchResult o2) {
+ if ((o1.getComponent() instanceof FeedItem) &&
+ (o2.getComponent() instanceof FeedItem))
+ return ((FeedItem) o2.getComponent()).getPubDate().compareTo(((FeedItem) o1.getComponent()).getPubDate());
+ return 0;
+ }
+} | 1 | 1 | 14,711 | Talking about style, IMO extracting either `getComponent()` or `getPubDate()` to variables would make this line easier to read. | AntennaPod-AntennaPod | java |
|
@@ -0,0 +1,18 @@
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package task
+
+// Option defines a function that can be executed in some task API methods in order to perform custom configurations
+// of the task
+type Option func(*Task) error | 1 | 1 | 25,263 | package header missing | aws-amazon-ecs-agent | go |
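The Option type above follows the functional-options pattern. A small, hypothetical sketch of how such an option could be defined and applied; the withStartSequenceNumber name and the field it sets are illustrative assumptions, not part of this change:

// withStartSequenceNumber returns an Option that sets the sequence number a
// task should wait on before starting (field name assumed for the sketch).
func withStartSequenceNumber(seqnum int64) Option {
	return func(task *Task) error {
		task.StartSequenceNumber = seqnum
		return nil
	}
}

// A task API method accepting variadic options would then apply them in order:
//
//	for _, opt := range opts {
//		if err := opt(task); err != nil {
//			return err
//		}
//	}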
|
@@ -11,7 +11,11 @@ options._catchError = function(error, newVNode, oldVNode) {
for (; (vnode = vnode._parent); ) {
if ((component = vnode._component) && component._childDidSuspend) {
// Don't call oldCatchError if we found a Suspense
- return component._childDidSuspend(error, newVNode._component);
+ return component._childDidSuspend(
+ error,
+ newVNode._component,
+ oldVNode && oldVNode._hydrateDom && oldVNode._hydrating
+ );
}
}
} | 1 | import { Component, createElement, options } from 'preact';
import { assign } from './util';
const oldCatchError = options._catchError;
options._catchError = function(error, newVNode, oldVNode) {
if (error.then) {
/** @type {import('./internal').Component} */
let component;
let vnode = newVNode;
for (; (vnode = vnode._parent); ) {
if ((component = vnode._component) && component._childDidSuspend) {
// Don't call oldCatchError if we found a Suspense
return component._childDidSuspend(error, newVNode._component);
}
}
}
oldCatchError(error, newVNode, oldVNode);
};
function detachedClone(vnode) {
if (vnode) {
vnode = assign({}, vnode);
vnode._component = null;
vnode._children = vnode._children && vnode._children.map(detachedClone);
}
return vnode;
}
// having custom inheritance instead of a class here saves a lot of bytes
export function Suspense(props) {
// we do not call super here to golf some bytes...
this._suspensions = 0;
this._detachOnNextRender = null;
}
// Things we do here to save some bytes but are not proper JS inheritance:
// - call `new Component()` as the prototype
// - do not set `Suspense.prototype.constructor` to `Suspense`
Suspense.prototype = new Component();
/**
* @param {Promise} promise The thrown promise
* @param {Component<any, any>} suspendingComponent The suspending component
*/
Suspense.prototype._childDidSuspend = function(promise, suspendingComponent) {
/** @type {import('./internal').SuspenseComponent} */
const c = this;
const resolve = suspended(c._vnode);
let resolved = false;
const onResolved = () => {
if (resolved) return;
resolved = true;
if (resolve) {
resolve(onSuspensionComplete);
} else {
onSuspensionComplete();
}
};
suspendingComponent._suspendedComponentWillUnmount =
suspendingComponent.componentWillUnmount;
suspendingComponent.componentWillUnmount = () => {
onResolved();
if (suspendingComponent._suspendedComponentWillUnmount) {
suspendingComponent._suspendedComponentWillUnmount();
}
};
const onSuspensionComplete = () => {
if (!--c._suspensions) {
c._vnode._children[0] = c.state._suspended;
c.setState({ _suspended: (c._detachOnNextRender = null) });
}
};
if (!c._suspensions++) {
c.setState({ _suspended: (c._detachOnNextRender = c._vnode._children[0]) });
}
promise.then(onResolved, onResolved);
};
Suspense.prototype.render = function(props, state) {
if (this._detachOnNextRender) {
this._vnode._children[0] = detachedClone(this._detachOnNextRender);
this._detachOnNextRender = null;
}
return [
createElement(Component, null, state._suspended ? null : props.children),
state._suspended && props.fallback
];
};
/**
* Checks and calls the parent component's _suspended method, passing in the
* suspended vnode. This is a way for a parent (e.g. SuspenseList) to get notified
* that one of its children/descendants suspended.
*
* The parent MAY return a callback. The callback will get called when the
* suspension resolves, notifying the parent of the fact.
* Moreover, the callback gets function `unsuspend` as a parameter. The resolved
* child descendant will not actually get unsuspended until `unsuspend` gets called.
* This is a way for the parent to delay unsuspending.
*
* If the parent does not return a callback then the resolved vnode
* gets unsuspended immediately when it resolves.
*
* @param {import('../src/internal').VNode} vnode
* @returns {((unsuspend: () => void) => void)?}
*/
export function suspended(vnode) {
let component = vnode._parent._component;
return component && component._suspended && component._suspended(vnode);
}
export function lazy(loader) {
let prom;
let component;
let error;
function Lazy(props) {
if (!prom) {
prom = loader();
prom.then(
exports => {
component = exports.default || exports;
},
e => {
error = e;
}
);
}
if (error) {
throw error;
}
if (!component) {
throw prom;
}
return createElement(component, props);
}
Lazy.displayName = 'Lazy';
Lazy._forwarded = true;
return Lazy;
}
| 1 | 15,048 | Just a thought (doesn't change the output or anything) - do you think we'll ever want to access the other properties of `oldVNode` from within `_childDidSuspend`? If so we could pass `oldVNode` here and then check these properties in the _childDidSuspend implementation. I don't have strong feelings either way, just occurred to me. | preactjs-preact | js |
@@ -0,0 +1,15 @@
+<?php
+
+namespace Sonata\MediaBundle\PHPCR;
+
+use Doctrine\ODM\PHPCR\DocumentRepository;
+use Doctrine\ODM\PHPCR\Id\RepositoryIdInterface;
+
+class GalleryHasMediaRepository extends DocumentRepository implements RepositoryIdInterface
+{
+ public function generateId($document, $parent = null)
+ {
+ return '/cms/gallery-has-media/' . uniqid();
+ }
+}
+ | 1 | 1 | 6,386 | We should be configuring the base paths somewhere.. | sonata-project-SonataMediaBundle | php |
|
@@ -311,6 +311,17 @@ def test_ssd_anchor_generator():
else:
device = 'cpu'
+    # min_sizes and max_sizes must be set at the same time
+ with pytest.raises(AssertionError):
+ anchor_generator_cfg = dict(
+ type='SSDAnchorGenerator',
+ scale_major=False,
+ min_sizes=[48, 100, 150, 202, 253, 300],
+ max_sizes=None,
+ strides=[8, 16, 32, 64, 100, 300],
+ ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
+ build_anchor_generator(anchor_generator_cfg)
+
anchor_generator_cfg = dict(
type='SSDAnchorGenerator',
scale_major=False, | 1 | """
CommandLine:
pytest tests/test_utils/test_anchor.py
xdoctest tests/test_utils/test_anchor.py zero
"""
import pytest
import torch
def test_standard_points_generator():
from mmdet.core.anchor import build_prior_generator
    # test init
anchor_generator_cfg = dict(
type='MlvlPointGenerator', strides=[4, 8], offset=0)
anchor_generator = build_prior_generator(anchor_generator_cfg)
assert anchor_generator is not None
assert anchor_generator.num_base_priors == [1, 1]
# test_stride
from mmdet.core.anchor import MlvlPointGenerator
# Square strides
mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
mlvl_points_half_stride_generator = MlvlPointGenerator(
strides=[4, 10], offset=0.5)
assert mlvl_points.num_levels == 2
# assert self.num_levels == len(featmap_sizes)
with pytest.raises(AssertionError):
mlvl_points.grid_priors(featmap_sizes=[(2, 2)], device='cpu')
priors = mlvl_points.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], device='cpu')
priors_with_stride = mlvl_points.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], with_stride=True, device='cpu')
assert len(priors) == 2
# assert last dimension is (coord_x, coord_y, stride_w, stride_h).
assert priors_with_stride[0].size(1) == 4
assert priors_with_stride[0][0][2] == 4
assert priors_with_stride[0][0][3] == 4
assert priors_with_stride[1][0][2] == 10
assert priors_with_stride[1][0][3] == 10
stride_4_feat_2_2 = priors[0]
assert (stride_4_feat_2_2[1] - stride_4_feat_2_2[0]).sum() == 4
assert stride_4_feat_2_2.size(0) == 4
assert stride_4_feat_2_2.size(1) == 2
stride_10_feat_4_8 = priors[1]
assert (stride_10_feat_4_8[1] - stride_10_feat_4_8[0]).sum() == 10
assert stride_10_feat_4_8.size(0) == 4 * 8
assert stride_10_feat_4_8.size(1) == 2
# assert the offset of 0.5 * stride
priors_half_offset = mlvl_points_half_stride_generator.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], device='cpu')
assert (priors_half_offset[0][0] - priors[0][0]).sum() == 4 * 0.5 * 2
assert (priors_half_offset[1][0] - priors[1][0]).sum() == 10 * 0.5 * 2
if torch.cuda.is_available():
anchor_generator_cfg = dict(
type='MlvlPointGenerator', strides=[4, 8], offset=0)
anchor_generator = build_prior_generator(anchor_generator_cfg)
assert anchor_generator is not None
# Square strides
mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
mlvl_points_half_stride_generator = MlvlPointGenerator(
strides=[4, 10], offset=0.5)
assert mlvl_points.num_levels == 2
# assert self.num_levels == len(featmap_sizes)
with pytest.raises(AssertionError):
mlvl_points.grid_priors(featmap_sizes=[(2, 2)], device='cuda')
priors = mlvl_points.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], device='cuda')
priors_with_stride = mlvl_points.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], with_stride=True, device='cuda')
assert len(priors) == 2
# assert last dimension is (coord_x, coord_y, stride_w, stride_h).
assert priors_with_stride[0].size(1) == 4
assert priors_with_stride[0][0][2] == 4
assert priors_with_stride[0][0][3] == 4
assert priors_with_stride[1][0][2] == 10
assert priors_with_stride[1][0][3] == 10
stride_4_feat_2_2 = priors[0]
assert (stride_4_feat_2_2[1] - stride_4_feat_2_2[0]).sum() == 4
assert stride_4_feat_2_2.size(0) == 4
assert stride_4_feat_2_2.size(1) == 2
stride_10_feat_4_8 = priors[1]
assert (stride_10_feat_4_8[1] - stride_10_feat_4_8[0]).sum() == 10
assert stride_10_feat_4_8.size(0) == 4 * 8
assert stride_10_feat_4_8.size(1) == 2
# assert the offset of 0.5 * stride
priors_half_offset = mlvl_points_half_stride_generator.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], device='cuda')
assert (priors_half_offset[0][0] - priors[0][0]).sum() == 4 * 0.5 * 2
assert (priors_half_offset[1][0] - priors[1][0]).sum() == 10 * 0.5 * 2
def test_sparse_prior():
from mmdet.core.anchor import MlvlPointGenerator
mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long()
featmap_sizes = [(3, 5), (6, 4)]
grid_anchors = mlvl_points.grid_priors(
featmap_sizes=featmap_sizes, with_stride=False, device='cpu')
sparse_prior = mlvl_points.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[0],
level_idx=0,
device='cpu')
assert not sparse_prior.is_cuda
assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
sparse_prior = mlvl_points.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[1],
level_idx=1,
device='cpu')
assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
from mmdet.core.anchor import AnchorGenerator
mlvl_anchors = AnchorGenerator(
strides=[16, 32], ratios=[1.], scales=[1.], base_sizes=[4, 8])
prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long()
featmap_sizes = [(3, 5), (6, 4)]
grid_anchors = mlvl_anchors.grid_priors(
featmap_sizes=featmap_sizes, device='cpu')
sparse_prior = mlvl_anchors.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[0],
level_idx=0,
device='cpu')
assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
sparse_prior = mlvl_anchors.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[1],
level_idx=1,
device='cpu')
assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
# for ssd
from mmdet.core.anchor.anchor_generator import SSDAnchorGenerator
featmap_sizes = [(38, 38), (19, 19), (10, 10)]
anchor_generator = SSDAnchorGenerator(
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32],
ratios=[[2], [2, 3], [2, 3]])
ssd_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu')
for i in range(len(featmap_sizes)):
sparse_ssd_anchors = anchor_generator.sparse_priors(
prior_idxs=prior_indexs,
level_idx=i,
featmap_size=featmap_sizes[i],
device='cpu')
assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all()
# for yolo
from mmdet.core.anchor.anchor_generator import YOLOAnchorGenerator
featmap_sizes = [(38, 38), (19, 19), (10, 10)]
anchor_generator = YOLOAnchorGenerator(
strides=[32, 16, 8],
base_sizes=[
[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)],
])
yolo_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu')
for i in range(len(featmap_sizes)):
sparse_yolo_anchors = anchor_generator.sparse_priors(
prior_idxs=prior_indexs,
level_idx=i,
featmap_size=featmap_sizes[i],
device='cpu')
assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all()
if torch.cuda.is_available():
mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
prior_indexs = torch.Tensor([0, 3, 4, 5, 6, 7, 1, 2, 4, 5, 6,
9]).long().cuda()
featmap_sizes = [(6, 8), (6, 4)]
grid_anchors = mlvl_points.grid_priors(
featmap_sizes=featmap_sizes, with_stride=False, device='cuda')
sparse_prior = mlvl_points.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[0],
level_idx=0,
device='cuda')
assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
sparse_prior = mlvl_points.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[1],
level_idx=1,
device='cuda')
assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
assert sparse_prior.is_cuda
mlvl_anchors = AnchorGenerator(
strides=[16, 32],
ratios=[1., 2.5],
scales=[1., 5.],
base_sizes=[4, 8])
prior_indexs = torch.Tensor([4, 5, 6, 7, 0, 2, 50, 4, 5, 6,
9]).long().cuda()
featmap_sizes = [(13, 5), (16, 4)]
grid_anchors = mlvl_anchors.grid_priors(
featmap_sizes=featmap_sizes, device='cuda')
sparse_prior = mlvl_anchors.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[0],
level_idx=0,
device='cuda')
assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
sparse_prior = mlvl_anchors.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[1],
level_idx=1,
device='cuda')
assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
# for ssd
from mmdet.core.anchor.anchor_generator import SSDAnchorGenerator
featmap_sizes = [(38, 38), (19, 19), (10, 10)]
anchor_generator = SSDAnchorGenerator(
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32],
ratios=[[2], [2, 3], [2, 3]])
ssd_anchors = anchor_generator.grid_anchors(
featmap_sizes, device='cuda')
for i in range(len(featmap_sizes)):
sparse_ssd_anchors = anchor_generator.sparse_priors(
prior_idxs=prior_indexs,
level_idx=i,
featmap_size=featmap_sizes[i],
device='cuda')
assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all()
# for yolo
from mmdet.core.anchor.anchor_generator import YOLOAnchorGenerator
featmap_sizes = [(38, 38), (19, 19), (10, 10)]
anchor_generator = YOLOAnchorGenerator(
strides=[32, 16, 8],
base_sizes=[
[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)],
])
yolo_anchors = anchor_generator.grid_anchors(
featmap_sizes, device='cuda')
for i in range(len(featmap_sizes)):
sparse_yolo_anchors = anchor_generator.sparse_priors(
prior_idxs=prior_indexs,
level_idx=i,
featmap_size=featmap_sizes[i],
device='cuda')
assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all()
def test_standard_anchor_generator():
from mmdet.core.anchor import build_anchor_generator
anchor_generator_cfg = dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8])
anchor_generator = build_anchor_generator(anchor_generator_cfg)
assert anchor_generator.num_base_priors == \
anchor_generator.num_base_anchors
assert anchor_generator.num_base_priors == [3, 3]
assert anchor_generator is not None
def test_strides():
from mmdet.core import AnchorGenerator
# Square strides
self = AnchorGenerator([10], [1.], [1.], [10])
anchors = self.grid_anchors([(2, 2)], device='cpu')
expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
[-5., 5., 5., 15.], [5., 5., 15., 15.]])
assert torch.equal(anchors[0], expected_anchors)
# Different strides in x and y direction
self = AnchorGenerator([(10, 20)], [1.], [1.], [10])
anchors = self.grid_anchors([(2, 2)], device='cpu')
expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
[-5., 15., 5., 25.], [5., 15., 15., 25.]])
assert torch.equal(anchors[0], expected_anchors)
def test_ssd_anchor_generator():
from mmdet.core.anchor import build_anchor_generator
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
anchor_generator_cfg = dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
anchor_generator = build_anchor_generator(anchor_generator_cfg)
# check base anchors
expected_base_anchors = [
torch.Tensor([[-6.5000, -6.5000, 14.5000, 14.5000],
[-11.3704, -11.3704, 19.3704, 19.3704],
[-10.8492, -3.4246, 18.8492, 11.4246],
[-3.4246, -10.8492, 11.4246, 18.8492]]),
torch.Tensor([[-14.5000, -14.5000, 30.5000, 30.5000],
[-25.3729, -25.3729, 41.3729, 41.3729],
[-23.8198, -7.9099, 39.8198, 23.9099],
[-7.9099, -23.8198, 23.9099, 39.8198],
[-30.9711, -4.9904, 46.9711, 20.9904],
[-4.9904, -30.9711, 20.9904, 46.9711]]),
torch.Tensor([[-33.5000, -33.5000, 65.5000, 65.5000],
[-45.5366, -45.5366, 77.5366, 77.5366],
[-54.0036, -19.0018, 86.0036, 51.0018],
[-19.0018, -54.0036, 51.0018, 86.0036],
[-69.7365, -12.5788, 101.7365, 44.5788],
[-12.5788, -69.7365, 44.5788, 101.7365]]),
torch.Tensor([[-44.5000, -44.5000, 108.5000, 108.5000],
[-56.9817, -56.9817, 120.9817, 120.9817],
[-76.1873, -22.0937, 140.1873, 86.0937],
[-22.0937, -76.1873, 86.0937, 140.1873],
[-100.5019, -12.1673, 164.5019, 76.1673],
[-12.1673, -100.5019, 76.1673, 164.5019]]),
torch.Tensor([[-53.5000, -53.5000, 153.5000, 153.5000],
[-66.2185, -66.2185, 166.2185, 166.2185],
[-96.3711, -23.1855, 196.3711, 123.1855],
[-23.1855, -96.3711, 123.1855, 196.3711]]),
torch.Tensor([[19.5000, 19.5000, 280.5000, 280.5000],
[6.6342, 6.6342, 293.3658, 293.3658],
[-34.5549, 57.7226, 334.5549, 242.2774],
[57.7226, -34.5549, 242.2774, 334.5549]]),
]
base_anchors = anchor_generator.base_anchors
for i, base_anchor in enumerate(base_anchors):
assert base_anchor.allclose(expected_base_anchors[i])
# check valid flags
expected_valid_pixels = [5776, 2166, 600, 150, 36, 4]
multi_level_valid_flags = anchor_generator.valid_flags(
featmap_sizes, (300, 300), device)
for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
assert single_level_valid_flag.sum() == expected_valid_pixels[i]
# check number of base anchors for each level
assert anchor_generator.num_base_anchors == [4, 6, 6, 6, 4, 4]
# check anchor generation
anchors = anchor_generator.grid_anchors(featmap_sizes, device)
assert len(anchors) == 6
def test_anchor_generator_with_tuples():
from mmdet.core.anchor import build_anchor_generator
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
anchor_generator_cfg = dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
anchor_generator = build_anchor_generator(anchor_generator_cfg)
anchors = anchor_generator.grid_anchors(featmap_sizes, device)
anchor_generator_cfg_tuples = dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[(8, 8), (16, 16), (32, 32), (64, 64), (100, 100), (300, 300)],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
anchor_generator_tuples = build_anchor_generator(
anchor_generator_cfg_tuples)
anchors_tuples = anchor_generator_tuples.grid_anchors(
featmap_sizes, device)
for anchor, anchor_tuples in zip(anchors, anchors_tuples):
assert torch.equal(anchor, anchor_tuples)
def test_yolo_anchor_generator():
from mmdet.core.anchor import build_anchor_generator
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
anchor_generator_cfg = dict(
type='YOLOAnchorGenerator',
strides=[32, 16, 8],
base_sizes=[
[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)],
])
featmap_sizes = [(14, 18), (28, 36), (56, 72)]
anchor_generator = build_anchor_generator(anchor_generator_cfg)
# check base anchors
expected_base_anchors = [
torch.Tensor([[-42.0000, -29.0000, 74.0000, 61.0000],
[-62.0000, -83.0000, 94.0000, 115.0000],
[-170.5000, -147.0000, 202.5000, 179.0000]]),
torch.Tensor([[-7.0000, -22.5000, 23.0000, 38.5000],
[-23.0000, -14.5000, 39.0000, 30.5000],
[-21.5000, -51.5000, 37.5000, 67.5000]]),
torch.Tensor([[-1.0000, -2.5000, 9.0000, 10.5000],
[-4.0000, -11.0000, 12.0000, 19.0000],
[-12.5000, -7.5000, 20.5000, 15.5000]])
]
base_anchors = anchor_generator.base_anchors
for i, base_anchor in enumerate(base_anchors):
assert base_anchor.allclose(expected_base_anchors[i])
# check number of base anchors for each level
assert anchor_generator.num_base_anchors == [3, 3, 3]
# check anchor generation
anchors = anchor_generator.grid_anchors(featmap_sizes, device)
assert len(anchors) == 3
def test_retina_anchor():
from mmdet.models import build_head
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
# head configs modified from
# configs/nas_fpn/retinanet_r50_fpn_crop640_50e.py
bbox_head = dict(
type='RetinaSepBNHead',
num_classes=4,
num_ins=5,
in_channels=4,
stacked_convs=1,
feat_channels=4,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]))
retina_head = build_head(bbox_head)
assert retina_head.anchor_generator is not None
# use the featmap sizes in NASFPN setting to test retina head
featmap_sizes = [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
# check base anchors
expected_base_anchors = [
torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],
[-28.5088, -14.2544, 28.5088, 14.2544],
[-35.9188, -17.9594, 35.9188, 17.9594],
[-16.0000, -16.0000, 16.0000, 16.0000],
[-20.1587, -20.1587, 20.1587, 20.1587],
[-25.3984, -25.3984, 25.3984, 25.3984],
[-11.3137, -22.6274, 11.3137, 22.6274],
[-14.2544, -28.5088, 14.2544, 28.5088],
[-17.9594, -35.9188, 17.9594, 35.9188]]),
torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],
[-57.0175, -28.5088, 57.0175, 28.5088],
[-71.8376, -35.9188, 71.8376, 35.9188],
[-32.0000, -32.0000, 32.0000, 32.0000],
[-40.3175, -40.3175, 40.3175, 40.3175],
[-50.7968, -50.7968, 50.7968, 50.7968],
[-22.6274, -45.2548, 22.6274, 45.2548],
[-28.5088, -57.0175, 28.5088, 57.0175],
[-35.9188, -71.8376, 35.9188, 71.8376]]),
torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],
[-114.0350, -57.0175, 114.0350, 57.0175],
[-143.6751, -71.8376, 143.6751, 71.8376],
[-64.0000, -64.0000, 64.0000, 64.0000],
[-80.6349, -80.6349, 80.6349, 80.6349],
[-101.5937, -101.5937, 101.5937, 101.5937],
[-45.2548, -90.5097, 45.2548, 90.5097],
[-57.0175, -114.0350, 57.0175, 114.0350],
[-71.8376, -143.6751, 71.8376, 143.6751]]),
torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],
[-228.0701, -114.0350, 228.0701, 114.0350],
[-287.3503, -143.6751, 287.3503, 143.6751],
[-128.0000, -128.0000, 128.0000, 128.0000],
[-161.2699, -161.2699, 161.2699, 161.2699],
[-203.1873, -203.1873, 203.1873, 203.1873],
[-90.5097, -181.0193, 90.5097, 181.0193],
[-114.0350, -228.0701, 114.0350, 228.0701],
[-143.6751, -287.3503, 143.6751, 287.3503]]),
torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],
[-456.1401, -228.0701, 456.1401, 228.0701],
[-574.7006, -287.3503, 574.7006, 287.3503],
[-256.0000, -256.0000, 256.0000, 256.0000],
[-322.5398, -322.5398, 322.5398, 322.5398],
[-406.3747, -406.3747, 406.3747, 406.3747],
[-181.0193, -362.0387, 181.0193, 362.0387],
[-228.0701, -456.1401, 228.0701, 456.1401],
[-287.3503, -574.7006, 287.3503, 574.7006]])
]
base_anchors = retina_head.anchor_generator.base_anchors
for i, base_anchor in enumerate(base_anchors):
assert base_anchor.allclose(expected_base_anchors[i])
# check valid flags
expected_valid_pixels = [57600, 14400, 3600, 900, 225]
multi_level_valid_flags = retina_head.anchor_generator.valid_flags(
featmap_sizes, (640, 640), device)
for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
assert single_level_valid_flag.sum() == expected_valid_pixels[i]
# check number of base anchors for each level
assert retina_head.anchor_generator.num_base_anchors == [9, 9, 9, 9, 9]
# check anchor generation
anchors = retina_head.anchor_generator.grid_anchors(featmap_sizes, device)
assert len(anchors) == 5
def test_guided_anchor():
from mmdet.models import build_head
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
# head configs modified from
# configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py
bbox_head = dict(
type='GARetinaHead',
num_classes=8,
in_channels=4,
stacked_convs=1,
feat_channels=4,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]))
ga_retina_head = build_head(bbox_head)
assert ga_retina_head.approx_anchor_generator is not None
# use the featmap sizes in NASFPN setting to test ga_retina_head
featmap_sizes = [(100, 152), (50, 76), (25, 38), (13, 19), (7, 10)]
# check base anchors
expected_approxs = [
torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],
[-28.5088, -14.2544, 28.5088, 14.2544],
[-35.9188, -17.9594, 35.9188, 17.9594],
[-16.0000, -16.0000, 16.0000, 16.0000],
[-20.1587, -20.1587, 20.1587, 20.1587],
[-25.3984, -25.3984, 25.3984, 25.3984],
[-11.3137, -22.6274, 11.3137, 22.6274],
[-14.2544, -28.5088, 14.2544, 28.5088],
[-17.9594, -35.9188, 17.9594, 35.9188]]),
torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],
[-57.0175, -28.5088, 57.0175, 28.5088],
[-71.8376, -35.9188, 71.8376, 35.9188],
[-32.0000, -32.0000, 32.0000, 32.0000],
[-40.3175, -40.3175, 40.3175, 40.3175],
[-50.7968, -50.7968, 50.7968, 50.7968],
[-22.6274, -45.2548, 22.6274, 45.2548],
[-28.5088, -57.0175, 28.5088, 57.0175],
[-35.9188, -71.8376, 35.9188, 71.8376]]),
torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],
[-114.0350, -57.0175, 114.0350, 57.0175],
[-143.6751, -71.8376, 143.6751, 71.8376],
[-64.0000, -64.0000, 64.0000, 64.0000],
[-80.6349, -80.6349, 80.6349, 80.6349],
[-101.5937, -101.5937, 101.5937, 101.5937],
[-45.2548, -90.5097, 45.2548, 90.5097],
[-57.0175, -114.0350, 57.0175, 114.0350],
[-71.8376, -143.6751, 71.8376, 143.6751]]),
torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],
[-228.0701, -114.0350, 228.0701, 114.0350],
[-287.3503, -143.6751, 287.3503, 143.6751],
[-128.0000, -128.0000, 128.0000, 128.0000],
[-161.2699, -161.2699, 161.2699, 161.2699],
[-203.1873, -203.1873, 203.1873, 203.1873],
[-90.5097, -181.0193, 90.5097, 181.0193],
[-114.0350, -228.0701, 114.0350, 228.0701],
[-143.6751, -287.3503, 143.6751, 287.3503]]),
torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],
[-456.1401, -228.0701, 456.1401, 228.0701],
[-574.7006, -287.3503, 574.7006, 287.3503],
[-256.0000, -256.0000, 256.0000, 256.0000],
[-322.5398, -322.5398, 322.5398, 322.5398],
[-406.3747, -406.3747, 406.3747, 406.3747],
[-181.0193, -362.0387, 181.0193, 362.0387],
[-228.0701, -456.1401, 228.0701, 456.1401],
[-287.3503, -574.7006, 287.3503, 574.7006]])
]
approxs = ga_retina_head.approx_anchor_generator.base_anchors
for i, base_anchor in enumerate(approxs):
assert base_anchor.allclose(expected_approxs[i])
# check valid flags
expected_valid_pixels = [136800, 34200, 8550, 2223, 630]
multi_level_valid_flags = ga_retina_head.approx_anchor_generator \
.valid_flags(featmap_sizes, (800, 1216), device)
for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
assert single_level_valid_flag.sum() == expected_valid_pixels[i]
# check number of base anchors for each level
assert ga_retina_head.approx_anchor_generator.num_base_anchors == [
9, 9, 9, 9, 9
]
# check approx generation
squares = ga_retina_head.square_anchor_generator.grid_anchors(
featmap_sizes, device)
assert len(squares) == 5
expected_squares = [
torch.Tensor([[-16., -16., 16., 16.]]),
torch.Tensor([[-32., -32., 32., 32]]),
torch.Tensor([[-64., -64., 64., 64.]]),
torch.Tensor([[-128., -128., 128., 128.]]),
torch.Tensor([[-256., -256., 256., 256.]])
]
squares = ga_retina_head.square_anchor_generator.base_anchors
for i, base_anchor in enumerate(squares):
assert base_anchor.allclose(expected_squares[i])
# square_anchor_generator does not check valid flags
# check number of base anchors for each level
assert (ga_retina_head.square_anchor_generator.num_base_anchors == [
1, 1, 1, 1, 1
])
# check square generation
anchors = ga_retina_head.square_anchor_generator.grid_anchors(
featmap_sizes, device)
assert len(anchors) == 5
| 1 | 24,355 | May also need to test the normal functionality with min/max_sizes | open-mmlab-mmdetection | py |
@@ -73,6 +73,7 @@ var MainnetDefinition = NetworkDefinition{
ChainID: 1,
MystAddress: "0x4Cf89ca06ad997bC732Dc876ed2A7F26a9E7f361",
EtherClientRPC: []string{
+ "https://ethereum1.mysterium.network/",
"https://main-light.eth.linkpool.io/",
},
}, | 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package metadata
// NetworkDefinition structure holds all parameters which describe particular network
type NetworkDefinition struct {
MysteriumAPIAddress string
AccessPolicyOracleAddress string
BrokerAddresses []string
TransactorAddress string
TransactorIdentity string
Chain1 ChainDefinition
Chain2 ChainDefinition
MMNAddress string
MMNAPIAddress string
PilvytisAddress string
DNSMap map[string][]string
DefaultChainID int64
DefaultCurrency string
LocationAddress string
Testnet3HermesURL string
Payments Payments
}
// ChainDefinition defines the configuration for the chain.
type ChainDefinition struct {
RegistryAddress string
HermesID string
ChannelImplAddress string
ChainID int64
MystAddress string
EtherClientRPC []string
}
// Payments defines payments configuration
type Payments struct {
Consumer Consumer
}
// Consumer defines consumer side settings
type Consumer struct {
DataLeewayMegabytes uint64
PriceGiBMax string
PriceHourMax string
EtherClientRPC string
}
// MainnetDefinition defines parameters for mainnet network (currently default network)
var MainnetDefinition = NetworkDefinition{
MysteriumAPIAddress: "https://discovery.mysterium.network/api/v3",
AccessPolicyOracleAddress: "https://trust.mysterium.network/api/v1/access-policies/",
BrokerAddresses: []string{"nats://broker.mysterium.network"},
TransactorAddress: "https://transactor.mysterium.network/api/v1",
Chain1: ChainDefinition{ // TODO: Update when mainnet deployed.
RegistryAddress: "0x87F0F4b7e0FAb14A565C87BAbbA6c40c92281b51",
ChannelImplAddress: "0xbd20839b331a7a8d10e34cdf7219edf334814c4f",
HermesID: "0xa62a2a75949d25e17c6f08a7818e7be97c18a8d2",
ChainID: 1,
MystAddress: "0x4Cf89ca06ad997bC732Dc876ed2A7F26a9E7f361",
EtherClientRPC: []string{
"https://main-light.eth.linkpool.io/",
},
},
Chain2: ChainDefinition{
RegistryAddress: "0x87F0F4b7e0FAb14A565C87BAbbA6c40c92281b51",
ChannelImplAddress: "0x25882f4966065ca13b7bac15cc48391d9a4124f6",
HermesID: "0xa62a2a75949d25e17c6f08a7818e7be97c18a8d2",
ChainID: 137,
MystAddress: "0x1379e8886a944d2d9d440b3d88df536aea08d9f3",
EtherClientRPC: []string{
"https://polygon-rpc.com/",
},
},
MMNAddress: "", // TODO: Figure this out
MMNAPIAddress: "", // TODO: Figure this out
PilvytisAddress: "https://pilvytis.mysterium.network",
DNSMap: map[string][]string{
"discovery.mysterium.network": {"51.158.129.204"},
"trust.mysterium.network": {"51.158.129.204"},
"broker.mysterium.network": {"51.158.129.204"},
"transactor.mysterium.network": {"51.158.129.204"},
"pilvytis.mysterium.network": {"51.158.129.204"},
},
DefaultChainID: 137,
DefaultCurrency: "MYST",
LocationAddress: "https://location.mysterium.network/api/v1/location",
Testnet3HermesURL: "https://testnet3-hermes.mysterium.network/api/v1",
Payments: Payments{
Consumer: Consumer{
DataLeewayMegabytes: 20,
PriceGiBMax: "500000000000000000", // 0.5 MYSTT
PriceHourMax: "180000000000000", // 0.0018 MYSTT
},
},
}
// Testnet3Definition defines parameters for testnet3 network
var Testnet3Definition = NetworkDefinition{
MysteriumAPIAddress: "https://testnet3-discovery.mysterium.network/api/v3",
AccessPolicyOracleAddress: "https://testnet3-trust.mysterium.network/api/v1/access-policies/",
BrokerAddresses: []string{"nats://testnet3-broker.mysterium.network"},
TransactorIdentity: "0x7d72db0c2db675ea5107caba80acac2154ca362b",
TransactorAddress: "https://testnet3-transactor.mysterium.network/api/v1",
Chain1: ChainDefinition{
RegistryAddress: "0xDFAB03C9fbDbef66dA105B88776B35bfd7743D39",
ChannelImplAddress: "0x1aDF7Ef34b9d48DCc8EBC47D989bfdE55933B6ea",
HermesID: "0x7119442C7E627438deb0ec59291e31378F88DD06",
ChainID: 5,
MystAddress: "0xf74a5ca65E4552CfF0f13b116113cCb493c580C5",
EtherClientRPC: []string{
"https://goerli1.mysterium.network/",
},
},
Chain2: ChainDefinition{
RegistryAddress: "0xDFAB03C9fbDbef66dA105B88776B35bfd7743D39",
ChannelImplAddress: "0xf8982Ba93D3d9182D095B892DE2A7963eF9807ee",
HermesID: "0x7119442C7E627438deb0ec59291e31378F88DD06",
ChainID: 80001,
MystAddress: "0xB923b52b60E247E34f9afE6B3fa5aCcBAea829E8",
EtherClientRPC: []string{
"https://mumbai1.mysterium.network/",
"https://mumbai2.mysterium.network/",
},
},
MMNAddress: "https://my.mysterium.network/",
MMNAPIAddress: "https://my.mysterium.network/api/v1",
PilvytisAddress: "https://testnet3-pilvytis.mysterium.network",
DNSMap: map[string][]string{
"testnet3-discovery.mysterium.network": {"167.233.11.60"},
"testnet3-trust.mysterium.network": {"167.233.11.60"},
"testnet3-broker.mysterium.network": {"167.233.11.60"},
"testnet3-transactor.mysterium.network": {"167.233.11.60"},
"testnet3-pilvytis.mysterium.network": {"167.233.11.60"},
},
DefaultChainID: 80001,
DefaultCurrency: "MYSTT",
LocationAddress: "https://testnet3-location.mysterium.network/api/v1/location",
Testnet3HermesURL: "https://testnet3-hermes.mysterium.network/api/v1",
Payments: Payments{
Consumer: Consumer{
DataLeewayMegabytes: 20,
PriceGiBMax: "500000000000000000", // 0.5 MYSTT
PriceHourMax: "180000000000000", // 0.0018 MYSTT
},
},
}
// LocalnetDefinition defines parameters for local network
// Expects discovery, broker and morqa services on localhost
var LocalnetDefinition = NetworkDefinition{
MysteriumAPIAddress: "http://localhost:8001/v1",
AccessPolicyOracleAddress: "https://localhost:8081/api/v1/access-policies/",
BrokerAddresses: []string{"localhost"},
MMNAddress: "http://localhost/",
MMNAPIAddress: "http://localhost/api/v1",
PilvytisAddress: "http://localhost:8002/api/v1",
DNSMap: map[string][]string{
"localhost": {"127.0.0.1"},
},
DefaultChainID: 1,
}
// DefaultNetwork defines default network values when no runtime parameters are given
var DefaultNetwork = MainnetDefinition
| 1 | 17,355 | We can remove this one. It's just a random one I got for free for testing. | mysteriumnetwork-node | go |
@@ -7,6 +7,7 @@
package blocksync
import (
+ "github.com/iotexproject/iotex-core/db"
"sync"
"github.com/iotexproject/iotex-core/actpool" | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blocksync
import (
"sync"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/logger"
)
type bCheckinResult int
const (
bCheckinValid bCheckinResult = iota + 1
bCheckinLower
bCheckinExisting
bCheckinHigher
bCheckinSkipNil
)
// blockBuffer is used to keep in-coming block in order.
type blockBuffer struct {
mu sync.RWMutex
blocks map[uint64]*blockchain.Block
bc blockchain.Blockchain
ap actpool.ActPool
size uint64
}
// Flush tries to put given block into buffer and flush buffer into blockchain.
func (b *blockBuffer) Flush(blk *blockchain.Block) (bool, bCheckinResult) {
b.mu.Lock()
defer b.mu.Unlock()
if blk == nil {
return false, bCheckinSkipNil
}
confirmedHeight := b.bc.TipHeight()
// check
blkHeight := blk.Height()
if blkHeight <= confirmedHeight {
return false, bCheckinLower
}
if _, ok := b.blocks[blkHeight]; ok {
return false, bCheckinExisting
}
if blkHeight > confirmedHeight+b.size {
return false, bCheckinHigher
}
b.blocks[blkHeight] = blk
l := logger.With().
Uint64("recvHeight", blkHeight).
Uint64("confirmedHeight", confirmedHeight).
Str("source", "blockBuffer").
Logger()
var heightToSync uint64
for heightToSync = confirmedHeight + 1; heightToSync <= confirmedHeight+b.size; heightToSync++ {
blk, ok := b.blocks[heightToSync]
if !ok {
break
}
delete(b.blocks, heightToSync)
if err := commitBlock(b.bc, b.ap, blk); err != nil {
l.Error().Err(err).Uint64("syncHeight", heightToSync).
Msg("Failed to commit the block.")
// unable to commit, check reason
committedBlk, err := b.bc.GetBlockByHeight(heightToSync)
if err != nil || committedBlk.HashBlock() != blk.HashBlock() {
break
}
}
l.Info().Uint64("syncedHeight", heightToSync).Msg("Successfully committed block.")
}
// clean up on memory leak
if len(b.blocks) > int(b.size)*2 {
l.Warn().Int("bufferSize", len(b.blocks)).Msg("blockBuffer is leaking memory.")
for h := range b.blocks {
if h <= confirmedHeight {
delete(b.blocks, h)
}
}
}
return heightToSync > blkHeight, bCheckinValid
}
// GetBlocksIntervalsToSync returns the groups of syncBlocksInterval that are still missing up to targetHeight.
func (b *blockBuffer) GetBlocksIntervalsToSync(targetHeight uint64) []syncBlocksInterval {
var (
start uint64
startSet bool
bi []syncBlocksInterval
)
b.mu.RLock()
defer b.mu.RUnlock()
confirmedHeight := b.bc.TipHeight()
if confirmedHeight >= targetHeight {
return bi
}
if targetHeight > confirmedHeight+b.size {
targetHeight = confirmedHeight + b.size
}
for h := confirmedHeight + 1; h <= targetHeight; h++ {
if _, ok := b.blocks[h]; !ok {
if !startSet {
start = h
startSet = true
}
continue
}
if startSet {
bi = append(bi, syncBlocksInterval{Start: start, End: h - 1})
startSet = false
}
}
// handle last block
if _, ok := b.blocks[targetHeight]; !ok {
if !startSet {
start = targetHeight
}
bi = append(bi, syncBlocksInterval{Start: start, End: targetHeight})
}
return bi
}
| 1 | 13,518 | File is not `goimports`-ed (from `goimports`) | iotexproject-iotex-core | go |
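For context on the goimports remark: goimports keeps standard-library imports in their own group, separated from third-party packages, and sorts each group. After running it, the import block touched by this patch would be expected to look roughly like this (the exact list depends on what the file actually uses):

import (
	"sync"

	"github.com/iotexproject/iotex-core/actpool"
	"github.com/iotexproject/iotex-core/blockchain"
	"github.com/iotexproject/iotex-core/db"
	"github.com/iotexproject/iotex-core/logger"
)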
@@ -65,7 +65,11 @@ class AddFiles extends Component {
renderMyDeviceAcquirer = () => {
return (
- <div class="uppy-DashboardTab" role="presentation">
+ <div
+ class="uppy-DashboardTab"
+ role="presentation"
+ data-acquirer-id="Local"
+ >
<button
type="button"
class="uppy-DashboardTab-btn" | 1 | const { h, Component } = require('preact')
class AddFiles extends Component {
triggerFileInputClick = () => {
this.fileInput.click()
}
onFileInputChange = (event) => {
this.props.handleInputChange(event)
// We clear the input after a file is selected, because otherwise
// change event is not fired in Chrome and Safari when a file
// with the same name is selected.
// ___Why not use value="" on <input/> instead?
// Because if we use that method of clearing the input,
// Chrome will not trigger change if we drop the same file twice (Issue #768).
event.target.value = null
}
renderPoweredByUppy () {
const uppyBranding = (
<span>
<svg aria-hidden="true" focusable="false" class="uppy-c-icon uppy-Dashboard-poweredByIcon" width="11" height="11" viewBox="0 0 11 11">
<path d="M7.365 10.5l-.01-4.045h2.612L5.5.806l-4.467 5.65h2.604l.01 4.044h3.718z" fill-rule="evenodd" />
</svg>
<span class="uppy-Dashboard-poweredByUppy">Uppy</span>
</span>
)
// Support both the old word-order-insensitive string `poweredBy` and the new word-order-sensitive string `poweredBy2`
const linkText = this.props.i18nArray('poweredBy2', {
backwardsCompat: this.props.i18n('poweredBy'),
uppy: uppyBranding
})
return (
<a
tabindex="-1"
href="https://uppy.io"
rel="noreferrer noopener"
target="_blank"
class="uppy-Dashboard-poweredBy"
>
{linkText}
</a>
)
}
renderHiddenFileInput = () => {
return (
<input
class="uppy-Dashboard-input"
hidden
aria-hidden="true"
tabindex={-1}
type="file"
name="files[]"
multiple={this.props.maxNumberOfFiles !== 1}
onchange={this.onFileInputChange}
accept={this.props.allowedFileTypes}
ref={(ref) => { this.fileInput = ref }}
/>
)
}
renderMyDeviceAcquirer = () => {
return (
<div class="uppy-DashboardTab" role="presentation">
<button
type="button"
class="uppy-DashboardTab-btn"
role="tab"
tabindex={0}
data-uppy-super-focusable
onclick={this.triggerFileInputClick}
>
<svg aria-hidden="true" focusable="false" width="32" height="32" viewBox="0 0 32 32">
<g fill="none" fill-rule="evenodd">
<rect width="32" height="32" rx="16" fill="#2275D7" />
<path d="M21.973 21.152H9.863l-1.108-5.087h14.464l-1.246 5.087zM9.935 11.37h3.958l.886 1.444a.673.673 0 0 0 .585.316h6.506v1.37H9.935v-3.13zm14.898 3.44a.793.793 0 0 0-.616-.31h-.978v-2.126c0-.379-.275-.613-.653-.613H15.75l-.886-1.445a.673.673 0 0 0-.585-.316H9.232c-.378 0-.667.209-.667.587V14.5h-.782a.793.793 0 0 0-.61.303.795.795 0 0 0-.155.663l1.45 6.633c.078.36.396.618.764.618h13.354c.36 0 .674-.246.76-.595l1.631-6.636a.795.795 0 0 0-.144-.675z" fill="#FFF" />
</g>
</svg>
<div class="uppy-DashboardTab-name">{this.props.i18n('myDevice')}</div>
</button>
</div>
)
}
renderDropPasteBrowseTagline = () => {
const numberOfAcquirers = this.props.acquirers.length
const browse =
<button
type="button"
class="uppy-u-reset uppy-Dashboard-browse"
onclick={this.triggerFileInputClick}
data-uppy-super-focusable={numberOfAcquirers === 0}
>
{this.props.i18n('browse')}
</button>
return (
<div class="uppy-Dashboard-AddFiles-title">
{
numberOfAcquirers > 0
? this.props.i18nArray('dropPasteImport', { browse })
: this.props.i18nArray('dropPaste', { browse })
}
</div>
)
}
renderAcquirer = (acquirer) => {
return (
<div class="uppy-DashboardTab" role="presentation">
<button
type="button"
class="uppy-DashboardTab-btn"
role="tab"
tabindex={0}
aria-controls={`uppy-DashboardContent-panel--${acquirer.id}`}
aria-selected={this.props.activePickerPanel.id === acquirer.id}
data-uppy-super-focusable
onclick={() => this.props.showPanel(acquirer.id)}
>
{acquirer.icon()}
<div class="uppy-DashboardTab-name">{acquirer.name}</div>
</button>
</div>
)
}
renderAcquirers = (acquirers) => {
// Group last two buttons, so we don’t end up with
// just one button on a new line
const acquirersWithoutLastTwo = [...acquirers]
const lastTwoAcquirers = acquirersWithoutLastTwo.splice(acquirers.length - 2, acquirers.length)
return (
<div class="uppy-Dashboard-AddFiles-list" role="tablist">
{this.renderMyDeviceAcquirer()}
{acquirersWithoutLastTwo.map((acquirer) => this.renderAcquirer(acquirer))}
<span role="presentation" style="white-space: nowrap;">
{lastTwoAcquirers.map((acquirer) => this.renderAcquirer(acquirer))}
</span>
</div>
)
}
render () {
return (
<div class="uppy-Dashboard-AddFiles">
{this.renderHiddenFileInput()}
{this.renderDropPasteBrowseTagline()}
{this.props.acquirers.length > 0 && this.renderAcquirers(this.props.acquirers)}
<div class="uppy-Dashboard-AddFiles-info">
{this.props.note && <div class="uppy-Dashboard-note">{this.props.note}</div>}
{this.props.proudlyDisplayPoweredByUppy && this.renderPoweredByUppy(this.props)}
</div>
</div>
)
}
}
module.exports = AddFiles
| 1 | 13,167 | (maybe this should be MyDevice or just removed entirely?) | transloadit-uppy | js |
@@ -337,6 +337,10 @@ func (payloadHandler *payloadRequestHandler) handleUnrecognizedTask(task *ecsacs
TaskARN: *task.Arn,
Status: apitaskstatus.TaskStopped,
Reason: UnrecognizedTaskError{err}.Error(),
+ // The real task cannot be extracted from payload message, so we send an empty task.
+ // This is necessary because the task handler will not send an event whose
+ // Task is nil.
+ Task: &apitask.Task{},
}
payloadHandler.taskHandler.AddStateChangeEvent(taskEvent, payloadHandler.ecsClient) | 1 | // Copyright 2014-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package handler
import (
"fmt"
"context"
"github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs"
"github.com/aws/amazon-ecs-agent/agent/api"
apieni "github.com/aws/amazon-ecs-agent/agent/api/eni"
apitask "github.com/aws/amazon-ecs-agent/agent/api/task"
apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/engine"
"github.com/aws/amazon-ecs-agent/agent/eventhandler"
"github.com/aws/amazon-ecs-agent/agent/statemanager"
"github.com/aws/amazon-ecs-agent/agent/wsclient"
"github.com/aws/aws-sdk-go/aws"
"github.com/cihub/seelog"
)
// payloadRequestHandler represents the payload operation for the ACS client
type payloadRequestHandler struct {
// messageBuffer is used to process PayloadMessages received from the server
messageBuffer chan *ecsacs.PayloadMessage
// ackRequest is used to send acks to the backend
ackRequest chan string
ctx context.Context
taskEngine engine.TaskEngine
ecsClient api.ECSClient
saver statemanager.Saver
taskHandler *eventhandler.TaskHandler
// cancel is used to stop go routines started by start() method
cancel context.CancelFunc
cluster string
containerInstanceArn string
acsClient wsclient.ClientServer
refreshHandler refreshCredentialsHandler
credentialsManager credentials.Manager
}
// newPayloadRequestHandler returns a new payloadRequestHandler object
func newPayloadRequestHandler(
ctx context.Context,
taskEngine engine.TaskEngine,
ecsClient api.ECSClient,
cluster string,
containerInstanceArn string,
acsClient wsclient.ClientServer,
saver statemanager.Saver,
refreshHandler refreshCredentialsHandler,
credentialsManager credentials.Manager,
taskHandler *eventhandler.TaskHandler) payloadRequestHandler {
// Create a cancelable context from the parent context
derivedContext, cancel := context.WithCancel(ctx)
return payloadRequestHandler{
messageBuffer: make(chan *ecsacs.PayloadMessage, payloadMessageBufferSize),
ackRequest: make(chan string, payloadMessageBufferSize),
taskEngine: taskEngine,
ecsClient: ecsClient,
saver: saver,
taskHandler: taskHandler,
ctx: derivedContext,
cancel: cancel,
cluster: cluster,
containerInstanceArn: containerInstanceArn,
acsClient: acsClient,
refreshHandler: refreshHandler,
credentialsManager: credentialsManager,
}
}
// handlerFunc returns the request handler function for the ecsacs.PayloadMessage type
func (payloadHandler *payloadRequestHandler) handlerFunc() func(payload *ecsacs.PayloadMessage) {
// return a function that just enqueues PayloadMessages into the message buffer
return func(payload *ecsacs.PayloadMessage) {
payloadHandler.messageBuffer <- payload
}
}
// start invokes go routines to:
// 1. handle messages in the payload message buffer
// 2. handle ack requests to be sent to ACS
func (payloadHandler *payloadRequestHandler) start() {
go payloadHandler.handleMessages()
go payloadHandler.sendAcks()
}
// stop cancels the context being used by the payload handler. This is used
// to stop the go routines started by 'start()'
func (payloadHandler *payloadRequestHandler) stop() {
payloadHandler.cancel()
}
// sendAcks sends ack requests to ACS
func (payloadHandler *payloadRequestHandler) sendAcks() {
for {
select {
case mid := <-payloadHandler.ackRequest:
payloadHandler.ackMessageId(mid)
case <-payloadHandler.ctx.Done():
return
}
}
}
// ackMessageId sends an AckRequest for a message id
func (payloadHandler *payloadRequestHandler) ackMessageId(messageID string) {
seelog.Debugf("Acking payload message id: %s", messageID)
err := payloadHandler.acsClient.MakeRequest(&ecsacs.AckRequest{
Cluster: aws.String(payloadHandler.cluster),
ContainerInstance: aws.String(payloadHandler.containerInstanceArn),
MessageId: aws.String(messageID),
})
if err != nil {
seelog.Warnf("Error 'ack'ing request with messageID: %s, error: %v", messageID, err)
}
}
// handleMessages processes payload messages in the payload message buffer in-order
func (payloadHandler *payloadRequestHandler) handleMessages() {
for {
select {
case payload := <-payloadHandler.messageBuffer:
payloadHandler.handleSingleMessage(payload)
case <-payloadHandler.ctx.Done():
return
}
}
}
// handleSingleMessage processes a single payload message. It adds tasks in the message to the task engine
// An error is returned if the message was not handled correctly. The error is being used only for testing
// today. In the future, it could be used for doing more interesting things.
func (payloadHandler *payloadRequestHandler) handleSingleMessage(payload *ecsacs.PayloadMessage) error {
if aws.StringValue(payload.MessageId) == "" {
seelog.Criticalf("Received a payload with no message id")
return fmt.Errorf("received a payload with no message id")
}
seelog.Debugf("Received payload message, message id: %s", aws.StringValue(payload.MessageId))
credentialsAcks, allTasksHandled := payloadHandler.addPayloadTasks(payload)
// save the state of tasks we know about after passing them to the task engine
err := payloadHandler.saver.Save()
if err != nil {
seelog.Errorf("Error saving state for payload message! err: %v, messageId: %s", err, aws.StringValue(payload.MessageId))
// Don't ack; maybe we can save it in the future.
return fmt.Errorf("error saving state for payload message, with messageId: %s", aws.StringValue(payload.MessageId))
}
if !allTasksHandled {
return fmt.Errorf("did not handle all tasks")
}
go func() {
// Send the ack asynchronously; acking is not critical, and doing it inline would block handling more tasks.
for _, credentialsAck := range credentialsAcks {
payloadHandler.refreshHandler.ackMessage(credentialsAck)
}
payloadHandler.ackRequest <- *payload.MessageId
}()
return nil
}
// addPayloadTasks does validation on each task and, for all valid ones, adds
// them to the task engine. It returns a bool indicating if it could add every
// task to the taskEngine and a slice of credential ack requests
func (payloadHandler *payloadRequestHandler) addPayloadTasks(payload *ecsacs.PayloadMessage) ([]*ecsacs.IAMRoleCredentialsAckRequest, bool) {
// verify that we were able to work with all tasks in this payload so we know whether to ack the whole thing or not
allTasksOK := true
validTasks := make([]*apitask.Task, 0, len(payload.Tasks))
for _, task := range payload.Tasks {
if task == nil {
seelog.Criticalf("Received nil task for messageId: %s", aws.StringValue(payload.MessageId))
allTasksOK = false
continue
}
apiTask, err := apitask.TaskFromACS(task, payload)
if err != nil {
payloadHandler.handleUnrecognizedTask(task, err, payload)
allTasksOK = false
continue
}
if task.RoleCredentials != nil {
// The payload from ACS for the task has credentials for the
// task. Add those to the credentials manager and set the
// credentials id for the task as well
taskIAMRoleCredentials := credentials.IAMRoleCredentialsFromACS(task.RoleCredentials, credentials.ApplicationRoleType)
err = payloadHandler.credentialsManager.SetTaskCredentials(
credentials.TaskIAMRoleCredentials{
ARN: aws.StringValue(task.Arn),
IAMRoleCredentials: taskIAMRoleCredentials,
})
if err != nil {
payloadHandler.handleUnrecognizedTask(task, err, payload)
allTasksOK = false
continue
}
apiTask.SetCredentialsID(taskIAMRoleCredentials.CredentialsID)
}
// Adding the eni information to the task struct
if len(task.ElasticNetworkInterfaces) != 0 {
eni, err := apieni.ENIFromACS(task.ElasticNetworkInterfaces)
if err != nil {
payloadHandler.handleUnrecognizedTask(task, err, payload)
allTasksOK = false
continue
}
apiTask.SetTaskENI(eni)
}
if task.ExecutionRoleCredentials != nil {
// The payload message contains execution credentials for the task.
// Add the credentials to the credentials manager and set the
// task executionCredentials id.
taskExecutionIAMRoleCredentials := credentials.IAMRoleCredentialsFromACS(task.ExecutionRoleCredentials, credentials.ExecutionRoleType)
err = payloadHandler.credentialsManager.SetTaskCredentials(
credentials.TaskIAMRoleCredentials{
ARN: aws.StringValue(task.Arn),
IAMRoleCredentials: taskExecutionIAMRoleCredentials,
})
if err != nil {
payloadHandler.handleUnrecognizedTask(task, err, payload)
allTasksOK = false
continue
}
apiTask.SetExecutionRoleCredentialsID(taskExecutionIAMRoleCredentials.CredentialsID)
}
validTasks = append(validTasks, apiTask)
}
// Add 'stop' transitions first to allow seqnum ordering to work out
// Because a 'start' sequence number should only be processed if all 'stop's
// of the same sequence number have completed, the 'start' events need to be
// added after the 'stop' events are there to block them.
stoppedTasksCredentialsAcks, stoppedTasksAddedOK := payloadHandler.addTasks(payload, validTasks, isTaskStatusNotStopped)
newTasksCredentialsAcks, newTasksAddedOK := payloadHandler.addTasks(payload, validTasks, isTaskStatusStopped)
if !stoppedTasksAddedOK || !newTasksAddedOK {
allTasksOK = false
}
// Construct a slice with credentials acks from all tasks
var credentialsAcks []*ecsacs.IAMRoleCredentialsAckRequest
credentialsAcks = append(stoppedTasksCredentialsAcks, newTasksCredentialsAcks...)
return credentialsAcks, allTasksOK
}
// addTasks adds the tasks to the task engine based on the skipAddTask condition
// This is used to add non-stopped tasks before adding stopped tasks
func (payloadHandler *payloadRequestHandler) addTasks(payload *ecsacs.PayloadMessage, tasks []*apitask.Task, skipAddTask skipAddTaskComparatorFunc) ([]*ecsacs.IAMRoleCredentialsAckRequest, bool) {
allTasksOK := true
var credentialsAcks []*ecsacs.IAMRoleCredentialsAckRequest
for _, task := range tasks {
if skipAddTask(task.GetDesiredStatus()) {
continue
}
payloadHandler.taskEngine.AddTask(task)
ackCredentials := func(id string, description string) {
ack, err := payloadHandler.ackCredentials(payload.MessageId, id)
if err != nil {
allTasksOK = false
seelog.Errorf("Failed to acknowledge %s credentials for task: %s, err: %v", description, task.String(), err)
return
}
credentialsAcks = append(credentialsAcks, ack)
}
// Generate an ack request for the credentials in the task, if the
// task is associated with an IAM role or the execution role
taskCredentialsID := task.GetCredentialsID()
if taskCredentialsID != "" {
ackCredentials(taskCredentialsID, "task iam role")
}
taskExecutionCredentialsID := task.GetExecutionCredentialsID()
if taskExecutionCredentialsID != "" {
ackCredentials(taskExecutionCredentialsID, "task execution role")
}
}
return credentialsAcks, allTasksOK
}
func (payloadHandler *payloadRequestHandler) ackCredentials(messageID *string, credentialsID string) (*ecsacs.IAMRoleCredentialsAckRequest, error) {
creds, ok := payloadHandler.credentialsManager.GetTaskCredentials(credentialsID)
if !ok {
return nil, fmt.Errorf("credentials could not be retrieved")
} else {
return &ecsacs.IAMRoleCredentialsAckRequest{
MessageId: messageID,
Expiration: aws.String(creds.IAMRoleCredentials.Expiration),
CredentialsId: aws.String(creds.IAMRoleCredentials.CredentialsID),
}, nil
}
}
// skipAddTaskComparatorFunc defines the function pointer that accepts task status
// and returns the boolean comparison result
type skipAddTaskComparatorFunc func(apitaskstatus.TaskStatus) bool
// isTaskStatusStopped returns true if the task status == STOPPED
func isTaskStatusStopped(status apitaskstatus.TaskStatus) bool {
return status == apitaskstatus.TaskStopped
}
// isTaskStatusNotStopped returns true if the task status != STOPPED
func isTaskStatusNotStopped(status apitaskstatus.TaskStatus) bool {
return status != apitaskstatus.TaskStopped
}
// handleUnrecognizedTask handles unrecognized tasks by sending 'stopped' with
// a suitable reason to the backend
func (payloadHandler *payloadRequestHandler) handleUnrecognizedTask(task *ecsacs.Task, err error, payload *ecsacs.PayloadMessage) {
seelog.Warnf("Received unexpected acs message, messageID: %s, task: %v, err: %v",
aws.StringValue(payload.MessageId), aws.StringValue(task.Arn), err)
if aws.StringValue(task.Arn) == "" {
seelog.Criticalf("Received task with no arn, messageId: %s", aws.StringValue(payload.MessageId))
return
}
// Only need to stop the task; it brings down the containers too.
taskEvent := api.TaskStateChange{
TaskARN: *task.Arn,
Status: apitaskstatus.TaskStopped,
Reason: UnrecognizedTaskError{err}.Error(),
}
payloadHandler.taskHandler.AddStateChangeEvent(taskEvent, payloadHandler.ecsClient)
}
// clearAcks drains the ack request channel
func (payloadHandler *payloadRequestHandler) clearAcks() {
for {
select {
case <-payloadHandler.ackRequest:
default:
return
}
}
}
| 1 | 20,456 | Can you do a nil check here, as the task isn't always nil here? | aws-amazon-ecs-agent | go |
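The review comment on this record asks for a defensive nil check. Below is a minimal, standalone Go sketch of that guard style; the types are hypothetical stand-ins for the ACS task shape rather than the agent's real structs, and the exact line the reviewer points at lives in the diff, so treat the placement as an assumption.

package main

import "fmt"

// Credentials and Task are hypothetical stand-ins for the ACS payload types used above.
type Credentials struct{ RoleArn string }

type Task struct{ RoleCredentials *Credentials }

// roleArn dereferences the task only after checking both pointers, which is
// the kind of guard the review comment asks for.
func roleArn(t *Task) (string, bool) {
	if t == nil || t.RoleCredentials == nil {
		return "", false
	}
	return t.RoleCredentials.RoleArn, true
}

func main() {
	arn, ok := roleArn(&Task{})
	fmt.Println(arn, ok) // a nil RoleCredentials is handled instead of panicking
}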
@@ -30,9 +30,12 @@ import Layout from 'GoogleComponents/layout/layout';
*/
import { isDataZeroForReporting, getTopPagesReportDataDefaults } from '../util';
-const { __ } = wp.i18n;
-const { map } = lodash;
-const { Component } = wp.element;
+/**
+ * WordPress dependencies
+ */
+import { __ } from '@wordpress/i18n';
+import { map } from 'lodash';
+import { Component } from '@wordpress/element';
class AnalyticsDashboardWidgetPopularPagesTable extends Component {
static renderLayout( component ) { | 1 | /**
* AnalyticsDashboardWidgetPopularPagesTable component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import withData from 'GoogleComponents/higherorder/withdata';
import { TYPE_MODULES } from 'GoogleComponents/data';
import { getTimeInSeconds, numberFormat } from 'GoogleUtil';
import { getDataTableFromData, TableOverflowContainer } from 'GoogleComponents/data-table';
import PreviewTable from 'GoogleComponents/preview-table';
import Layout from 'GoogleComponents/layout/layout';
/**
* Internal dependencies
*/
import { isDataZeroForReporting, getTopPagesReportDataDefaults } from '../util';
const { __ } = wp.i18n;
const { map } = lodash;
const { Component } = wp.element;
class AnalyticsDashboardWidgetPopularPagesTable extends Component {
static renderLayout( component ) {
return (
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-6-desktop
mdc-layout-grid__cell--span-4-tablet
">
<Layout
className="googlesitekit-popular-content"
footer
footerCtaLabel={ __( 'Analytics', 'google-site-kit' ) }
footerCtaLink={ googlesitekit.modules.analytics.homepage }
fill
>
{ component }
</Layout>
</div>
);
}
render() {
const { data } = this.props;
const { siteURL } = googlesitekit.admin;
if ( ! data || ! data.length ) {
return null;
}
const headers = [
{
title: __( 'Most popular content', 'google-site-kit' ),
},
{
title: __( 'Views', 'google-site-kit' ),
},
];
const links = [];
const dataMapped = map( data[ 0 ].data.rows, ( row, i ) => {
const [ title, url ] = row.dimensions;
links[ i ] = siteURL + url;
return [
title,
numberFormat( row.metrics[ 0 ].values[ 0 ] ),
];
} );
const options = {
hideHeader: false,
chartsEnabled: false,
links,
showURLs: true,
};
const dataTable = getDataTableFromData( dataMapped, headers, options );
return (
AnalyticsDashboardWidgetPopularPagesTable.renderLayout(
<TableOverflowContainer>
{ dataTable }
</TableOverflowContainer>
)
);
}
}
export default withData(
AnalyticsDashboardWidgetPopularPagesTable,
[
{
type: TYPE_MODULES,
identifier: 'analytics',
datapoint: 'report',
data: getTopPagesReportDataDefaults(),
priority: 1,
maxAge: getTimeInSeconds( 'day' ),
context: [ 'Single', 'Dashboard' ],
},
],
AnalyticsDashboardWidgetPopularPagesTable.renderLayout(
<PreviewTable padding />
),
{
inGrid: true,
createGrid: true,
},
isDataZeroForReporting
);
| 1 | 24,748 | `lodash` shouldn't be grouped under WordPress dependencies | google-site-kit-wp | js |
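The review on this record points out that `lodash` is an external package, not a WordPress one. A sketch of the grouping the comment implies, reusing the "External dependencies" / "WordPress dependencies" header convention already present in this file (the block order shown is an assumption):

/**
 * External dependencies
 */
import { map } from 'lodash';

/**
 * WordPress dependencies
 */
import { __ } from '@wordpress/i18n';
import { Component } from '@wordpress/element';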
@@ -76,7 +76,11 @@ public class DiscoveryConfiguration {
"enode://011f758e6552d105183b1761c5e2dea0111bc20fd5f6422bc7f91e0fabbec9a6595caf6239b37feb773dddd3f87240d99d859431891e4a642cf2a0a9e6cbb98a@51.141.78.53:30303",
"enode://176b9417f511d05b6b2cf3e34b756cf0a7096b3094572a8f6ef4cdcb9d1f9d00683bf0f83347eebdf3b81c3521c2332086d9592802230bf528eaf606a1d9677b@13.93.54.137:30303",
"enode://46add44b9f13965f7b9875ac6b85f016f341012d84f975377573800a863526f4da19ae2c620ec73d11591fa9510e992ecc03ad0751f53cc02f7c7ed6d55c7291@94.237.54.114:30313",
- "enode://b5948a2d3e9d486c4d75bf32713221c2bd6cf86463302339299bd227dc2e276cd5a1c7ca4f43a0e9122fe9af884efed563bd2a1fd28661f3b5f5ad7bf1de5949@18.218.250.66:30303")
+ "enode://b5948a2d3e9d486c4d75bf32713221c2bd6cf86463302339299bd227dc2e276cd5a1c7ca4f43a0e9122fe9af884efed563bd2a1fd28661f3b5f5ad7bf1de5949@18.218.250.66:30303",
+ "enode://c1f8b7c2ac4453271fa07d8e9ecf9a2e8285aa0bd0c07df0131f47153306b0736fd3db8924e7a9bf0bed6b1d8d4f87362a71b033dc7c64547728d953e43e59b2@52.64.155.147:30303",
+ "enode://f4a9c6ee28586009fb5a96c8af13a58ed6d8315a9eee4772212c1d4d9cebe5a8b8a78ea4434f318726317d04a3f531a1ef0420cf9752605a562cfe858c46e263@213.186.16.82:30303",
+ // Ethereum Foundation bootnode
+ "enode://a61215641fb8714a373c80edbfa0ea8878243193f57c96eeb44d0bc019ef295abd4e044fd619bfc4c59731a73fb79afe84e9ab6da0c743ceb479cbb6d263fa91@3.11.147.67:30303")
.map(EnodeURL::fromString)
.collect(toList()));
| 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.p2p.config;
import static java.util.stream.Collectors.toList;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURL;
import org.hyperledger.besu.util.NetworkUtility;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.Stream;
public class DiscoveryConfiguration {
public static List<EnodeURL> MAINNET_BOOTSTRAP_NODES =
Collections.unmodifiableList(
Stream.of(
// Ethereum Foundation Bootnodes
"enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", // Singapore AWS
"enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303", // Virginia AWS
"enode://ca6de62fce278f96aea6ec5a2daadb877e51651247cb96ee310a318def462913b653963c155a0ef6c7d50048bba6e6cea881130857413d9f50a621546b590758@34.255.23.113:30303", // Ireland AWS
"enode://279944d8dcd428dffaa7436f25ca0ca43ae19e7bcf94a8fb7d1641651f92d121e972ac2e8f381414b80cc8e5555811c2ec6e1a99bb009b3f53c4c69923e11bd8@35.158.244.151:30303", // Frankfurt AWS
"enode://8499da03c47d637b20eee24eec3c356c9a2e6148d6fe25ca195c7949ab8ec2c03e3556126b0d7ed644675e78c4318b08691b7b57de10e5f0d40d05b09238fa0a@52.187.207.27:30303", // Australia Azure
"enode://103858bdb88756c71f15e9b5e09b56dc1be52f0a5021d46301dbbfb7e130029cc9d0d6f73f693bc29b665770fff7da4d34f3c6379fe12721b5d7a0bcb5ca1fc1@191.234.162.198:30303", // Brazil Azure
"enode://715171f50508aba88aecd1250af392a45a330af91d7b90701c436b618c86aaa1589c9184561907bebbb56439b8f8787bc01f49a7c77276c58c1b09822d75e8e8@52.231.165.108:30303", // South Korea Azure
"enode://5d6d7cd20d6da4bb83a1d28cadb5d409b64edf314c0335df658c1a54e32c7c4a7ab7823d57c39b6a757556e68ff1df17c748b698544a55cb488b52479a92b60f@104.42.217.25:30303", // West US Azure
// Old Ethereum Foundation Bootnodes
"enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303", // IE
"enode://3f1d12044546b76342d59d4a05532c14b85aa669704bfe1f864fe079415aa2c02d743e03218e57a33fb94523adb54032871a6c51b2cc5514cb7c7e35b3ed0a99@13.93.211.84:30303", // US-WEST
"enode://78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d@191.235.84.50:30303", // BR
"enode://158f8aab45f6d19c6cbf4a089c2670541a8da11978a2f90dbf6a502a4a3bab80d288afdbeb7ec0ef6d92de563767f3b1ea9e8e334ca711e9f8e2df5a0385e8e6@13.75.154.138:30303", // AU
"enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303", // SG
// Ethereum Foundation Aleth Bootnodes
"enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303" // DE
)
.map(EnodeURL::fromString)
.collect(toList()));
public static List<EnodeURL> RINKEBY_BOOTSTRAP_NODES =
Collections.unmodifiableList(
Stream.of(
"enode://a24ac7c5484ef4ed0c5eb2d36620ba4e4aa13b8c84684e1b4aab0cebea2ae45cb4d375b77eab56516d34bfbd3c1a833fc51296ff084b770b94fb9028c4d25ccf@52.169.42.101:30303",
"enode://343149e4feefa15d882d9fe4ac7d88f885bd05ebb735e547f12e12080a9fa07c8014ca6fd7f373123488102fe5e34111f8509cf0b7de3f5b44339c9f25e87cb8@52.3.158.184:30303",
"enode://b6b28890b006743680c52e64e0d16db57f28124885595fa03a562be1d2bf0f3a1da297d56b13da25fb992888fd556d4c1a27b1f39d531bde7de1921c90061cc6@159.89.28.211:30303")
.map(EnodeURL::fromString)
.collect(toList()));
public static List<EnodeURL> ROPSTEN_BOOTSTRAP_NODES =
Collections.unmodifiableList(
Stream.of(
"enode://6332792c4a00e3e4ee0926ed89e0d27ef985424d97b6a45bf0f23e51f0dcb5e66b875777506458aea7af6f9e4ffb69f43f3778ee73c81ed9d34c51c4b16b0b0f@52.232.243.152:30303",
"enode://94c15d1b9e2fe7ce56e458b9a3b672ef11894ddedd0c6f247e0f1d3487f52b66208fb4aeb8179fce6e3a749ea93ed147c37976d67af557508d199d9594c35f09@192.81.208.223:30303",
"enode://30b7ab30a01c124a6cceca36863ece12c4f5fa68e3ba9b0b51407ccc002eeed3b3102d20a88f1c1d3c3154e2449317b8ef95090e77b312d5cc39354f86d5d606@52.176.7.10:30303",
"enode://865a63255b3bb68023b6bffd5095118fcc13e79dcf014fe4e47e065c350c7cc72af2e53eff895f11ba1bbb6a2b33271c1116ee870f266618eadfc2e78aa7349c@52.176.100.77:30303")
.map(EnodeURL::fromString)
.collect(toList()));
public static List<EnodeURL> GOERLI_BOOTSTRAP_NODES =
Collections.unmodifiableList(
Stream.of(
"enode://011f758e6552d105183b1761c5e2dea0111bc20fd5f6422bc7f91e0fabbec9a6595caf6239b37feb773dddd3f87240d99d859431891e4a642cf2a0a9e6cbb98a@51.141.78.53:30303",
"enode://176b9417f511d05b6b2cf3e34b756cf0a7096b3094572a8f6ef4cdcb9d1f9d00683bf0f83347eebdf3b81c3521c2332086d9592802230bf528eaf606a1d9677b@13.93.54.137:30303",
"enode://46add44b9f13965f7b9875ac6b85f016f341012d84f975377573800a863526f4da19ae2c620ec73d11591fa9510e992ecc03ad0751f53cc02f7c7ed6d55c7291@94.237.54.114:30313",
"enode://b5948a2d3e9d486c4d75bf32713221c2bd6cf86463302339299bd227dc2e276cd5a1c7ca4f43a0e9122fe9af884efed563bd2a1fd28661f3b5f5ad7bf1de5949@18.218.250.66:30303")
.map(EnodeURL::fromString)
.collect(toList()));
public static List<EnodeURL> CLASSIC_BOOTSTRAP_NODES =
Collections.unmodifiableList(
Stream.of(
"enode://158ac5a4817265d0d8b977660b3dbe9abee5694ed212f7091cbf784ddf47623ed015e1cb54594d10c1c46118747ddabe86ebf569cf24ae91f2daa0f1adaae390@159.203.56.33:30303",
"enode://efd48ad0879eeb7f9cb5e50f33f7bc21e805a72e90361f145baaa22dd75d111e7cd9c93f1b7060dcb30aa1b3e620269336dbf32339fea4c18925a4c15fe642df@18.205.66.229:30303",
"enode://5fbfb426fbb46f8b8c1bd3dd140f5b511da558cd37d60844b525909ab82e13a25ee722293c829e52cb65c2305b1637fa9a2ea4d6634a224d5f400bfe244ac0de@162.243.55.45:30303",
"enode://6dd3ac8147fa82e46837ec8c3223d69ac24bcdbab04b036a3705c14f3a02e968f7f1adfcdb002aacec2db46e625c04bf8b5a1f85bb2d40a479b3cc9d45a444af@104.237.131.102:30303",
"enode://b9e893ea9cb4537f4fed154233005ae61b441cd0ecd980136138c304fefac194c25a16b73dac05fc66a4198d0c15dd0f33af99b411882c68a019dfa6bb703b9d@18.130.93.66:30303",
"enode://3fe9705a02487baea45c1ffebfa4d84819f5f1e68a0dbc18031553242a6a08e39499b61e361a52c2a92f9553efd63763f6fdd34692be0d4ba6823bb2fc346009@178.62.238.75:30303",
"enode://d50facc65e46bda6ff594b6e95491efa16e067de41ae96571d9f3cb853d538c44864496fa5e4df10115f02bbbaf47853f932e110a44c89227da7c30e96840596@188.166.163.187:30303",
"enode://a0d5c589dc02d008fe4237da9877a5f1daedee0227ab612677eabe323520f003eb5e311af335de9f7964c2092bbc2b3b7ab1cce5a074d8346959f0868b4e366e@46.101.78.44:30303",
"enode://c071d96b0c0f13006feae3977fb1b3c2f62caedf643df9a3655bc1b60f777f05e69a4e58bf3547bb299210092764c56df1e08380e91265baa845dca8bc0a71da@68.183.99.5:30303",
"enode://83b33409349ffa25e150555f7b4f8deebc68f3d34d782129dc3c8ba07b880c209310a4191e1725f2f6bef59bce9452d821111eaa786deab08a7e6551fca41f4f@206.189.68.191:30303",
"enode://0daae2a30f2c73b0b257746587136efb8e3479496f7ea1e943eeb9a663b72dd04582f699f7010ee02c57fc45d1f09568c20a9050ff937f9139e2973ddd98b87b@159.89.169.103:30303",
"enode://50808461dd73b3d70537e4c1e5fafd1132b3a90f998399af9205f8889987d62096d4e853813562dd43e7270a71c9d9d4e4dd73a534fdb22fbac98c389c1a7362@178.128.55.119:30303",
"enode://5cd218959f8263bc3721d7789070806b0adff1a0ed3f95ec886fb469f9362c7507e3b32b256550b9a7964a23a938e8d42d45a0c34b332bfebc54b29081e83b93@35.187.57.94:30303",
"enode://66498ac935f3f54d873de4719bf2d6d61e0c74dd173b547531325bcef331480f9bedece91099810971c8567eeb1ae9f6954b013c47c6dc51355bbbbae65a8c16@54.148.165.1:30303",
"enode://73e74ce7426a17aa2d8b5bb64d796ec7dc4dcee2af9bbdd4434394d1e4e52e650b9e39202435fca29ce65c242fd6e59b93ed2cf37f04b645fb23e306273816ad@54.148.165.1:30304",
"enode://8809dadb6e6145bbbce832f8e3cb54f23c84db1c4957dac04be26157a16eceb91bb2567e3a2eb794bf8e7df937fd2de46d4214416d3c199f05d3b6fa76a5ef6d@51.141.115.3:30303")
.map(EnodeURL::fromString)
.collect(toList()));
public static List<EnodeURL> KOTTI_BOOTSTRAP_NODES =
Collections.unmodifiableList(
Stream.of(
// Authority Nodes
"enode://06333009fc9ef3c9e174768e495722a7f98fe7afd4660542e983005f85e556028410fd03278944f44cfe5437b1750b5e6bd1738f700fe7da3626d52010d2954c@51.141.15.254:30303", // @q9f Parity Authority
"enode://93c94e999be5dd854c5d82a7cf5c14822973b5d9badb56ad4974586ec4d4f1995c815af795c20bb6e0a6226d3ee55808435c4dc89baf94ee581141b064d19dfc@80.187.116.161:25720",
"enode://ae8658da8d255d1992c3ec6e62e11d6e1c5899aa1566504bc1ff96a0c9c8bd44838372be643342553817f5cc7d78f1c83a8093dee13d77b3b0a583c050c81940@18.232.185.151:30303",
"enode://b477ca6d507a3f57070783eb62ba838847635f8b1a0cbffb8b7f8173f5894cf550f0225a5c279341e2d862a606e778b57180a4f1db3db78c51eadcfa4fdc6963@40.68.240.160:30303",
// @q9f Bootnodes
"enode://4956f6924335c951cb70cbc169a85c081f6ff0b374aa2815453b8a3132b49613f38a1a6b8e103f878dbec86364f60091e92a376d7cd3aca9d82d2f2554794e63@51.15.97.240:41235", // @q9f Core-Geth Ginan
"enode://6c9a052c01bb9995fa53bebfcdbc17733fe90708270d0e6d8e38dc57b32e1dbe8c287590b634ee9753b94ba302f411c96519c7fa07df0df6a6848149d819b2c5@51.15.70.7:41235", // @q9f Core-Geth Polis
"enode://95a7302fd8f35d9ad716a591b90dfe839dbf2d3d6a6737ef39e07899f844ad82f1659dd6212492922dd36705fb0a1e984c1d5d5c42069d5bd329388831e820c1@51.15.97.240:45678", // @q9f Parity Ginan
"enode://8c5c4dec9a0728c7058d3c29698bae888adc244e443cebc21f3d708a20751511acbf98a52b5e5ea472f8715c658913e8084123461fd187a4980a0600420b0791@51.15.70.7:45678", // @q9f Parity Polis
"enode://efd7391a3bed73ad74ae5760319bb48f9c9f1983ff22964422688cdb426c5d681004ece26c47121396653cf9bafe7104aa4ecff70e24cc5b11fd76be8e5afce0@51.158.191.43:45678" // @q9f Besu Mizar
)
.map(EnodeURL::fromString)
.collect(toList()));
public static List<EnodeURL> MORDOR_BOOTSTRAP_NODES =
Collections.unmodifiableList(
Stream.of(
"enode://642cf9650dd8869d42525dbf6858012e3b4d64f475e733847ab6f7742341a4397414865d953874e8f5ed91b0e4e1c533dee14ad1d6bb276a5459b2471460ff0d@157.230.152.87:30303", // @meowbits Mordor
"enode://651b484b652c07c72adebfaaf8bc2bd95b420b16952ef3de76a9c00ef63f07cca02a20bd2363426f9e6fe372cef96a42b0fec3c747d118f79fd5e02f2a4ebd4e@51.158.190.99:45678", // @q9f Core-Geth Lyrae
"enode://859ed8c19ea04eaea41f1cf17c8d2710e2e0affb97328c8392a79f1764118edf2344f1941299f0d676772fa6054447e6f9b3af96444e350b417442bfd7cc832b@34.68.243.226:30303",
"enode://9b1bf9613d859ac2071d88509ab40a111b75c1cfc51f4ad78a1fdbb429ff2405de0dc5ea8ae75e6ac88e03e51a465f0b27b517e78517f7220ae163a2e0692991@51.158.190.99:30426", // @q9f Parity Lyrae
"enode://534d18fd46c5cd5ba48a68250c47cea27a1376869755ed631c94b91386328039eb607cf10dd8d0aa173f5ec21e3fb45c5d7a7aa904f97bc2557e9cb4ccc703f1@51.158.190.99:30303", // @q9f Besu Lyrae
"enode://0d70715514674189792de4ad294b658c96d0ec40fe517fbe9cb7949d3792f25f82357ec77d1bd8bed6ec719ca0c1d608bb34cc702bf3d4bb4507f7280f835452@154.5.137.161:61410",
"enode://f50f52b5fe18fd281748905bf5dad5439471f32cc02d99fecf960a983c1f4eba701ffca96afd2f2a68dcf6f97c5d02b566bafce1f361b51717e1a03c1dd9a836@157.230.42.102:30303",
"enode://111bd28d5b2c1378d748383fd83ff59572967c317c3063a9f475a26ad3f1517642a164338fb5268d4e32ea1cc48e663bd627dec572f1d201c7198518e5a506b1@88.99.216.30:45834",
"enode://07fa944c83597d5e935a2abe6194ed40fc7239e86111c971a43537a33d0184d1cd1b3f1291b8dd3bcfaebfbb802de77c843465a00065b39120c338fdd877ca4a@35.238.126.60:51240",
"enode://4ca79bbff7491fed82221259e3f27492e27b95b600594e2f8d5f1fa011123ea267e71873a0db3993e5109845d519d8b849ba2c7e4b48b09bedebb99e1c2ce304@35.238.132.8:30303",
"enode://06fdbeb591d26f53b2e7250025fe955ca013431ded930920cf1e3cd1f0c920e9a5e727949d209bc25a07288327b525279b11c5551315c50ff0db483e69fc159b@34.218.225.178:32000",
"enode://03b133f731049e3f7be827339c3759be92778c05e54a1847d178c0fdb56fa168aa1e7e61fc77791a7afdd0328a00318f73c01212eb3f3bbe919f5ce8f5b4a314@192.227.105.4:32000",
"enode://2592745efd35b4be443b8ee25fd2099de132e037951f9f6d3e8805e0a78f213537f71264b973f1a83a57372f57bbe6acac8d6ae678f39393221c045ccbe3b18c@51.15.116.226:30304",
"enode://c0afb552bfe932c72598caa245aef82d55c23555622c5ab946d4e49bb0ab694e46086dcff6793a606527f323ef94d0eb499d01ceb26aefb6fa3f8977105d7dd8@157.230.152.87:52138",
"enode://5a1399e6ba3268721dd7656530cd81230dbeb950570c6e3ec2a45effc50c032f66633c5eaaa1a49c51ba1849db37a7bef6e402779ad12dc9943f117e058d7760@35.225.124.17:39306",
"enode://15b6ae4e9e18772f297c90d83645b0fbdb56667ce2d747d6d575b21d7b60c2d3cd52b11dec24e418438caf80ddc433232b3685320ed5d0e768e3972596385bfc@51.158.191.43:41235", // @q9f Core-Geth Mizar
"enode://5a1399e6ba3268721dd7656530cd81230dbeb950570c6e3ec2a45effc50c032f66633c5eaaa1a49c51ba1849db37a7bef6e402779ad12dc9943f117e058d7760@34.69.121.227:30303",
"enode://f840b007500f50c98ea6f9c9e56dabf4690bbbbb7036d43682c531204341aff8315013547e5bee54117eb22bd3603585ae6bf713d9fa710659533fcab65d5b84@34.69.50.155:42078",
"enode://b45f008ab8ad73966d0c8c0c923c50f47c0ae50c37a9ea05cc28b00cb94802145a4158412a526fdadd7e539db5eaab72f06a9046a34576ecf5a68efc41ba9d01@34.68.40.145:30303",
"enode://1f378945c9b2eeb292d910f461911fd99520a23beda1bc5c8aea12be09e249f8d92615aa3d4d75c938004db5281dabad4a9cf7a0f07ec7c1fc8e7721addc7c85@34.205.41.164:40218",
"enode://2b69a3926f36a7748c9021c34050be5e0b64346225e477fe7377070f6289bd363b2be73a06010fd516e6ea3ee90778dd0399bc007bb1281923a79374f842675a@51.15.116.226:30303",
"enode://a59e33ccd2b3e52d578f1fbd70c6f9babda2650f0760d6ff3b37742fdcdfdb3defba5d56d315b40c46b70198c7621e63ffa3f987389c7118634b0fefbbdfa7fd@51.158.191.43:38556", // @q9f Parity Mizar
"enode://f840b007500f50c98ea6f9c9e56dabf4690bbbbb7036d43682c531204341aff8315013547e5bee54117eb22bd3603585ae6bf713d9fa710659533fcab65d5b84@35.238.101.58:30303",
"enode://07fa944c83597d5e935a2abe6194ed40fc7239e86111c971a43537a33d0184d1cd1b3f1291b8dd3bcfaebfbb802de77c843465a00065b39120c338fdd877ca4a@35.238.126.60:30000",
"enode://617a2009783a09085ed0d5d5e7250e2e3c142f73448bf28200284bf4825c5926a80f3e9fb481edf38b89ade2aa0ad5a2f14cc935f3150e36e648eddda674fd70@35.225.5.185:51320")
.map(EnodeURL::fromString)
.collect(toList()));
public static List<EnodeURL> YOLO_V1_BOOTSTRAP_NODES =
Collections.unmodifiableList(
Stream.of(
"enode://9e1096aa59862a6f164994cb5cb16f5124d6c992cdbf4535ff7dea43ea1512afe5448dca9df1b7ab0726129603f1a3336b631e4d7a1a44c94daddd03241587f9@52.56.120.77:30303")
.map(EnodeURL::fromString)
.collect(toList()));
private boolean active = true;
private String bindHost = NetworkUtility.INADDR_ANY;
private int bindPort = 30303;
private String advertisedHost = "127.0.0.1";
private int bucketSize = 16;
private List<EnodeURL> bootnodes = new ArrayList<>();
public static DiscoveryConfiguration create() {
return new DiscoveryConfiguration();
}
public static void assertValidBootnodes(final List<EnodeURL> bootnodes) {
final List<EnodeURL> invalidEnodes =
bootnodes.stream().filter(e -> !e.isRunningDiscovery()).collect(Collectors.toList());
if (invalidEnodes.size() > 0) {
String invalidBootnodes =
invalidEnodes.stream().map(EnodeURL::toString).collect(Collectors.joining(","));
String errorMsg =
"Bootnodes must have discovery enabled. Invalid bootnodes: " + invalidBootnodes + ".";
throw new IllegalArgumentException(errorMsg);
}
}
public String getBindHost() {
return bindHost;
}
public DiscoveryConfiguration setBindHost(final String bindHost) {
this.bindHost = bindHost;
return this;
}
public int getBindPort() {
return bindPort;
}
public DiscoveryConfiguration setBindPort(final int bindPort) {
this.bindPort = bindPort;
return this;
}
public boolean isActive() {
return active;
}
public DiscoveryConfiguration setActive(final boolean active) {
this.active = active;
return this;
}
public List<EnodeURL> getBootnodes() {
return bootnodes;
}
public DiscoveryConfiguration setBootnodes(final List<EnodeURL> bootnodes) {
assertValidBootnodes(bootnodes);
this.bootnodes = bootnodes;
return this;
}
public String getAdvertisedHost() {
return advertisedHost;
}
public DiscoveryConfiguration setAdvertisedHost(final String advertisedHost) {
this.advertisedHost = advertisedHost;
return this;
}
public int getBucketSize() {
return bucketSize;
}
public DiscoveryConfiguration setBucketSize(final int bucketSize) {
this.bucketSize = bucketSize;
return this;
}
@Override
public boolean equals(final Object o) {
if (o == this) {
return true;
}
if (!(o instanceof DiscoveryConfiguration)) {
return false;
}
final DiscoveryConfiguration that = (DiscoveryConfiguration) o;
return active == that.active
&& bindPort == that.bindPort
&& bucketSize == that.bucketSize
&& Objects.equals(bindHost, that.bindHost)
&& Objects.equals(advertisedHost, that.advertisedHost)
&& Objects.equals(bootnodes, that.bootnodes);
}
@Override
public int hashCode() {
return Objects.hash(active, bindHost, bindPort, advertisedHost, bucketSize, bootnodes);
}
@Override
public String toString() {
return "DiscoveryConfiguration{"
+ "active="
+ active
+ ", bindHost='"
+ bindHost
+ '\''
+ ", bindPort="
+ bindPort
+ ", advertisedHost='"
+ advertisedHost
+ '\''
+ ", bucketSize="
+ bucketSize
+ ", bootnodes="
+ bootnodes
+ '}';
}
}
| 1 | 23,448 | Does order matter? Should we sort by enode key to make duplication checking easier? | hyperledger-besu | java |
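The review on this record asks whether sorting the bootnodes by enode key would make duplicate checking easier. A standalone Java sketch of that idea follows; it is a hypothetical helper working on raw enode strings (the shortened keys are placeholders, and nothing here is part of DiscoveryConfiguration).

import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class BootnodeSortSketch {
  // nodeKey extracts the public-key portion of "enode://<key>@<host>:<port>".
  static String nodeKey(final String enode) {
    return enode.substring("enode://".length(), enode.indexOf('@'));
  }

  public static void main(final String[] args) {
    final List<String> bootnodes = new ArrayList<>(List.of(
        "enode://bbb@10.0.0.2:30303",
        "enode://aaa@10.0.0.1:30303",
        "enode://aaa@10.0.0.3:30303")); // same key as the previous entry
    // Sorting by key puts duplicate keys next to each other and keeps the list order stable across edits.
    bootnodes.sort(Comparator.comparing(BootnodeSortSketch::nodeKey));
    final Set<String> seen = new HashSet<>();
    for (final String enode : bootnodes) {
      if (!seen.add(nodeKey(enode))) {
        System.out.println("duplicate node key: " + nodeKey(enode));
      }
    }
  }
}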
@@ -132,11 +132,11 @@ func (ash *Handler) Provision(ctx caddy.Context) error {
return err
}
- acmeAuth, err := acme.NewAuthority(
- auth.GetDatabase().(nosql.DB), // stores all the server state
- ash.Host, // used for directory links; TODO: not needed
- strings.Trim(ash.PathPrefix, "/"), // used for directory links
- auth) // configures the signing authority
+ acmeAuth, err := acme.New(auth, acme.AuthorityOptions{
+ DB: auth.GetDatabase().(nosql.DB), // stores all the server state
+ DNS: ash.Host, // used for directory links; TODO: not needed
+ Prefix: strings.Trim(ash.PathPrefix, "/"), // used for directory links
+ })
if err != nil {
return err
} | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acmeserver
import (
"fmt"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddypki"
"github.com/go-chi/chi"
"github.com/smallstep/certificates/acme"
acmeAPI "github.com/smallstep/certificates/acme/api"
"github.com/smallstep/certificates/authority"
"github.com/smallstep/certificates/authority/provisioner"
"github.com/smallstep/certificates/db"
"github.com/smallstep/nosql"
)
func init() {
caddy.RegisterModule(Handler{})
}
// Handler is an ACME server handler.
type Handler struct {
// The ID of the CA to use for signing. This refers to
// the ID given to the CA in the `pki` app. If omitted,
// the default ID is "local".
CA string `json:"ca,omitempty"`
// The hostname or IP address by which ACME clients
// will access the server. This is used to populate
// the ACME directory endpoint. Default: localhost.
// COMPATIBILITY NOTE / TODO: This property may go away in the
// future, as it is currently only required due to
// limitations in the underlying library. Do not rely
// on this property long-term; check release notes.
Host string `json:"host,omitempty"`
// The path prefix under which to serve all ACME
// endpoints. All other requests will not be served
// by this handler and will be passed through to
// the next one. Default: "/acme/"
// COMPATIBILITY NOTE / TODO: This property may go away in the
// future, as it is currently only required due to
// limitations in the underlying library. Do not rely
// on this property long-term; check release notes.
PathPrefix string `json:"path_prefix,omitempty"`
acmeEndpoints http.Handler
}
// CaddyModule returns the Caddy module information.
func (Handler) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.handlers.acme_server",
New: func() caddy.Module { return new(Handler) },
}
}
// Provision sets up the ACME server handler.
func (ash *Handler) Provision(ctx caddy.Context) error {
// set some defaults
if ash.CA == "" {
ash.CA = caddypki.DefaultCAID
}
if ash.Host == "" {
ash.Host = defaultHost
}
if ash.PathPrefix == "" {
ash.PathPrefix = defaultPathPrefix
}
// get a reference to the configured CA
appModule, err := ctx.App("pki")
if err != nil {
return err
}
pkiApp := appModule.(*caddypki.PKI)
ca, ok := pkiApp.CAs[ash.CA]
if !ok {
return fmt.Errorf("no certificate authority configured with id: %s", ash.CA)
}
dbFolder := filepath.Join(caddy.AppDataDir(), "acme_server", "db")
// TODO: See https://github.com/smallstep/nosql/issues/7
err = os.MkdirAll(dbFolder, 0755)
if err != nil {
return fmt.Errorf("making folder for ACME server database: %v", err)
}
authorityConfig := caddypki.AuthorityConfig{
AuthConfig: &authority.AuthConfig{
Provisioners: provisioner.List{
&provisioner.ACME{
Name: ash.CA,
Type: provisioner.TypeACME.String(),
Claims: &provisioner.Claims{
MinTLSDur: &provisioner.Duration{Duration: 5 * time.Minute},
MaxTLSDur: &provisioner.Duration{Duration: 24 * time.Hour * 365},
DefaultTLSDur: &provisioner.Duration{Duration: 12 * time.Hour},
},
},
},
},
DB: &db.Config{
Type: "badger",
DataSource: dbFolder,
},
}
auth, err := ca.NewAuthority(authorityConfig)
if err != nil {
return err
}
acmeAuth, err := acme.NewAuthority(
auth.GetDatabase().(nosql.DB), // stores all the server state
ash.Host, // used for directory links; TODO: not needed
strings.Trim(ash.PathPrefix, "/"), // used for directory links
auth) // configures the signing authority
if err != nil {
return err
}
// create the router for the ACME endpoints
acmeRouterHandler := acmeAPI.New(acmeAuth)
r := chi.NewRouter()
r.Route(ash.PathPrefix, func(r chi.Router) {
acmeRouterHandler.Route(r)
})
ash.acmeEndpoints = r
return nil
}
func (ash Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
if strings.HasPrefix(r.URL.Path, ash.PathPrefix) {
ash.acmeEndpoints.ServeHTTP(w, r)
return nil
}
return next.ServeHTTP(w, r)
}
const (
defaultHost = "localhost"
defaultPathPrefix = "/acme/"
)
// Interface guards
var (
_ caddyhttp.MiddlewareHandler = (*Handler)(nil)
_ caddy.Provisioner = (*Handler)(nil)
)
| 1 | 15,759 | Can you explain this too? This seems to go beyond linting. | caddyserver-caddy | go |
@@ -151,12 +151,6 @@ describe('Components', () => {
expect(scratch.innerHTML).to.equal('');
});
- // Test for #651
- it('should set enumerable boolean attribute', () => {
- render(<input spellcheck={false} />, scratch);
- expect(scratch.firstChild.spellcheck).to.equal(false);
- });
-
// Test for Issue #73
it('should remove orphaned elements replaced by Components', () => {
class Comp extends Component { | 1 | import { h, cloneElement, render, rerender, Component } from '../../src/preact';
/** @jsx h */
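// spyAll wraps every own enumerable method of the given object in a sinon spy so call counts can be asserted.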
let spyAll = obj => Object.keys(obj).forEach( key => sinon.spy(obj,key) );
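// getAttributes collects a DOM node's attributes into a plain name/value map for easier assertions.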
function getAttributes(node) {
let attrs = {};
if (node.attributes) {
for (let i=node.attributes.length; i--; ) {
attrs[node.attributes[i].name] = node.attributes[i].value;
}
}
return attrs;
}
// hacky normalization of attribute order across browsers.
function sortAttributes(html) {
return html.replace(/<([a-z0-9-]+)((?:\s[a-z0-9:_.-]+=".*?")+)((?:\s*\/)?>)/gi, (s, pre, attrs, after) => {
let list = attrs.match(/\s[a-z0-9:_.-]+=".*?"/gi).sort( (a, b) => a>b ? 1 : -1 );
if (~after.indexOf('/')) after = '></'+pre+'>';
return '<' + pre + list.join('') + after;
});
}
const Empty = () => null;
describe('Components', () => {
let scratch;
before( () => {
scratch = document.createElement('div');
(document.body || document.documentElement).appendChild(scratch);
});
beforeEach( () => {
let c = scratch.firstElementChild;
if (c) render(<Empty />, scratch, c);
scratch.innerHTML = '';
});
after( () => {
scratch.parentNode.removeChild(scratch);
scratch = null;
});
it('should render components', () => {
class C1 extends Component {
render() {
return <div>C1</div>;
}
}
sinon.spy(C1.prototype, 'render');
render(<C1 />, scratch);
expect(C1.prototype.render)
.to.have.been.calledOnce
.and.to.have.been.calledWithMatch({}, {})
.and.to.have.returned(sinon.match({ nodeName:'div' }));
expect(scratch.innerHTML).to.equal('<div>C1</div>');
});
it('should render functional components', () => {
const PROPS = { foo:'bar', onBaz:()=>{} };
const C3 = sinon.spy( props => <div {...props} /> );
render(<C3 {...PROPS} />, scratch);
expect(C3)
.to.have.been.calledOnce
.and.to.have.been.calledWithMatch(PROPS)
.and.to.have.returned(sinon.match({
nodeName: 'div',
attributes: PROPS
}));
expect(scratch.innerHTML).to.equal('<div foo="bar"></div>');
});
it('should render components with props', () => {
const PROPS = { foo:'bar', onBaz:()=>{} };
let constructorProps;
class C2 extends Component {
constructor(props) {
super(props);
constructorProps = props;
}
render(props) {
return <div {...props} />;
}
}
sinon.spy(C2.prototype, 'render');
render(<C2 {...PROPS} />, scratch);
expect(constructorProps).to.deep.equal(PROPS);
expect(C2.prototype.render)
.to.have.been.calledOnce
.and.to.have.been.calledWithMatch(PROPS, {})
.and.to.have.returned(sinon.match({
nodeName: 'div',
attributes: PROPS
}));
expect(scratch.innerHTML).to.equal('<div foo="bar"></div>');
});
it('should clone components', () => {
function Comp () {}
let instance = <Comp a />;
let clone = cloneElement(instance);
expect(clone).to.deep.equal(instance);
});
it('should render string', () => {
class StringComponent extends Component {
render() {
return "Hi there";
}
}
render(<StringComponent />, scratch);
expect(scratch.innerHTML).to.equal('Hi there');
});
it('should render number as string', () => {
class NumberComponent extends Component {
render() {
return 42;
}
}
render(<NumberComponent />, scratch);
expect(scratch.innerHTML).to.equal('42');
});
it('should render null as empty string', () => {
class NullComponent extends Component {
render() {
return null;
}
}
render(<NullComponent />, scratch);
expect(scratch.innerHTML).to.equal('');
});
// Test for #651
it('should set enumerable boolean attribute', () => {
render(<input spellcheck={false} />, scratch);
expect(scratch.firstChild.spellcheck).to.equal(false);
});
// Test for Issue #73
it('should remove orphaned elements replaced by Components', () => {
class Comp extends Component {
render() {
return <span>span in a component</span>;
}
}
let root;
function test(content) {
root = render(content, scratch, root);
}
test(<Comp />);
test(<div>just a div</div>);
test(<Comp />);
expect(scratch.innerHTML).to.equal('<span>span in a component</span>');
});
// Test for Issue #176
it('should remove children when root changes to text node', () => {
let comp;
class Comp extends Component {
render(_, { alt }) {
return alt ? 'asdf' : <div>test</div>;
}
}
render(<Comp ref={c=>comp=c} />, scratch);
comp.setState({ alt:true });
comp.forceUpdate();
expect(scratch.innerHTML, 'switching to textnode').to.equal('asdf');
comp.setState({ alt:false });
comp.forceUpdate();
expect(scratch.innerHTML, 'switching to element').to.equal('<div>test</div>');
comp.setState({ alt:true });
comp.forceUpdate();
expect(scratch.innerHTML, 'switching to textnode 2').to.equal('asdf');
});
// Test for Issue #254
it('should not recycle common class children with different keys', () => {
let idx = 0;
let msgs = ['A','B','C','D','E','F','G','H'];
let sideEffect = sinon.spy();
class Comp extends Component {
componentWillMount() {
this.innerMsg = msgs[(idx++ % 8)];
sideEffect();
}
render() {
return <div>{this.innerMsg}</div>;
}
}
sinon.spy(Comp.prototype, 'componentWillMount');
class GoodContainer extends Component {
constructor(props) {
super(props);
this.state.alt = false;
}
render(_, {alt}) {
return (
<div>
{alt ? null : (<Comp key={1} alt={alt}/>)}
{alt ? null : (<Comp key={2} alt={alt}/>)}
{alt ? (<Comp key={3} alt={alt}/>) : null}
</div>
);
}
}
class BadContainer extends Component {
constructor(props) {
super(props);
this.state.alt = false;
}
render(_, {alt}) {
return (
<div>
{alt ? null : (<Comp alt={alt}/>)}
{alt ? null : (<Comp alt={alt}/>)}
{alt ? (<Comp alt={alt}/>) : null}
</div>
);
}
}
let good, bad;
let root = render(<GoodContainer ref={c=>good=c} />, scratch);
expect(scratch.textContent, 'new component with key present').to.equal('AB');
expect(Comp.prototype.componentWillMount).to.have.been.calledTwice;
expect(sideEffect).to.have.been.calledTwice;
sideEffect.resetHistory();
Comp.prototype.componentWillMount.resetHistory();
good.setState({alt: true});
good.forceUpdate();
expect(scratch.textContent, 'new component with key present re-rendered').to.equal('C');
// we are recycling the first 2 components already rendered, just need a new one
expect(Comp.prototype.componentWillMount).to.have.been.calledOnce;
expect(sideEffect).to.have.been.calledOnce;
sideEffect.resetHistory();
Comp.prototype.componentWillMount.resetHistory();
render(<BadContainer ref={c=>bad=c} />, scratch, root);
expect(scratch.textContent, 'new component without key').to.equal('DE');
expect(Comp.prototype.componentWillMount).to.have.been.calledTwice;
expect(sideEffect).to.have.been.calledTwice;
sideEffect.resetHistory();
Comp.prototype.componentWillMount.resetHistory();
bad.setState({alt: true});
bad.forceUpdate();
expect(scratch.textContent, 'new component without key re-rendered').to.equal('D');
expect(Comp.prototype.componentWillMount).to.not.have.been.called;
expect(sideEffect).to.not.have.been.called;
});
describe('props.children', () => {
it('should support passing children as a prop', () => {
const Foo = props => <div {...props} />;
render(<Foo a="b" children={[
<span class="bar">bar</span>,
'123',
456
]} />, scratch);
expect(scratch.innerHTML).to.equal('<div a="b"><span class="bar">bar</span>123456</div>');
});
it('should be ignored when explicit children exist', () => {
const Foo = props => <div {...props}>a</div>;
render(<Foo children={'b'} />, scratch);
expect(scratch.innerHTML).to.equal('<div>a</div>');
});
describe('should always be an array', () => {
let children;
let Foo = props => {
children = props.children;
return <div>{props.children}</div>;
};
let FunctionFoo = props => {
children = props.children;
return <div>{props.children[0](2)}</div>;
};
let Bar = () => <span>Bar</span>;
beforeEach(() => {
children = undefined;
});
it('with no child', () => {
render(<Foo></Foo>, scratch);
expect(children).to.be.an('array');
expect(children).to.have.lengthOf(0);
expect(scratch.innerHTML).to.equal('<div></div>');
});
it('with a text child', () => {
render(<Foo>text</Foo>, scratch);
expect(children).to.be.an('array');
expect(children).to.have.lengthOf(1);
expect(children[0]).to.be.a('string');
expect(scratch.innerHTML).to.equal('<div>text</div>');
});
it('with a DOM node child', () => {
render(<Foo><span /></Foo>, scratch);
expect(children).to.be.an('array');
expect(children).to.have.lengthOf(1);
expect(children[0]).to.be.an('object');
expect(scratch.innerHTML).to.equal('<div><span></span></div>');
});
it('with a Component child', () => {
render(<Foo><Bar /></Foo>, scratch);
expect(children).to.be.an('array');
expect(children).to.have.lengthOf(1);
expect(children[0]).to.be.an('object');
expect(scratch.innerHTML).to.equal('<div><span>Bar</span></div>');
});
it('with a function child', () => {
render(<FunctionFoo>{num => num.toFixed(2)}</FunctionFoo>, scratch);
expect(children).to.be.an('array');
expect(children).to.have.lengthOf(1);
expect(children[0]).to.be.a('function');
expect(scratch.innerHTML).to.equal('<div>2.00</div>');
});
it('with multiple children', () => {
render(<Foo><span /><Bar /><span /></Foo>, scratch);
expect(children).to.be.an('array');
expect(children).to.have.lengthOf(3);
expect(children[0]).to.be.an('object');
expect(scratch.innerHTML).to.equal('<div><span></span><span>Bar</span><span></span></div>');
});
});
});
describe('High-Order Components', () => {
it('should render nested functional components', () => {
const PROPS = { foo:'bar', onBaz:()=>{} };
const Outer = sinon.spy(
props => <Inner {...props} />
);
const Inner = sinon.spy(
props => <div {...props}>inner</div>
);
render(<Outer {...PROPS} />, scratch);
expect(Outer)
.to.have.been.calledOnce
.and.to.have.been.calledWithMatch(PROPS)
.and.to.have.returned(sinon.match({
nodeName: Inner,
attributes: PROPS
}));
expect(Inner)
.to.have.been.calledOnce
.and.to.have.been.calledWithMatch(PROPS)
.and.to.have.returned(sinon.match({
nodeName: 'div',
attributes: PROPS,
children: ['inner']
}));
expect(scratch.innerHTML).to.equal('<div foo="bar">inner</div>');
});
it('should re-render nested functional components', () => {
let doRender = null;
class Outer extends Component {
componentDidMount() {
let i = 1;
doRender = () => this.setState({ i: ++i });
}
componentWillUnmount() {}
render(props, { i }) {
return <Inner i={i} {...props} />;
}
}
sinon.spy(Outer.prototype, 'render');
sinon.spy(Outer.prototype, 'componentWillUnmount');
let j = 0;
const Inner = sinon.spy(
props => <div j={ ++j } {...props}>inner</div>
);
render(<Outer foo="bar" />, scratch);
// update & flush
doRender();
rerender();
expect(Outer.prototype.componentWillUnmount)
.not.to.have.been.called;
expect(Inner).to.have.been.calledTwice;
expect(Inner.secondCall)
.to.have.been.calledWithMatch({ foo:'bar', i:2 })
.and.to.have.returned(sinon.match({
attributes: {
j: 2,
i: 2,
foo: 'bar'
}
}));
expect(getAttributes(scratch.firstElementChild)).to.eql({
j: '2',
i: '2',
foo: 'bar'
});
// update & flush
doRender();
rerender();
expect(Inner).to.have.been.calledThrice;
expect(Inner.thirdCall)
.to.have.been.calledWithMatch({ foo:'bar', i:3 })
.and.to.have.returned(sinon.match({
attributes: {
j: 3,
i: 3,
foo: 'bar'
}
}));
expect(getAttributes(scratch.firstElementChild)).to.eql({
j: '3',
i: '3',
foo: 'bar'
});
});
it('should re-render nested components', () => {
let doRender = null,
alt = false;
class Outer extends Component {
componentDidMount() {
let i = 1;
doRender = () => this.setState({ i: ++i });
}
componentWillUnmount() {}
render(props, { i }) {
if (alt) return <div is-alt />;
return <Inner i={i} {...props} />;
}
}
sinon.spy(Outer.prototype, 'render');
sinon.spy(Outer.prototype, 'componentDidMount');
sinon.spy(Outer.prototype, 'componentWillUnmount');
let j = 0;
class Inner extends Component {
constructor(...args) {
super();
this._constructor(...args);
}
_constructor() {}
componentWillMount() {}
componentDidMount() {}
componentWillUnmount() {}
render(props) {
return <div j={ ++j } {...props}>inner</div>;
}
}
sinon.spy(Inner.prototype, '_constructor');
sinon.spy(Inner.prototype, 'render');
sinon.spy(Inner.prototype, 'componentWillMount');
sinon.spy(Inner.prototype, 'componentDidMount');
sinon.spy(Inner.prototype, 'componentWillUnmount');
render(<Outer foo="bar" />, scratch);
expect(Outer.prototype.componentDidMount).to.have.been.calledOnce;
// update & flush
doRender();
rerender();
expect(Outer.prototype.componentWillUnmount).not.to.have.been.called;
expect(Inner.prototype._constructor).to.have.been.calledOnce;
expect(Inner.prototype.componentWillUnmount).not.to.have.been.called;
expect(Inner.prototype.componentWillMount).to.have.been.calledOnce;
expect(Inner.prototype.componentDidMount).to.have.been.calledOnce;
expect(Inner.prototype.render).to.have.been.calledTwice;
expect(Inner.prototype.render.secondCall)
.to.have.been.calledWithMatch({ foo:'bar', i:2 })
.and.to.have.returned(sinon.match({
attributes: {
j: 2,
i: 2,
foo: 'bar'
}
}));
expect(getAttributes(scratch.firstElementChild)).to.eql({
j: '2',
i: '2',
foo: 'bar'
});
expect(sortAttributes(scratch.innerHTML)).to.equal(sortAttributes('<div foo="bar" j="2" i="2">inner</div>'));
// update & flush
doRender();
rerender();
expect(Inner.prototype.componentWillUnmount).not.to.have.been.called;
expect(Inner.prototype.componentWillMount).to.have.been.calledOnce;
expect(Inner.prototype.componentDidMount).to.have.been.calledOnce;
expect(Inner.prototype.render).to.have.been.calledThrice;
expect(Inner.prototype.render.thirdCall)
.to.have.been.calledWithMatch({ foo:'bar', i:3 })
.and.to.have.returned(sinon.match({
attributes: {
j: 3,
i: 3,
foo: 'bar'
}
}));
expect(getAttributes(scratch.firstElementChild)).to.eql({
j: '3',
i: '3',
foo: 'bar'
});
// update & flush
alt = true;
doRender();
rerender();
expect(Inner.prototype.componentWillUnmount).to.have.been.calledOnce;
expect(scratch.innerHTML).to.equal('<div is-alt="true"></div>');
// update & flush
alt = false;
doRender();
rerender();
expect(sortAttributes(scratch.innerHTML)).to.equal(sortAttributes('<div foo="bar" j="4" i="5">inner</div>'));
});
it('should resolve intermediary functional component', () => {
let ctx = {};
class Root extends Component {
getChildContext() {
return { ctx };
}
render() {
return <Func />;
}
}
const Func = sinon.spy( () => <Inner /> );
class Inner extends Component {
componentWillMount() {}
componentDidMount() {}
componentWillUnmount() {}
render() {
return <div>inner</div>;
}
}
spyAll(Inner.prototype);
let root = render(<Root />, scratch);
expect(Inner.prototype.componentWillMount).to.have.been.calledOnce;
expect(Inner.prototype.componentDidMount).to.have.been.calledOnce;
expect(Inner.prototype.componentWillMount).to.have.been.calledBefore(Inner.prototype.componentDidMount);
render(<asdf />, scratch, root);
expect(Inner.prototype.componentWillUnmount).to.have.been.calledOnce;
});
it('should unmount children of high-order components without unmounting parent', () => {
let outer, inner2, counter=0;
class Outer extends Component {
constructor(props, context) {
super(props, context);
outer = this;
this.state = {
child: this.props.child
};
}
componentWillUnmount(){}
componentWillMount(){}
componentDidMount(){}
render(_, { child:C }) {
return <C />;
}
}
spyAll(Outer.prototype);
class Inner extends Component {
componentWillUnmount(){}
componentWillMount(){}
componentDidMount(){}
render() {
return h('element'+(++counter));
}
}
spyAll(Inner.prototype);
class Inner2 extends Component {
constructor(props, context) {
super(props, context);
inner2 = this;
}
componentWillUnmount(){}
componentWillMount(){}
componentDidMount(){}
render() {
return h('element'+(++counter));
}
}
spyAll(Inner2.prototype);
render(<Outer child={Inner} />, scratch);
// outer should only have been mounted once
expect(Outer.prototype.componentWillMount, 'outer initial').to.have.been.calledOnce;
expect(Outer.prototype.componentDidMount, 'outer initial').to.have.been.calledOnce;
expect(Outer.prototype.componentWillUnmount, 'outer initial').not.to.have.been.called;
// inner should only have been mounted once
expect(Inner.prototype.componentWillMount, 'inner initial').to.have.been.calledOnce;
expect(Inner.prototype.componentDidMount, 'inner initial').to.have.been.calledOnce;
expect(Inner.prototype.componentWillUnmount, 'inner initial').not.to.have.been.called;
outer.setState({ child:Inner2 });
outer.forceUpdate();
expect(Inner2.prototype.render).to.have.been.calledOnce;
// outer should still only have been mounted once
expect(Outer.prototype.componentWillMount, 'outer swap').to.have.been.calledOnce;
expect(Outer.prototype.componentDidMount, 'outer swap').to.have.been.calledOnce;
expect(Outer.prototype.componentWillUnmount, 'outer swap').not.to.have.been.called;
// inner should only have been mounted once
expect(Inner2.prototype.componentWillMount, 'inner2 swap').to.have.been.calledOnce;
expect(Inner2.prototype.componentDidMount, 'inner2 swap').to.have.been.calledOnce;
expect(Inner2.prototype.componentWillUnmount, 'inner2 swap').not.to.have.been.called;
inner2.forceUpdate();
expect(Inner2.prototype.render, 'inner2 update').to.have.been.calledTwice;
expect(Inner2.prototype.componentWillMount, 'inner2 update').to.have.been.calledOnce;
expect(Inner2.prototype.componentDidMount, 'inner2 update').to.have.been.calledOnce;
expect(Inner2.prototype.componentWillUnmount, 'inner2 update').not.to.have.been.called;
});
it('should remount when swapping between HOC child types', () => {
class Outer extends Component {
render({ child: Child }) {
return <Child />;
}
}
class Inner extends Component {
componentWillMount() {}
componentWillUnmount() {}
render() {
return <div class="inner">foo</div>;
}
}
spyAll(Inner.prototype);
const InnerFunc = () => (
<div class="inner-func">bar</div>
);
let root = render(<Outer child={Inner} />, scratch, root);
expect(Inner.prototype.componentWillMount, 'initial mount').to.have.been.calledOnce;
expect(Inner.prototype.componentWillUnmount, 'initial mount').not.to.have.been.called;
Inner.prototype.componentWillMount.resetHistory();
root = render(<Outer child={InnerFunc} />, scratch, root);
expect(Inner.prototype.componentWillMount, 'unmount').not.to.have.been.called;
expect(Inner.prototype.componentWillUnmount, 'unmount').to.have.been.calledOnce;
Inner.prototype.componentWillUnmount.resetHistory();
root = render(<Outer child={Inner} />, scratch, root);
expect(Inner.prototype.componentWillMount, 'remount').to.have.been.calledOnce;
expect(Inner.prototype.componentWillUnmount, 'remount').not.to.have.been.called;
});
});
describe('Component Nesting', () => {
let useIntermediary = false;
let createComponent = (Intermediary) => {
class C extends Component {
componentWillMount() {}
render({ children }) {
if (!useIntermediary) return children[0];
let I = useIntermediary===true ? Intermediary : useIntermediary;
return <I>{children}</I>;
}
}
spyAll(C.prototype);
return C;
};
let createFunction = () => sinon.spy( ({ children }) => children[0] );
let root;
let rndr = n => root = render(n, scratch, root);
let F1 = createFunction();
let F2 = createFunction();
let F3 = createFunction();
let C1 = createComponent(F1);
let C2 = createComponent(F2);
let C3 = createComponent(F3);
let reset = () => [C1, C2, C3].reduce(
(acc, c) => acc.concat( Object.keys(c.prototype).map(key => c.prototype[key]) ),
[F1, F2, F3]
).forEach( c => c.reset && c.resetHistory() );
it('should handle lifecycle for no intermediary in component tree', () => {
reset();
rndr(<C1><C2><C3>Some Text</C3></C2></C1>);
expect(C1.prototype.componentWillMount, 'initial mount').to.have.been.calledOnce;
expect(C2.prototype.componentWillMount, 'initial mount').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'initial mount').to.have.been.calledOnce;
reset();
rndr(<C1><C2>Some Text</C2></C1>);
expect(C1.prototype.componentWillMount, 'unmount innermost, C1').not.to.have.been.called;
expect(C2.prototype.componentWillMount, 'unmount innermost, C2').not.to.have.been.called;
reset();
rndr(<C1><C3>Some Text</C3></C1>);
expect(C1.prototype.componentWillMount, 'swap innermost').not.to.have.been.called;
expect(C3.prototype.componentWillMount, 'swap innermost').to.have.been.calledOnce;
reset();
rndr(<C1><C2><C3>Some Text</C3></C2></C1>);
expect(C1.prototype.componentWillMount, 'inject between, C1').not.to.have.been.called;
expect(C2.prototype.componentWillMount, 'inject between, C2').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'inject between, C3').to.have.been.calledOnce;
});
it('should handle lifecycle for nested intermediary functional components', () => {
useIntermediary = true;
rndr(<div />);
reset();
rndr(<C1><C2><C3>Some Text</C3></C2></C1>);
expect(C1.prototype.componentWillMount, 'initial mount w/ intermediary fn, C1').to.have.been.calledOnce;
expect(C2.prototype.componentWillMount, 'initial mount w/ intermediary fn, C2').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'initial mount w/ intermediary fn, C3').to.have.been.calledOnce;
reset();
rndr(<C1><C2>Some Text</C2></C1>);
expect(C1.prototype.componentWillMount, 'unmount innermost w/ intermediary fn, C1').not.to.have.been.called;
expect(C2.prototype.componentWillMount, 'unmount innermost w/ intermediary fn, C2').not.to.have.been.called;
reset();
rndr(<C1><C3>Some Text</C3></C1>);
expect(C1.prototype.componentWillMount, 'swap innermost w/ intermediary fn').not.to.have.been.called;
expect(C3.prototype.componentWillMount, 'swap innermost w/ intermediary fn').to.have.been.calledOnce;
reset();
rndr(<C1><C2><C3>Some Text</C3></C2></C1>);
expect(C1.prototype.componentWillMount, 'inject between, C1 w/ intermediary fn').not.to.have.been.called;
expect(C2.prototype.componentWillMount, 'inject between, C2 w/ intermediary fn').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'inject between, C3 w/ intermediary fn').to.have.been.calledOnce;
});
it('should handle lifecycle for nested intermediary elements', () => {
useIntermediary = 'div';
rndr(<div />);
reset();
rndr(<C1><C2><C3>Some Text</C3></C2></C1>);
expect(C1.prototype.componentWillMount, 'initial mount w/ intermediary div, C1').to.have.been.calledOnce;
expect(C2.prototype.componentWillMount, 'initial mount w/ intermediary div, C2').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'initial mount w/ intermediary div, C3').to.have.been.calledOnce;
reset();
rndr(<C1><C2>Some Text</C2></C1>);
expect(C1.prototype.componentWillMount, 'unmount innermost w/ intermediary div, C1').not.to.have.been.called;
expect(C2.prototype.componentWillMount, 'unmount innermost w/ intermediary div, C2').not.to.have.been.called;
reset();
rndr(<C1><C3>Some Text</C3></C1>);
expect(C1.prototype.componentWillMount, 'swap innermost w/ intermediary div').not.to.have.been.called;
expect(C3.prototype.componentWillMount, 'swap innermost w/ intermediary div').to.have.been.calledOnce;
reset();
rndr(<C1><C2><C3>Some Text</C3></C2></C1>);
expect(C1.prototype.componentWillMount, 'inject between, C1 w/ intermediary div').not.to.have.been.called;
expect(C2.prototype.componentWillMount, 'inject between, C2 w/ intermediary div').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'inject between, C3 w/ intermediary div').to.have.been.calledOnce;
});
});
});
 | 1 | 12,217 | I just moved this into the render.js UT file since this test doesn't actually use any components | preactjs-preact | js
@@ -163,6 +163,8 @@ var ConfigCommand *cobra.Command = &cobra.Command{
util.Failed("Could not write ddev config file: %v", err)
}
+ _,_ = app.CreateSettingsFile()
+
// If a provider is specified, prompt about whether to do an import after config.
switch provider {
case ddevapp.DefaultProviderName: | 1 | package cmd
import (
"fmt"
"os"
"strings"
"path/filepath"
"github.com/drud/ddev/pkg/ddevapp"
"github.com/drud/ddev/pkg/output"
"github.com/drud/ddev/pkg/util"
"github.com/spf13/cobra"
)
// docrootRelPath is the relative path to the docroot where index.php is
var docrootRelPath string
// siteName is the name of the site
var siteName string
// pantheonEnvironment is the environment for pantheon, dev/test/prod
var pantheonEnvironment string
// fallbackPantheonEnvironment is our assumption that "dev" will be available in any case
const fallbackPantheonEnvironment = "dev"
// appType is the ddev app type, like drupal7/drupal8/wordpress
var appType string
// showConfigLocation if set causes the command to show the config location.
var showConfigLocation bool
// ConfigCommand represents the `ddev config` command
var ConfigCommand *cobra.Command = &cobra.Command{
Use: "config [provider]",
Short: "Create or modify a ddev project configuration in the current directory",
Run: func(cmd *cobra.Command, args []string) {
appRoot, err := os.Getwd()
if err != nil {
util.Failed("Could not determine current working directory: %v", err)
}
provider := ddevapp.DefaultProviderName
if len(args) > 1 {
output.UserOut.Fatal("Invalid argument detected. Please use 'ddev config' or 'ddev config [provider]' to configure a project.")
}
if len(args) == 1 {
provider = args[0]
}
app, err := ddevapp.NewApp(appRoot, provider)
if err != nil {
util.Failed("Could not create new config: %v", err)
}
// Support the show-config-location flag.
if showConfigLocation {
activeApp, err := ddevapp.GetActiveApp("")
if err != nil {
if strings.Contains(err.Error(), "Have you run 'ddev config'") {
util.Failed("No project configuration currently exists")
} else {
util.Failed("Failed to access project configuration: %v", err)
}
}
if activeApp.ConfigPath != "" && activeApp.ConfigExists() {
rawResult := make(map[string]interface{})
rawResult["configpath"] = activeApp.ConfigPath
rawResult["approot"] = activeApp.AppRoot
friendlyMsg := fmt.Sprintf("The project config location is %s", activeApp.ConfigPath)
output.UserOut.WithField("raw", rawResult).Print(friendlyMsg)
return
}
}
// If they have not given us any flags, we prompt for full info. Otherwise, we assume they're in control.
if siteName == "" && docrootRelPath == "" && pantheonEnvironment == "" && appType == "" {
err = app.PromptForConfig()
if err != nil {
util.Failed("There was a problem configuring your project: %v", err)
}
} else { // In this case we have to validate the provided items, or set to sane defaults
// Let them know if we're replacing the config.yaml
app.WarnIfConfigReplace()
// app.Name gets set to basename if not provided, or set to siteName if provided
if app.Name != "" && siteName == "" { // If we already have a c.Name and no siteName, leave c.Name alone
// Sorry this is empty but it makes the logic clearer.
} else if siteName != "" { // if we have a siteName passed in, use it for c.Name
app.Name = siteName
} else { // No siteName passed, c.Name not set: use c.Name from the directory
// nolint: vetshadow
pwd, err := os.Getwd()
util.CheckErr(err)
app.Name = filepath.Base(pwd)
}
// docrootRelPath must exist
if docrootRelPath != "" {
app.Docroot = docrootRelPath
if _, err = os.Stat(docrootRelPath); os.IsNotExist(err) {
util.Failed("The docroot provided (%v) does not exist", docrootRelPath)
}
} else if !cmd.Flags().Changed("docroot") {
app.Docroot = ddevapp.DiscoverDefaultDocroot(app)
}
// pantheonEnvironment must be appropriate, and can only be used with pantheon provider.
if provider != "pantheon" && pantheonEnvironment != "" {
util.Failed("--pantheon-environment can only be used with pantheon provider, for example 'ddev config pantheon --pantheon-environment=dev --docroot=docroot'")
}
if appType != "" && !ddevapp.IsValidAppType(appType) {
validAppTypes := strings.Join(ddevapp.GetValidAppTypes(), ", ")
util.Failed("apptype must be one of %s", validAppTypes)
}
detectedApptype := app.DetectAppType()
fullPath, pathErr := filepath.Abs(app.Docroot)
if pathErr != nil {
util.Failed("Failed to get absolute path to Docroot %s: %v", app.Docroot, pathErr)
}
if appType == "" || appType == detectedApptype { // Found an app, matches passed-in or no apptype passed
appType = detectedApptype
util.Success("Found a %s codebase at %s", detectedApptype, fullPath)
} else if appType != "" { // apptype was passed, but we found no app at all
util.Warning("You have specified a project type of %s but no project of that type is found in %s", appType, fullPath)
} else if appType != "" && detectedApptype != appType { // apptype was passed, app was found, but not the same type
util.Warning("You have specified a project type of %s but a project of type %s was discovered in %s", appType, detectedApptype, fullPath)
}
app.Type = appType
prov, _ := app.GetProvider()
if provider == "pantheon" {
pantheonProvider := prov.(*ddevapp.PantheonProvider)
if pantheonEnvironment == "" {
pantheonEnvironment = fallbackPantheonEnvironment // assume a basic default if they haven't provided one.
}
pantheonProvider.SetSiteNameAndEnv(pantheonEnvironment)
}
// But pantheon *does* validate "Name"
appTypeErr := prov.Validate()
if appTypeErr != nil {
util.Failed("Failed to validate project name %v and environment %v with provider %v: %v", app.Name, pantheonEnvironment, provider, appTypeErr)
} else {
util.Success("Using project name '%s' and environment '%s'.", app.Name, pantheonEnvironment)
}
err = app.ConfigFileOverrideAction()
if err != nil {
util.Failed("Failed to run ConfigFileOverrideAction: %v", err)
}
}
err = app.WriteConfig()
if err != nil {
util.Failed("Could not write ddev config file: %v", err)
}
// If a provider is specified, prompt about whether to do an import after config.
switch provider {
case ddevapp.DefaultProviderName:
util.Success("Configuration complete. You may now run 'ddev start'.")
default:
util.Success("Configuration complete. You may now run 'ddev start' or 'ddev pull'")
}
},
}
func init() {
validAppTypes := strings.Join(ddevapp.GetValidAppTypes(), ", ")
apptypeUsage := fmt.Sprintf("Provide the project type (one of %s). This is autodetected and this flag is necessary only to override the detection.", validAppTypes)
projectNameUsage := fmt.Sprintf("Provide the project name of project to configure (normally the same as the last part of directory name)")
ConfigCommand.Flags().StringVarP(&siteName, "projectname", "", "", projectNameUsage)
ConfigCommand.Flags().StringVarP(&docrootRelPath, "docroot", "", "", "Provide the relative docroot of the project, like 'docroot' or 'htdocs' or 'web', defaults to empty, the current directory")
ConfigCommand.Flags().StringVarP(&pantheonEnvironment, "pantheon-environment", "", "", "Choose the environment for a Pantheon site (dev/test/prod) (Pantheon-only)")
ConfigCommand.Flags().StringVarP(&appType, "projecttype", "", "", apptypeUsage)
// apptype flag is there for backwards compatibility.
ConfigCommand.Flags().StringVarP(&appType, "apptype", "", "", apptypeUsage+" This is the same as --projecttype and is included only for backwards compatibility.")
ConfigCommand.Flags().BoolVarP(&showConfigLocation, "show-config-location", "", false, "Output the location of the config.yaml file if it exists, or error that it doesn't exist.")
ConfigCommand.Flags().StringVarP(&siteName, "sitename", "", "", projectNameUsage+" This is the same as projectname and is included only for backwards compatibility")
err := ConfigCommand.Flags().MarkDeprecated("sitename", "The sitename flag is deprecated in favor of --projectname")
util.CheckErr(err)
err = ConfigCommand.Flags().MarkDeprecated("apptype", "The apptype flag is deprecated in favor of --projecttype")
util.CheckErr(err)
RootCmd.AddCommand(ConfigCommand)
}
 | 1 | 12,367 | We do have to capture and check the err here, right? I don't think we need to capture the location of the file for this context, though. | drud-ddev | php
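A minimal sketch of the error handling this comment asks for, assuming `CreateSettingsFile` returns a settings-file path and an error (implied by the two discarded return values in the patch); the exact signature and the failure message are assumptions, not ddev's actual API:

```go
// Hypothetical sketch; assumes CreateSettingsFile() (string, error).
// Drop-in for the spot the patch touches, right after app.WriteConfig().
if _, err := app.CreateSettingsFile(); err != nil {
	// The settings-file path is deliberately ignored; only the error is checked.
	util.Failed("Could not write settings file for %s: %v", app.Name, err)
}
```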
@@ -8,7 +8,6 @@ import time
import sys
import logging
from logging.handlers import RotatingFileHandler
-import os
from listenbrainz.listenstore import InfluxListenStore
import redis
| 1 |
from redis import Redis
from listenbrainz import config
import requests
from requests.exceptions import HTTPError
import ujson
import time
import sys
import logging
from logging.handlers import RotatingFileHandler
import os
from listenbrainz.listenstore import InfluxListenStore
import redis
redis_connection = None
LATEST_IMPORT_ENDPOINT = '{root_url}/1/latest-import'.format(root_url=config.BETA_URL)
# create a logger to log messages into LOG_FILE
logger = logging.getLogger('alpha_importer')
logger.setLevel(logging.DEBUG)
if config.IMPORTER_LOG_FILE == "-":
handler = logging.StreamHandler(sys.stdout)
else:
handler = RotatingFileHandler(config.IMPORTER_LOG_FILE, maxBytes=512 * 1024, backupCount=100)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
def init_redis_connection():
""" Initiates the connection to Redis """
global redis_connection
while True:
try:
redis_connection = Redis(host=config.REDIS_HOST, port=config.REDIS_PORT, decode_responses=True)
redis_connection.ping()
return
except redis.exceptions.ConnectionError as e:
logger.error("Couldn't connect to redis: {}:{}".format(config.REDIS_HOST, config.REDIS_PORT))
logger.error("Sleeping for 2 seconds and trying again...")
time.sleep(2)
def queue_empty():
return redis_connection.llen(config.IMPORTER_QUEUE_KEY) == 0
def queue_front():
# get the first element from queue and split it to get username and auth_token
token, username = redis_connection.lindex(config.IMPORTER_QUEUE_KEY, 0).split(" ", 1)
return username, token
def get_batch(username, max_ts):
get_url = '{}/1/user/{}/listens'.format(config.ALPHA_URL, username)
payload = {"max_ts": max_ts, "count": config.LISTENS_PER_GET}
r = requests.get(get_url, params = payload)
if r.status_code == 200:
return ujson.loads(r.text)
else:
logger.error("GET from alpha HTTP response code: {}".format(r.status_code))
raise HTTPError
def extract_data(batch):
data = {'listen_type':'import', 'payload': []}
for listen in batch['payload']['listens']:
if 'recording_msid' in listen:
del listen['recording_msid']
if 'release_msid' in listen['track_metadata']['additional_info']:
del listen['track_metadata']['additional_info']['release_msid']
if 'artist_msid' in listen['track_metadata']['additional_info']:
del listen['track_metadata']['additional_info']['artist_msid']
data['payload'].append(listen)
return data
def send_batch(data, token, retries=5):
"""
Sends a batch of data. If fails, then breaks the data into two parts and
recursively tries to send them, until we find the culprit listen
Returns: number of listens successfully sent
"""
if not data['payload']:
return 0
send_url = '{}/1/submit-listens'.format(config.BETA_URL)
done = False
for _ in range(retries):
r = requests.post(send_url, headers={'Authorization': 'Token {}'.format(token)}, data = ujson.dumps(data))
if r.status_code == 200:
done = True
break
else:
time.sleep(config.IMPORTER_DELAY)
if done:
return len(data['payload'])
elif not done and len(data['payload']) == 1:
# try to send the bad listen one more time and if it doesn't work
# log the error
r = requests.post(send_url, headers={'Authorization': 'Token {}'.format(token)}, data = ujson.dumps(data))
if r.status_code == 200:
return 1
else:
logger.error("Unable to submit bad listen to beta:")
logger.error(ujson.dumps(data))
logger.error("Response code from beta: {}".format(r.status_code))
logger.error(r.text)
return 0
else:
slice_index = len(data['payload']) // 2
# send first half
half_data = {
'listen_type': 'import',
'payload': data['payload'][slice_index :],
}
sent = send_batch(half_data, token, retries)
# send second half
half_data['payload'] = data['payload'][: slice_index]
sent += send_batch(half_data, token, retries)
return sent
def get_latest_import_time(username):
""" Send a GET request to the ListenBrainz Beta server to get the latest import time
from previous imports for the user.
"""
response = requests.get(
LATEST_IMPORT_ENDPOINT,
params={
'user_name': username
}
)
if response.status_code == 200:
return int(ujson.loads(response.text)['latest_import'])
else:
raise HTTPError
def update_latest_import_time(token, ts):
""" Send a POST request to the ListenBrainz server after the import is complete to
update the latest import time on the server. This will make future imports stop
when they reach this point of time in the listen history.
"""
response = requests.post(
LATEST_IMPORT_ENDPOINT,
headers={'Authorization': 'Token {user_token}'.format(user_token=token)},
data=ujson.dumps({'ts': ts})
)
if response.status_code != 200:
raise HTTPError
def import_from_alpha(username, token):
# first get the timestamp until which previous imports have already added data
latest_import_ts = get_latest_import_time(username)
next_max = int(time.time()) # variable used for pagination
page_count = 1
# we'll send this to LB Beta at the end of the import
newest_timestamp_imported_this_time = 0
stop = False
while True:
try:
if stop:
update_latest_import_time(token, newest_timestamp_imported_this_time)
logger.info('All pages done for user {}'.format(username))
logger.info('Total number of pages done: {}'.format(page_count - 1))
return
batch = get_batch(username, next_max)
# if this is the last page or if this is the page upto which we had imported earlier, then stop
if batch['payload']['count'] == 0 or batch['payload']['listens'][-1]['listened_at'] <= latest_import_ts:
stop = True
# if this is the first page, then get the value with which we'll update latest_update on LB
if page_count == 1 and batch['payload']['count'] > 0:
newest_timestamp_imported_this_time = batch['payload']['listens'][0]['listened_at']
# send the current batch of data
data = extract_data(batch)
sent = send_batch(data, token)
next_max = data['payload'][-1]['listened_at']
logger.info('Page #{} done.'.format(page_count))
if sent < len(data['payload']):
logger.error("Only wrote {} out of {} listens for page {}".format(sent, len(data['payload']), page_count))
page_count += 1
except HTTPError as e:
time.sleep(config.IMPORTER_DELAY)
def queue_pop():
redis_connection.lpop(config.IMPORTER_QUEUE_KEY)
def update_status(username, status):
redis_connection.set("{} {}".format(config.IMPORTER_SET_KEY_PREFIX, username), status)
def init_influx_connection():
""" Connects to influx and returns an InfluxListenStore instance """
while True:
try:
return InfluxListenStore({
'REDIS_HOST': config.REDIS_HOST,
'REDIS_PORT': config.REDIS_PORT,
'INFLUX_HOST': config.INFLUX_HOST,
'INFLUX_PORT': config.INFLUX_PORT,
'INFLUX_DB_NAME': config.INFLUX_DB_NAME,
})
except Exception as e:
logger.error("Couldn't create InfluxListenStore instance: {}".format(str(e)))
logger.error("Sleeping 2 seconds and then retrying...")
time.sleep(2)
if __name__ == '__main__':
init_redis_connection()
db_connection = init_influx_connection()
logger.info("alpha_importer started")
while True:
if not queue_empty():
username, token = queue_front()
import_from_alpha(username, token)
queue_pop()
update_status(username, "DONE")
db_connection.reset_listen_count(username)
else:
time.sleep(3)
 | 1 | 14,436 | `os` is an unused import. | metabrainz-listenbrainz-server | py
@@ -152,6 +152,9 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
// policy that is looking for any tag but the last one to be
// loaded won't find it, and TLS handshakes will fail (see end)
// of issue #3004)
+
+ tlsCertTags := h.state["tlsCertTags"].(map[string]string)
+
tag, ok := tlsCertTags[certFilename]
if !ok {
// haven't seen this cert file yet, let's give it a tag | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpcaddyfile
import (
"encoding/json"
"fmt"
"html"
"net/http"
"reflect"
"strings"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddytls"
"go.uber.org/zap/zapcore"
)
func init() {
RegisterDirective("bind", parseBind)
RegisterDirective("root", parseRoot) // TODO: isn't this a handler directive?
RegisterDirective("tls", parseTLS)
RegisterHandlerDirective("redir", parseRedir)
RegisterHandlerDirective("respond", parseRespond)
RegisterHandlerDirective("route", parseRoute)
RegisterHandlerDirective("handle", parseHandle)
RegisterDirective("handle_errors", parseHandleErrors)
RegisterDirective("log", parseLog)
}
// parseBind parses the bind directive. Syntax:
//
// bind <addresses...>
//
func parseBind(h Helper) ([]ConfigValue, error) {
var lnHosts []string
for h.Next() {
lnHosts = append(lnHosts, h.RemainingArgs()...)
}
return h.NewBindAddresses(lnHosts), nil
}
// parseRoot parses the root directive. Syntax:
//
// root [<matcher>] <path>
//
func parseRoot(h Helper) ([]ConfigValue, error) {
if !h.Next() {
return nil, h.ArgErr()
}
matcherSet, ok, err := h.MatcherToken()
if err != nil {
return nil, err
}
if !ok {
// no matcher token; oops
h.Dispenser.Prev()
}
if !h.NextArg() {
return nil, h.ArgErr()
}
root := h.Val()
if h.NextArg() {
return nil, h.ArgErr()
}
varsHandler := caddyhttp.VarsMiddleware{"root": root}
route := caddyhttp.Route{
HandlersRaw: []json.RawMessage{
caddyconfig.JSONModuleObject(varsHandler, "handler", "vars", nil),
},
}
if matcherSet != nil {
route.MatcherSetsRaw = []caddy.ModuleMap{matcherSet}
}
return []ConfigValue{{Class: "route", Value: route}}, nil
}
// parseTLS parses the tls directive. Syntax:
//
// tls [<email>]|[<cert_file> <key_file>] {
// protocols <min> [<max>]
// ciphers <cipher_suites...>
// curves <curves...>
// alpn <values...>
// load <paths...>
// ca <acme_ca_endpoint>
// dns <provider_name>
// }
//
func parseTLS(h Helper) ([]ConfigValue, error) {
var configVals []ConfigValue
var cp *caddytls.ConnectionPolicy
var fileLoader caddytls.FileLoader
var folderLoader caddytls.FolderLoader
var mgr caddytls.ACMEManagerMaker
// fill in global defaults, if configured
if email := h.Option("email"); email != nil {
mgr.Email = email.(string)
}
if acmeCA := h.Option("acme_ca"); acmeCA != nil {
mgr.CA = acmeCA.(string)
}
if caPemFile := h.Option("acme_ca_root"); caPemFile != nil {
mgr.TrustedRootsPEMFiles = append(mgr.TrustedRootsPEMFiles, caPemFile.(string))
}
for h.Next() {
// file certificate loader
firstLine := h.RemainingArgs()
switch len(firstLine) {
case 0:
case 1:
if !strings.Contains(firstLine[0], "@") {
return nil, h.Err("single argument must be an email address")
}
mgr.Email = firstLine[0]
case 2:
certFilename := firstLine[0]
keyFilename := firstLine[1]
// tag this certificate so if multiple certs match, specifically
// this one that the user has provided will be used, see #2588:
// https://github.com/caddyserver/caddy/issues/2588 ... but we
// must be careful about how we do this; being careless will
// lead to failed handshakes
// we need to remember which cert files we've seen, since we
// must load each cert only once; otherwise, they each get a
// different tag... since a cert loaded twice has the same
// bytes, it will overwrite the first one in the cache, and
// only the last cert (and its tag) will survive, so a any conn
// policy that is looking for any tag but the last one to be
// loaded won't find it, and TLS handshakes will fail (see end)
// of issue #3004)
tag, ok := tlsCertTags[certFilename]
if !ok {
// haven't seen this cert file yet, let's give it a tag
// and add a loader for it
tag = fmt.Sprintf("cert%d", len(tlsCertTags))
fileLoader = append(fileLoader, caddytls.CertKeyFilePair{
Certificate: certFilename,
Key: keyFilename,
Tags: []string{tag},
})
// remember this for next time we see this cert file
tlsCertTags[certFilename] = tag
}
certSelector := caddytls.CustomCertSelectionPolicy{Tag: tag}
if cp == nil {
cp = new(caddytls.ConnectionPolicy)
}
cp.CertSelection = caddyconfig.JSONModuleObject(certSelector, "policy", "custom", h.warnings)
default:
return nil, h.ArgErr()
}
var hasBlock bool
for h.NextBlock(0) {
hasBlock = true
switch h.Val() {
// connection policy
case "protocols":
args := h.RemainingArgs()
if len(args) == 0 {
return nil, h.SyntaxErr("one or two protocols")
}
if len(args) > 0 {
if _, ok := caddytls.SupportedProtocols[args[0]]; !ok {
return nil, h.Errf("Wrong protocol name or protocol not supported: '%s'", args[0])
}
if cp == nil {
cp = new(caddytls.ConnectionPolicy)
}
cp.ProtocolMin = args[0]
}
if len(args) > 1 {
if _, ok := caddytls.SupportedProtocols[args[1]]; !ok {
return nil, h.Errf("Wrong protocol name or protocol not supported: '%s'", args[1])
}
if cp == nil {
cp = new(caddytls.ConnectionPolicy)
}
cp.ProtocolMax = args[1]
}
case "ciphers":
for h.NextArg() {
if _, ok := caddytls.SupportedCipherSuites[h.Val()]; !ok {
return nil, h.Errf("Wrong cipher suite name or cipher suite not supported: '%s'", h.Val())
}
if cp == nil {
cp = new(caddytls.ConnectionPolicy)
}
cp.CipherSuites = append(cp.CipherSuites, h.Val())
}
case "curves":
for h.NextArg() {
if _, ok := caddytls.SupportedCurves[h.Val()]; !ok {
return nil, h.Errf("Wrong curve name or curve not supported: '%s'", h.Val())
}
if cp == nil {
cp = new(caddytls.ConnectionPolicy)
}
cp.Curves = append(cp.Curves, h.Val())
}
case "alpn":
args := h.RemainingArgs()
if len(args) == 0 {
return nil, h.ArgErr()
}
if cp == nil {
cp = new(caddytls.ConnectionPolicy)
}
cp.ALPN = args
// certificate folder loader
case "load":
folderLoader = append(folderLoader, h.RemainingArgs()...)
// automation policy
case "ca":
arg := h.RemainingArgs()
if len(arg) != 1 {
return nil, h.ArgErr()
}
mgr.CA = arg[0]
// DNS provider for ACME DNS challenge
case "dns":
if !h.Next() {
return nil, h.ArgErr()
}
provName := h.Val()
if mgr.Challenges == nil {
mgr.Challenges = new(caddytls.ChallengesConfig)
}
dnsProvModule, err := caddy.GetModule("tls.dns." + provName)
if err != nil {
return nil, h.Errf("getting DNS provider module named '%s': %v", provName, err)
}
mgr.Challenges.DNSRaw = caddyconfig.JSONModuleObject(dnsProvModule.New(), "provider", provName, h.warnings)
case "ca_root":
arg := h.RemainingArgs()
if len(arg) != 1 {
return nil, h.ArgErr()
}
mgr.TrustedRootsPEMFiles = append(mgr.TrustedRootsPEMFiles, arg[0])
default:
return nil, h.Errf("unknown subdirective: %s", h.Val())
}
}
// a naked tls directive is not allowed
if len(firstLine) == 0 && !hasBlock {
return nil, h.ArgErr()
}
}
// certificate loaders
if len(fileLoader) > 0 {
configVals = append(configVals, ConfigValue{
Class: "tls.certificate_loader",
Value: fileLoader,
})
// ensure server uses HTTPS by setting non-nil conn policy
if cp == nil {
cp = new(caddytls.ConnectionPolicy)
}
}
if len(folderLoader) > 0 {
configVals = append(configVals, ConfigValue{
Class: "tls.certificate_loader",
Value: folderLoader,
})
// ensure server uses HTTPS by setting non-nil conn policy
if cp == nil {
cp = new(caddytls.ConnectionPolicy)
}
}
// connection policy
if cp != nil {
configVals = append(configVals, ConfigValue{
Class: "tls.connection_policy",
Value: cp,
})
}
// automation policy
if !reflect.DeepEqual(mgr, caddytls.ACMEManagerMaker{}) {
configVals = append(configVals, ConfigValue{
Class: "tls.automation_manager",
Value: mgr,
})
}
return configVals, nil
}
// parseRedir parses the redir directive. Syntax:
//
// redir [<matcher>] <to> [<code>]
//
func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
if !h.Next() {
return nil, h.ArgErr()
}
if !h.NextArg() {
return nil, h.ArgErr()
}
to := h.Val()
var code string
if h.NextArg() {
code = h.Val()
}
if code == "permanent" {
code = "301"
}
if code == "temporary" || code == "" {
code = "302"
}
var body string
if code == "html" {
// Script tag comes first since that will better imitate a redirect in the browser's
// history, but the meta tag is a fallback for most non-JS clients.
const metaRedir = `<!DOCTYPE html>
<html>
<head>
<title>Redirecting...</title>
<script>window.location.replace("%s");</script>
<meta http-equiv="refresh" content="0; URL='%s'">
</head>
<body>Redirecting to <a href="%s">%s</a>...</body>
</html>
`
safeTo := html.EscapeString(to)
body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo)
}
return caddyhttp.StaticResponse{
StatusCode: caddyhttp.WeakString(code),
Headers: http.Header{"Location": []string{to}},
Body: body,
}, nil
}
// parseRespond parses the respond directive.
func parseRespond(h Helper) (caddyhttp.MiddlewareHandler, error) {
sr := new(caddyhttp.StaticResponse)
err := sr.UnmarshalCaddyfile(h.Dispenser)
if err != nil {
return nil, err
}
return sr, nil
}
// parseRoute parses the route directive.
func parseRoute(h Helper) (caddyhttp.MiddlewareHandler, error) {
sr := new(caddyhttp.Subroute)
for h.Next() {
for nesting := h.Nesting(); h.NextBlock(nesting); {
dir := h.Val()
dirFunc, ok := registeredDirectives[dir]
if !ok {
return nil, h.Errf("unrecognized directive: %s", dir)
}
subHelper := h
subHelper.Dispenser = h.NewFromNextSegment()
results, err := dirFunc(subHelper)
if err != nil {
return nil, h.Errf("parsing caddyfile tokens for '%s': %v", dir, err)
}
for _, result := range results {
handler, ok := result.Value.(caddyhttp.Route)
if !ok {
return nil, h.Errf("%s directive returned something other than an HTTP route: %#v (only handler directives can be used in routes)", dir, result.Value)
}
sr.Routes = append(sr.Routes, handler)
}
}
}
return sr, nil
}
func parseHandle(h Helper) (caddyhttp.MiddlewareHandler, error) {
return parseSegmentAsSubroute(h)
}
func parseHandleErrors(h Helper) ([]ConfigValue, error) {
subroute, err := parseSegmentAsSubroute(h)
if err != nil {
return nil, err
}
return []ConfigValue{
{
Class: "error_route",
Value: subroute,
},
}, nil
}
// parseLog parses the log directive. Syntax:
//
// log {
// output <writer_module> ...
// format <encoder_module> ...
// level <level>
// }
//
func parseLog(h Helper) ([]ConfigValue, error) {
var configValues []ConfigValue
for h.Next() {
cl := new(caddy.CustomLog)
for h.NextBlock(0) {
switch h.Val() {
case "output":
if !h.NextArg() {
return nil, h.ArgErr()
}
moduleName := h.Val()
// can't use the usual caddyfile.Unmarshaler flow with the
// standard writers because they are in the caddy package
// (because they are the default) and implementing that
// interface there would unfortunately create circular import
var wo caddy.WriterOpener
switch moduleName {
case "stdout":
wo = caddy.StdoutWriter{}
case "stderr":
wo = caddy.StderrWriter{}
case "discard":
wo = caddy.DiscardWriter{}
default:
mod, err := caddy.GetModule("caddy.logging.writers." + moduleName)
if err != nil {
return nil, h.Errf("getting log writer module named '%s': %v", moduleName, err)
}
unm, ok := mod.New().(caddyfile.Unmarshaler)
if !ok {
return nil, h.Errf("log writer module '%s' is not a Caddyfile unmarshaler", mod)
}
err = unm.UnmarshalCaddyfile(h.NewFromNextSegment())
if err != nil {
return nil, err
}
wo, ok = unm.(caddy.WriterOpener)
if !ok {
return nil, h.Errf("module %s is not a WriterOpener", mod)
}
}
cl.WriterRaw = caddyconfig.JSONModuleObject(wo, "output", moduleName, h.warnings)
case "format":
if !h.NextArg() {
return nil, h.ArgErr()
}
moduleName := h.Val()
mod, err := caddy.GetModule("caddy.logging.encoders." + moduleName)
if err != nil {
return nil, h.Errf("getting log encoder module named '%s': %v", moduleName, err)
}
unm, ok := mod.New().(caddyfile.Unmarshaler)
if !ok {
return nil, h.Errf("log encoder module '%s' is not a Caddyfile unmarshaler", mod)
}
err = unm.UnmarshalCaddyfile(h.NewFromNextSegment())
if err != nil {
return nil, err
}
enc, ok := unm.(zapcore.Encoder)
if !ok {
return nil, h.Errf("module %s is not a zapcore.Encoder", mod)
}
cl.EncoderRaw = caddyconfig.JSONModuleObject(enc, "format", moduleName, h.warnings)
case "level":
if !h.NextArg() {
return nil, h.ArgErr()
}
cl.Level = h.Val()
if h.NextArg() {
return nil, h.ArgErr()
}
default:
return nil, h.Errf("unrecognized subdirective: %s", h.Val())
}
}
var val namedCustomLog
if !reflect.DeepEqual(cl, new(caddy.CustomLog)) {
cl.Include = []string{"http.log.access"}
val.name = fmt.Sprintf("log%d", logCounter)
val.log = cl
logCounter++
}
configValues = append(configValues, ConfigValue{
Class: "custom_log",
Value: val,
})
}
return configValues, nil
}
// tlsCertTags maps certificate filenames to their tag.
// This is used to remember which tag is used for each
// certificate files, since we need to avoid loading
// the same certificate files more than once, overwriting
// previous tags
var tlsCertTags = make(map[string]string)
var logCounter int
| 1 | 14,232 | If this value doesn't exist (do an `, ok := ...` check), we should initialize and store it | caddyserver-caddy | go |
@@ -1,6 +1,3 @@
-import { or } from 'ramda';
-
-
-const MIN_SAFE_INTEGER = or(Number.MIN_SAFE_INTEGER, -(2 ** 53) - 1);
+const MIN_SAFE_INTEGER = Number.MIN_SAFE_INTEGER || -(2 ** 53) - 1;
export default MIN_SAFE_INTEGER; | 1 | import { or } from 'ramda';
const MIN_SAFE_INTEGER = or(Number.MIN_SAFE_INTEGER, -(2 ** 53) - 1);
export default MIN_SAFE_INTEGER;
| 1 | 5,218 | Would use parenthesis to explicitly state the associations of operands ```js const MAX_SAFE_INTEGER = Number.MAX_SAFE_INTEGER || (-(2 ** 53) - 1) | char0n-ramda-adjunct | js |
@@ -175,8 +175,7 @@ class Serial(IoBase):
self._origTimeout = self._ser.timeout
# We don't want a timeout while we're waiting for data.
self._setTimeout(None)
- self.inWaiting = self._ser.inWaiting
- super(Serial, self).__init__(self._ser.hComPort, onReceive)
+ super(Serial, self).__init__(self._ser._port_handle, onReceive)
def read(self, size=1):
data = self._ser.read(size) | 1 | #hwIo.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2015-2018 NV Access Limited, Babbage B.V.
"""Raw input/output for braille displays via serial and HID.
See the L{Serial} and L{Hid} classes.
Braille display drivers must be thread-safe to use this, as it utilises a background thread.
See L{braille.BrailleDisplayDriver.isThreadSafe}.
"""
import threading
import ctypes
from ctypes import byref
from ctypes.wintypes import DWORD, USHORT
import serial
from serial.win32 import MAXDWORD, OVERLAPPED, FILE_FLAG_OVERLAPPED, INVALID_HANDLE_VALUE, ERROR_IO_PENDING, COMMTIMEOUTS, CreateFile, SetCommTimeouts
import winKernel
import braille
from logHandler import log
import config
import time
LPOVERLAPPED_COMPLETION_ROUTINE = ctypes.WINFUNCTYPE(None, DWORD, DWORD, serial.win32.LPOVERLAPPED)
def _isDebug():
return config.conf["debugLog"]["hwIo"]
class IoBase(object):
"""Base class for raw I/O.
This watches for data of a specified size and calls a callback when it is received.
"""
def __init__(self, fileHandle, onReceive, writeFileHandle=None, onReceiveSize=1, writeSize=None):
"""Constructor.
		@param fileHandle: A handle to an open I/O device opened for overlapped I/O.
If L{writeFileHandle} is specified, this is only for input
@param onReceive: A callable taking the received data as its only argument.
@type onReceive: callable(str)
@param writeFileHandle: A handle to an open output device opened for overlapped I/O.
@param onReceiveSize: The size (in bytes) of the data with which to call C{onReceive}.
@type onReceiveSize: int
@param writeSize: The size of the buffer for writes,
C{None} to use the length of the data written.
@param writeSize: int or None
"""
self._file = fileHandle
self._writeFile = writeFileHandle if writeFileHandle is not None else fileHandle
self._onReceive = onReceive
self._readSize = onReceiveSize
self._writeSize = writeSize
self._readBuf = ctypes.create_string_buffer(onReceiveSize)
self._readOl = OVERLAPPED()
self._recvEvt = winKernel.createEvent()
self._ioDoneInst = LPOVERLAPPED_COMPLETION_ROUTINE(self._ioDone)
self._writeOl = OVERLAPPED()
# Do the initial read.
@winKernel.PAPCFUNC
def init(param):
self._initApc = None
self._asyncRead()
# Ensure the APC stays alive until it runs.
self._initApc = init
braille._BgThread.queueApc(init)
def waitForRead(self, timeout):
"""Wait for a chunk of data to be received and processed.
This will return after L{onReceive} has been called or when the timeout elapses.
@param timeout: The maximum time to wait in seconds.
@type timeout: int or float
@return: C{True} if received data was processed before the timeout,
C{False} if not.
@rtype: bool
"""
timeout= int(timeout*1000)
while True:
curTime = time.time()
res = winKernel.waitForSingleObjectEx(self._recvEvt, timeout, True)
if res==winKernel.WAIT_OBJECT_0:
return True
elif res==winKernel.WAIT_TIMEOUT:
if _isDebug():
log.debug("Wait timed out")
return False
elif res==winKernel.WAIT_IO_COMPLETION:
if _isDebug():
log.debug("Waiting interrupted by completed i/o")
timeout -= int((time.time()-curTime)*1000)
def write(self, data):
if _isDebug():
log.debug("Write: %r" % data)
size = self._writeSize or len(data)
buf = ctypes.create_string_buffer(size)
buf.raw = data
if not ctypes.windll.kernel32.WriteFile(self._writeFile, data, size, None, byref(self._writeOl)):
if ctypes.GetLastError() != ERROR_IO_PENDING:
if _isDebug():
log.debug("Write failed: %s" % ctypes.WinError())
raise ctypes.WinError()
bytes = DWORD()
ctypes.windll.kernel32.GetOverlappedResult(self._writeFile, byref(self._writeOl), byref(bytes), True)
def close(self):
if _isDebug():
log.debug("Closing")
self._onReceive = None
if hasattr(self, "_file") and self._file is not INVALID_HANDLE_VALUE:
ctypes.windll.kernel32.CancelIoEx(self._file, byref(self._readOl))
if hasattr(self, "_writeFile") and self._writeFile not in (self._file, INVALID_HANDLE_VALUE):
ctypes.windll.kernel32.CancelIoEx(self._writeFile, byref(self._readOl))
winKernel.closeHandle(self._recvEvt)
def __del__(self):
try:
self.close()
except AttributeError:
if _isDebug():
log.debugWarning("Couldn't delete object gracefully", exc_info=True)
def _asyncRead(self):
# Wait for _readSize bytes of data.
# _ioDone will call onReceive once it is received.
# onReceive can then optionally read additional bytes if it knows these are coming.
ctypes.windll.kernel32.ReadFileEx(self._file, self._readBuf, self._readSize, byref(self._readOl), self._ioDoneInst)
def _ioDone(self, error, bytes, overlapped):
if not self._onReceive:
# close has been called.
self._ioDone = None
return
elif error != 0:
raise ctypes.WinError(error)
self._notifyReceive(self._readBuf[:bytes])
winKernel.kernel32.SetEvent(self._recvEvt)
self._asyncRead()
def _notifyReceive(self, data):
"""Called when data is received.
The base implementation just calls the onReceive callback provided to the constructor.
This can be extended to perform tasks before/after the callback.
"""
if _isDebug():
log.debug("Read: %r" % data)
try:
self._onReceive(data)
except:
log.error("", exc_info=True)
class Serial(IoBase):
"""Raw I/O for serial devices.
This extends pyserial to call a callback when data is received.
"""
def __init__(self, *args, **kwargs):
"""Constructor.
Pass the arguments you would normally pass to L{serial.Serial}.
There is also one additional keyword argument.
@param onReceive: A callable taking a byte of received data as its only argument.
This callable can then call C{read} to get additional data if desired.
@type onReceive: callable(str)
"""
onReceive = kwargs.pop("onReceive")
self._ser = None
self.port = args[0] if len(args) >= 1 else kwargs["port"]
if _isDebug():
log.debug("Opening port %s" % self.port)
try:
self._ser = serial.Serial(*args, **kwargs)
except Exception as e:
if _isDebug():
log.debug("Open failed: %s" % e)
raise
self._origTimeout = self._ser.timeout
# We don't want a timeout while we're waiting for data.
self._setTimeout(None)
self.inWaiting = self._ser.inWaiting
super(Serial, self).__init__(self._ser.hComPort, onReceive)
def read(self, size=1):
data = self._ser.read(size)
if _isDebug():
log.debug("Read: %r" % data)
return data
def write(self, data):
if _isDebug():
log.debug("Write: %r" % data)
self._ser.write(data)
def close(self):
if not self._ser:
return
super(Serial, self).close()
self._ser.close()
def _notifyReceive(self, data):
# Set the timeout for onReceive in case it does a sync read.
self._setTimeout(self._origTimeout)
super(Serial, self)._notifyReceive(data)
self._setTimeout(None)
def _setTimeout(self, timeout):
# #6035: pyserial reconfigures all settings of the port when setting a timeout.
# This can cause error 'Cannot configure port, some setting was wrong.'
# Therefore, manually set the timeouts using the Win32 API.
# Adapted from pyserial 3.1.1.
timeouts = COMMTIMEOUTS()
if timeout is not None:
if timeout == 0:
				timeouts.ReadIntervalTimeout = MAXDWORD
else:
timeouts.ReadTotalTimeoutConstant = max(int(timeout * 1000), 1)
if timeout != 0 and self._ser._interCharTimeout is not None:
timeouts.ReadIntervalTimeout = max(int(self._ser._interCharTimeout * 1000), 1)
if self._ser._writeTimeout is not None:
if self._ser._writeTimeout == 0:
				timeouts.WriteTotalTimeoutConstant = MAXDWORD
else:
timeouts.WriteTotalTimeoutConstant = max(int(self._ser._writeTimeout * 1000), 1)
SetCommTimeouts(self._ser.hComPort, ctypes.byref(timeouts))
class HIDP_CAPS (ctypes.Structure):
_fields_ = (
("Usage", USHORT),
("UsagePage", USHORT),
("InputReportByteLength", USHORT),
("OutputReportByteLength", USHORT),
("FeatureReportByteLength", USHORT),
("Reserved", USHORT * 17),
("NumberLinkCollectionNodes", USHORT),
("NumberInputButtonCaps", USHORT),
("NumberInputValueCaps", USHORT),
("NumberInputDataIndices", USHORT),
("NumberOutputButtonCaps", USHORT),
("NumberOutputValueCaps", USHORT),
("NumberOutputDataIndices", USHORT),
("NumberFeatureButtonCaps", USHORT),
("NumberFeatureValueCaps", USHORT),
("NumberFeatureDataIndices", USHORT)
)
class Hid(IoBase):
"""Raw I/O for HID devices.
"""
def __init__(self, path, onReceive, exclusive=True):
"""Constructor.
@param path: The device path.
This can be retrieved using L{hwPortUtils.listHidDevices}.
@type path: unicode
@param onReceive: A callable taking a received input report as its only argument.
@type onReceive: callable(str)
@param exclusive: Whether to block other application's access to this device.
@type exclusive: bool
"""
if _isDebug():
log.debug("Opening device %s" % path)
handle = CreateFile(
path,
winKernel.GENERIC_READ | winKernel.GENERIC_WRITE,
0 if exclusive else winKernel.FILE_SHARE_READ|winKernel.FILE_SHARE_WRITE,
None,
winKernel.OPEN_EXISTING,
FILE_FLAG_OVERLAPPED,
None
)
if handle == INVALID_HANDLE_VALUE:
if _isDebug():
log.debug("Open failed: %s" % ctypes.WinError())
self._file = None
raise ctypes.WinError()
self._file = handle
pd = ctypes.c_void_p()
if not ctypes.windll.hid.HidD_GetPreparsedData(handle, byref(pd)):
raise ctypes.WinError()
caps = HIDP_CAPS()
ctypes.windll.hid.HidP_GetCaps(pd, byref(caps))
ctypes.windll.hid.HidD_FreePreparsedData(pd)
if _isDebug():
log.debug("Report byte lengths: input %d, output %d, feature %d"
% (caps.InputReportByteLength, caps.OutputReportByteLength,
caps.FeatureReportByteLength))
self._featureSize = caps.FeatureReportByteLength
# Reading any less than caps.InputReportByteLength is an error.
# On Windows 7, writing any less than caps.OutputReportByteLength is also an error.
super(Hid, self).__init__(handle, onReceive,
onReceiveSize=caps.InputReportByteLength,
writeSize=caps.OutputReportByteLength)
def getFeature(self, reportId):
"""Get a feature report from this device.
@param reportId: The report id.
@type reportId: str
@return: The report, including the report id.
@rtype: str
"""
buf = ctypes.create_string_buffer(self._featureSize)
buf[0] = reportId
if not ctypes.windll.hid.HidD_GetFeature(self._file, buf, self._featureSize):
if _isDebug():
log.debug("Get feature %r failed: %s"
% (reportId, ctypes.WinError()))
raise ctypes.WinError()
if _isDebug():
log.debug("Get feature: %r" % buf.raw)
return buf.raw
def setFeature(self, report):
"""Send a feature report to this device.
@param report: The report, including its id.
@type report: str
"""
length = len(report)
buf = ctypes.create_string_buffer(length)
buf.raw = report
if _isDebug():
log.debug("Set feature: %r" % report)
if not ctypes.windll.hid.HidD_SetFeature(self._file, buf, length):
if _isDebug():
log.debug("Set feature failed: %s" % ctypes.WinError())
raise ctypes.WinError()
def setOutputReport(self,report):
"""
Write the given report to the device using HidD_SetOutputReport.
This is instead of using the standard WriteFile which may freeze with some USB HID implementations.
@param report: The report, including its id.
@type report: str
"""
length=len(report)
buf=ctypes.create_string_buffer(length)
buf.raw=report
if _isDebug():
log.debug("Set output report: %r" % report)
if not ctypes.windll.hid.HidD_SetOutputReport(self._writeFile,buf,length):
if _isDebug():
log.debug("Set output report failed: %s" % ctypes.WinError())
raise ctypes.WinError()
def close(self):
if not self._file:
return
super(Hid, self).close()
winKernel.closeHandle(self._file)
self._file = None
class Bulk(IoBase):
"""Raw I/O for bulk USB devices.
This implementation assumes that the used Bulk device has two separate end points for input and output.
"""
def __init__(self, path, epIn, epOut, onReceive, onReceiveSize=1, writeSize=None):
"""Constructor.
@param path: The device path.
@type path: unicode
@param epIn: The endpoint to read data from.
@type epIn: int
@param epOut: The endpoint to write data to.
@type epOut: int
@param onReceive: A callable taking a received input report as its only argument.
@type onReceive: callable(str)
"""
if _isDebug():
log.debug("Opening device %s" % path)
readPath="{path}\\{endpoint}".format(path=path,endpoint=epIn)
writePath="{path}\\{endpoint}".format(path=path,endpoint=epOut)
readHandle = CreateFile(readPath, winKernel.GENERIC_READ,
0, None, winKernel.OPEN_EXISTING, FILE_FLAG_OVERLAPPED, None)
if readHandle == INVALID_HANDLE_VALUE:
if _isDebug():
log.debug("Open read handle failed: %s" % ctypes.WinError())
raise ctypes.WinError()
writeHandle = CreateFile(writePath, winKernel.GENERIC_WRITE,
0, None, winKernel.OPEN_EXISTING, FILE_FLAG_OVERLAPPED, None)
if writeHandle == INVALID_HANDLE_VALUE:
if _isDebug():
log.debug("Open write handle failed: %s" % ctypes.WinError())
raise ctypes.WinError()
super(Bulk, self).__init__(readHandle, onReceive,
writeFileHandle=writeHandle, onReceiveSize=onReceiveSize,
writeSize=writeSize)
def close(self):
super(Bulk, self).close()
if hasattr(self, "_file") and self._file is not INVALID_HANDLE_VALUE:
winKernel.closeHandle(self._file)
if hasattr(self, "_writeFile") and self._writeFile is not INVALID_HANDLE_VALUE:
winKernel.closeHandle(self._writeFile)
| 1 | 23,415 | This handle is now private to pyserial, but if there is no public function to retrieve it I guess this is the best we can do. | nvaccess-nvda | py |
@@ -581,7 +581,7 @@ uint32_t wlr_xdg_toplevel_set_maximized(struct wlr_xdg_surface *surface,
assert(surface->role == WLR_XDG_SURFACE_ROLE_TOPLEVEL);
surface->toplevel->server_pending.maximized = maximized;
- return schedule_xdg_surface_configure(surface);
+ return wlr_xdg_surface_schedule_configure(surface);
}
uint32_t wlr_xdg_toplevel_set_fullscreen(struct wlr_xdg_surface *surface, | 1 | #define _POSIX_C_SOURCE 200809L
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <wlr/util/log.h>
#include <wlr/util/edges.h>
#include "types/wlr_xdg_shell.h"
#include "util/signal.h"
void handle_xdg_toplevel_ack_configure(
struct wlr_xdg_surface *surface,
struct wlr_xdg_surface_configure *configure) {
assert(surface->role == WLR_XDG_SURFACE_ROLE_TOPLEVEL);
assert(configure->toplevel_state != NULL);
surface->toplevel->current.maximized =
configure->toplevel_state->maximized;
surface->toplevel->current.fullscreen =
configure->toplevel_state->fullscreen;
surface->toplevel->current.resizing =
configure->toplevel_state->resizing;
surface->toplevel->current.activated =
configure->toplevel_state->activated;
surface->toplevel->current.tiled =
configure->toplevel_state->tiled;
}
bool compare_xdg_surface_toplevel_state(struct wlr_xdg_toplevel *state) {
struct {
struct wlr_xdg_toplevel_state state;
uint32_t width, height;
} configured;
// is pending state different from current state?
if (!state->base->configured) {
return false;
}
if (wl_list_empty(&state->base->configure_list)) {
// last configure is actually the current state, just use it
configured.state = state->current;
configured.width = state->base->surface->current.width;
configured.height = state->base->surface->current.height;
} else {
struct wlr_xdg_surface_configure *configure =
wl_container_of(state->base->configure_list.prev, configure, link);
configured.state = *configure->toplevel_state;
configured.width = configure->toplevel_state->width;
configured.height = configure->toplevel_state->height;
}
if (state->server_pending.activated != configured.state.activated) {
return false;
}
if (state->server_pending.fullscreen != configured.state.fullscreen) {
return false;
}
if (state->server_pending.maximized != configured.state.maximized) {
return false;
}
if (state->server_pending.resizing != configured.state.resizing) {
return false;
}
if (state->server_pending.tiled != configured.state.tiled) {
return false;
}
if (state->server_pending.width == configured.width &&
state->server_pending.height == configured.height) {
return true;
}
if (state->server_pending.width == 0 && state->server_pending.height == 0) {
return true;
}
return false;
}
void send_xdg_toplevel_configure(struct wlr_xdg_surface *surface,
struct wlr_xdg_surface_configure *configure) {
assert(surface->role == WLR_XDG_SURFACE_ROLE_TOPLEVEL);
configure->toplevel_state = malloc(sizeof(*configure->toplevel_state));
if (configure->toplevel_state == NULL) {
wlr_log(WLR_ERROR, "Allocation failed");
wl_resource_post_no_memory(surface->toplevel->resource);
return;
}
*configure->toplevel_state = surface->toplevel->server_pending;
struct wl_array states;
wl_array_init(&states);
if (surface->toplevel->server_pending.maximized) {
uint32_t *s = wl_array_add(&states, sizeof(uint32_t));
if (!s) {
wlr_log(WLR_ERROR, "Could not allocate state for maximized xdg_toplevel");
goto error_out;
}
*s = XDG_TOPLEVEL_STATE_MAXIMIZED;
}
if (surface->toplevel->server_pending.fullscreen) {
uint32_t *s = wl_array_add(&states, sizeof(uint32_t));
if (!s) {
wlr_log(WLR_ERROR, "Could not allocate state for fullscreen xdg_toplevel");
goto error_out;
}
*s = XDG_TOPLEVEL_STATE_FULLSCREEN;
}
if (surface->toplevel->server_pending.resizing) {
uint32_t *s = wl_array_add(&states, sizeof(uint32_t));
if (!s) {
wlr_log(WLR_ERROR, "Could not allocate state for resizing xdg_toplevel");
goto error_out;
}
*s = XDG_TOPLEVEL_STATE_RESIZING;
}
if (surface->toplevel->server_pending.activated) {
uint32_t *s = wl_array_add(&states, sizeof(uint32_t));
if (!s) {
wlr_log(WLR_ERROR, "Could not allocate state for activated xdg_toplevel");
goto error_out;
}
*s = XDG_TOPLEVEL_STATE_ACTIVATED;
}
if (surface->toplevel->server_pending.tiled) {
if (wl_resource_get_version(surface->resource) >=
XDG_TOPLEVEL_STATE_TILED_LEFT_SINCE_VERSION) {
const struct {
enum wlr_edges edge;
enum xdg_toplevel_state state;
} tiled[] = {
{ WLR_EDGE_LEFT, XDG_TOPLEVEL_STATE_TILED_LEFT },
{ WLR_EDGE_RIGHT, XDG_TOPLEVEL_STATE_TILED_RIGHT },
{ WLR_EDGE_TOP, XDG_TOPLEVEL_STATE_TILED_TOP },
{ WLR_EDGE_BOTTOM, XDG_TOPLEVEL_STATE_TILED_BOTTOM },
};
for (size_t i = 0; i < sizeof(tiled)/sizeof(tiled[0]); ++i) {
if ((surface->toplevel->server_pending.tiled &
tiled[i].edge) == 0) {
continue;
}
uint32_t *s = wl_array_add(&states, sizeof(uint32_t));
if (!s) {
wlr_log(WLR_ERROR,
"Could not allocate state for tiled xdg_toplevel");
goto error_out;
}
*s = tiled[i].state;
}
} else if (!surface->toplevel->server_pending.maximized) {
// This version doesn't support tiling, best we can do is make the
// toplevel maximized
uint32_t *s = wl_array_add(&states, sizeof(uint32_t));
if (!s) {
wlr_log(WLR_ERROR,
"Could not allocate state for maximized xdg_toplevel");
goto error_out;
}
*s = XDG_TOPLEVEL_STATE_MAXIMIZED;
}
}
uint32_t width = surface->toplevel->server_pending.width;
uint32_t height = surface->toplevel->server_pending.height;
xdg_toplevel_send_configure(surface->toplevel->resource, width, height,
&states);
wl_array_release(&states);
return;
error_out:
wl_array_release(&states);
wl_resource_post_no_memory(surface->toplevel->resource);
}
void handle_xdg_surface_toplevel_committed(struct wlr_xdg_surface *surface) {
assert(surface->role == WLR_XDG_SURFACE_ROLE_TOPLEVEL);
if (!surface->toplevel->added) {
// on the first commit, send a configure request to tell the client it
// is added
schedule_xdg_surface_configure(surface);
surface->toplevel->added = true;
return;
}
// update state that doesn't need compositor approval
surface->toplevel->current.max_width =
surface->toplevel->client_pending.max_width;
surface->toplevel->current.min_width =
surface->toplevel->client_pending.min_width;
surface->toplevel->current.max_height =
surface->toplevel->client_pending.max_height;
surface->toplevel->current.min_height =
surface->toplevel->client_pending.min_height;
}
static const struct xdg_toplevel_interface xdg_toplevel_implementation;
struct wlr_xdg_surface *wlr_xdg_surface_from_toplevel_resource(
struct wl_resource *resource) {
assert(wl_resource_instance_of(resource, &xdg_toplevel_interface,
&xdg_toplevel_implementation));
return wl_resource_get_user_data(resource);
}
static void set_parent(struct wlr_xdg_surface *surface,
struct wlr_xdg_surface *parent);
static void handle_parent_unmap(struct wl_listener *listener, void *data) {
struct wlr_xdg_toplevel *toplevel =
wl_container_of(listener, toplevel, parent_unmap);
set_parent(toplevel->base, toplevel->parent->toplevel->parent);
}
static void set_parent(struct wlr_xdg_surface *surface,
struct wlr_xdg_surface *parent) {
assert(surface->role == WLR_XDG_SURFACE_ROLE_TOPLEVEL);
assert(!parent || parent->role == WLR_XDG_SURFACE_ROLE_TOPLEVEL);
if (surface->toplevel->parent) {
wl_list_remove(&surface->toplevel->parent_unmap.link);
}
surface->toplevel->parent = parent;
if (surface->toplevel->parent) {
surface->toplevel->parent_unmap.notify = handle_parent_unmap;
wl_signal_add(&surface->toplevel->parent->events.unmap,
&surface->toplevel->parent_unmap);
}
wlr_signal_emit_safe(&surface->toplevel->events.set_parent, surface);
}
static void xdg_toplevel_handle_set_parent(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *parent_resource) {
struct wlr_xdg_surface *surface =
wlr_xdg_surface_from_toplevel_resource(resource);
struct wlr_xdg_surface *parent = NULL;
if (parent_resource != NULL) {
parent = wlr_xdg_surface_from_toplevel_resource(parent_resource);
}
set_parent(surface, parent);
}
static void xdg_toplevel_handle_set_title(struct wl_client *client,
struct wl_resource *resource, const char *title) {
struct wlr_xdg_surface *surface =
wlr_xdg_surface_from_toplevel_resource(resource);
char *tmp;
tmp = strdup(title);
if (tmp == NULL) {
return;
}
free(surface->toplevel->title);
surface->toplevel->title = tmp;
wlr_signal_emit_safe(&surface->toplevel->events.set_title, surface);
}
static void xdg_toplevel_handle_set_app_id(struct wl_client *client,
struct wl_resource *resource, const char *app_id) {
struct wlr_xdg_surface *surface =
wlr_xdg_surface_from_toplevel_resource(resource);
char *tmp;
tmp = strdup(app_id);
if (tmp == NULL) {
return;
}
free(surface->toplevel->app_id);
surface->toplevel->app_id = tmp;
wlr_signal_emit_safe(&surface->toplevel->events.set_app_id, surface);
}
static void xdg_toplevel_handle_show_window_menu(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *seat_resource,
uint32_t serial, int32_t x, int32_t y) {
struct wlr_xdg_surface *surface =
wlr_xdg_surface_from_toplevel_resource(resource);
struct wlr_seat_client *seat =
wlr_seat_client_from_resource(seat_resource);
if (!surface->configured) {
wl_resource_post_error(surface->toplevel->resource,
XDG_SURFACE_ERROR_NOT_CONSTRUCTED,
"surface has not been configured yet");
return;
}
if (!wlr_seat_validate_grab_serial(seat->seat, serial)) {
wlr_log(WLR_DEBUG, "invalid serial for grab");
return;
}
struct wlr_xdg_toplevel_show_window_menu_event event = {
.surface = surface,
.seat = seat,
.serial = serial,
.x = x,
.y = y,
};
wlr_signal_emit_safe(&surface->toplevel->events.request_show_window_menu, &event);
}
static void xdg_toplevel_handle_move(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *seat_resource,
uint32_t serial) {
struct wlr_xdg_surface *surface =
wlr_xdg_surface_from_toplevel_resource(resource);
struct wlr_seat_client *seat =
wlr_seat_client_from_resource(seat_resource);
if (!surface->configured) {
wl_resource_post_error(surface->toplevel->resource,
XDG_SURFACE_ERROR_NOT_CONSTRUCTED,
"surface has not been configured yet");
return;
}
if (!wlr_seat_validate_grab_serial(seat->seat, serial)) {
wlr_log(WLR_DEBUG, "invalid serial for grab");
return;
}
struct wlr_xdg_toplevel_move_event event = {
.surface = surface,
.seat = seat,
.serial = serial,
};
wlr_signal_emit_safe(&surface->toplevel->events.request_move, &event);
}
static void xdg_toplevel_handle_resize(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *seat_resource,
uint32_t serial, uint32_t edges) {
struct wlr_xdg_surface *surface =
wlr_xdg_surface_from_toplevel_resource(resource);
struct wlr_seat_client *seat =
wlr_seat_client_from_resource(seat_resource);
if (!surface->configured) {
wl_resource_post_error(surface->toplevel->resource,
XDG_SURFACE_ERROR_NOT_CONSTRUCTED,
"surface has not been configured yet");
return;
}
if (!wlr_seat_validate_grab_serial(seat->seat, serial)) {
wlr_log(WLR_DEBUG, "invalid serial for grab");
return;
}
struct wlr_xdg_toplevel_resize_event event = {
.surface = surface,
.seat = seat,
.serial = serial,
.edges = edges,
};
wlr_signal_emit_safe(&surface->toplevel->events.request_resize, &event);
}
static void xdg_toplevel_handle_set_max_size(struct wl_client *client,
struct wl_resource *resource, int32_t width, int32_t height) {
struct wlr_xdg_surface *surface =
wlr_xdg_surface_from_toplevel_resource(resource);
surface->toplevel->client_pending.max_width = width;
surface->toplevel->client_pending.max_height = height;
}
static void xdg_toplevel_handle_set_min_size(struct wl_client *client,
struct wl_resource *resource, int32_t width, int32_t height) {
struct wlr_xdg_surface *surface =
wlr_xdg_surface_from_toplevel_resource(resource);
surface->toplevel->client_pending.min_width = width;
surface->toplevel->client_pending.min_height = height;
}
static void xdg_toplevel_handle_set_maximized(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_xdg_surface *surface =
wlr_xdg_surface_from_toplevel_resource(resource);
surface->toplevel->client_pending.maximized = true;
wlr_signal_emit_safe(&surface->toplevel->events.request_maximize, surface);
}
static void xdg_toplevel_handle_unset_maximized(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_xdg_surface *surface =
wlr_xdg_surface_from_toplevel_resource(resource);
surface->toplevel->client_pending.maximized = false;
wlr_signal_emit_safe(&surface->toplevel->events.request_maximize, surface);
}
static void handle_fullscreen_output_destroy(struct wl_listener *listener,
void *data) {
struct wlr_xdg_toplevel_state *state =
wl_container_of(listener, state, fullscreen_output_destroy);
state->fullscreen_output = NULL;
wl_list_remove(&state->fullscreen_output_destroy.link);
}
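// Records the client's pending fullscreen request: remembers the requested
// output (if any) and listens for its destroy event so the stored pointer can
// be cleared if that output goes away.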
static void store_fullscreen_pending(struct wlr_xdg_surface *surface,
bool fullscreen, struct wlr_output *output) {
struct wlr_xdg_toplevel_state *state = &surface->toplevel->client_pending;
state->fullscreen = fullscreen;
if (state->fullscreen_output) {
wl_list_remove(&state->fullscreen_output_destroy.link);
}
state->fullscreen_output = output;
if (state->fullscreen_output) {
state->fullscreen_output_destroy.notify =
handle_fullscreen_output_destroy;
wl_signal_add(&state->fullscreen_output->events.destroy,
&state->fullscreen_output_destroy);
}
}
static void xdg_toplevel_handle_set_fullscreen(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *output_resource) {
struct wlr_xdg_surface *surface =
wlr_xdg_surface_from_toplevel_resource(resource);
struct wlr_output *output = NULL;
if (output_resource != NULL) {
output = wlr_output_from_resource(output_resource);
}
store_fullscreen_pending(surface, true, output);
struct wlr_xdg_toplevel_set_fullscreen_event event = {
.surface = surface,
.fullscreen = true,
.output = output,
};
wlr_signal_emit_safe(&surface->toplevel->events.request_fullscreen, &event);
}
static void xdg_toplevel_handle_unset_fullscreen(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_xdg_surface *surface =
wlr_xdg_surface_from_toplevel_resource(resource);
store_fullscreen_pending(surface, false, NULL);
struct wlr_xdg_toplevel_set_fullscreen_event event = {
.surface = surface,
.fullscreen = false,
.output = NULL,
};
wlr_signal_emit_safe(&surface->toplevel->events.request_fullscreen, &event);
}
static void xdg_toplevel_handle_set_minimized(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_xdg_surface *surface =
wlr_xdg_surface_from_toplevel_resource(resource);
wlr_signal_emit_safe(&surface->toplevel->events.request_minimize, surface);
}
static void xdg_toplevel_handle_destroy(struct wl_client *client,
struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static const struct xdg_toplevel_interface xdg_toplevel_implementation = {
.destroy = xdg_toplevel_handle_destroy,
.set_parent = xdg_toplevel_handle_set_parent,
.set_title = xdg_toplevel_handle_set_title,
.set_app_id = xdg_toplevel_handle_set_app_id,
.show_window_menu = xdg_toplevel_handle_show_window_menu,
.move = xdg_toplevel_handle_move,
.resize = xdg_toplevel_handle_resize,
.set_max_size = xdg_toplevel_handle_set_max_size,
.set_min_size = xdg_toplevel_handle_set_min_size,
.set_maximized = xdg_toplevel_handle_set_maximized,
.unset_maximized = xdg_toplevel_handle_unset_maximized,
.set_fullscreen = xdg_toplevel_handle_set_fullscreen,
.unset_fullscreen = xdg_toplevel_handle_unset_fullscreen,
.set_minimized = xdg_toplevel_handle_set_minimized,
};
static void xdg_toplevel_handle_resource_destroy(struct wl_resource *resource) {
struct wlr_xdg_surface *surface =
wlr_xdg_surface_from_toplevel_resource(resource);
destroy_xdg_toplevel(surface);
}
const struct wlr_surface_role xdg_toplevel_surface_role = {
.name = "xdg_toplevel",
.commit = handle_xdg_surface_commit,
.precommit = handle_xdg_surface_precommit,
};
void create_xdg_toplevel(struct wlr_xdg_surface *xdg_surface,
uint32_t id) {
if (!wlr_surface_set_role(xdg_surface->surface, &xdg_toplevel_surface_role,
xdg_surface, xdg_surface->resource, XDG_WM_BASE_ERROR_ROLE)) {
return;
}
if (xdg_surface->role != WLR_XDG_SURFACE_ROLE_NONE) {
wl_resource_post_error(xdg_surface->resource,
XDG_SURFACE_ERROR_ALREADY_CONSTRUCTED,
"xdg-surface has already been constructed");
return;
}
assert(xdg_surface->toplevel == NULL);
xdg_surface->toplevel = calloc(1, sizeof(struct wlr_xdg_toplevel));
if (xdg_surface->toplevel == NULL) {
wl_resource_post_no_memory(xdg_surface->resource);
return;
}
xdg_surface->toplevel->base = xdg_surface;
wl_signal_init(&xdg_surface->toplevel->events.request_maximize);
wl_signal_init(&xdg_surface->toplevel->events.request_fullscreen);
wl_signal_init(&xdg_surface->toplevel->events.request_minimize);
wl_signal_init(&xdg_surface->toplevel->events.request_move);
wl_signal_init(&xdg_surface->toplevel->events.request_resize);
wl_signal_init(&xdg_surface->toplevel->events.request_show_window_menu);
wl_signal_init(&xdg_surface->toplevel->events.set_parent);
wl_signal_init(&xdg_surface->toplevel->events.set_title);
wl_signal_init(&xdg_surface->toplevel->events.set_app_id);
xdg_surface->toplevel->resource = wl_resource_create(
xdg_surface->client->client, &xdg_toplevel_interface,
wl_resource_get_version(xdg_surface->resource), id);
if (xdg_surface->toplevel->resource == NULL) {
free(xdg_surface->toplevel);
wl_resource_post_no_memory(xdg_surface->resource);
return;
}
wl_resource_set_implementation(xdg_surface->toplevel->resource,
&xdg_toplevel_implementation, xdg_surface,
xdg_toplevel_handle_resource_destroy);
xdg_surface->role = WLR_XDG_SURFACE_ROLE_TOPLEVEL;
}
void destroy_xdg_toplevel(struct wlr_xdg_surface *xdg_surface) {
if (xdg_surface == NULL) {
return;
}
assert(xdg_surface->role == WLR_XDG_SURFACE_ROLE_TOPLEVEL);
reset_xdg_surface(xdg_surface);
}
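// The wlr_xdg_toplevel_set_* helpers below are intended to be called by the
// compositor: they update the toplevel's server-pending state and schedule a
// configure, unlike the xdg_toplevel_handle_* handlers above, which react to
// client requests.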
uint32_t wlr_xdg_toplevel_set_size(struct wlr_xdg_surface *surface,
uint32_t width, uint32_t height) {
assert(surface->role == WLR_XDG_SURFACE_ROLE_TOPLEVEL);
surface->toplevel->server_pending.width = width;
surface->toplevel->server_pending.height = height;
return schedule_xdg_surface_configure(surface);
}
uint32_t wlr_xdg_toplevel_set_activated(struct wlr_xdg_surface *surface,
bool activated) {
assert(surface->role == WLR_XDG_SURFACE_ROLE_TOPLEVEL);
surface->toplevel->server_pending.activated = activated;
return schedule_xdg_surface_configure(surface);
}
uint32_t wlr_xdg_toplevel_set_maximized(struct wlr_xdg_surface *surface,
bool maximized) {
assert(surface->role == WLR_XDG_SURFACE_ROLE_TOPLEVEL);
surface->toplevel->server_pending.maximized = maximized;
return schedule_xdg_surface_configure(surface);
}
uint32_t wlr_xdg_toplevel_set_fullscreen(struct wlr_xdg_surface *surface,
bool fullscreen) {
assert(surface->role == WLR_XDG_SURFACE_ROLE_TOPLEVEL);
surface->toplevel->server_pending.fullscreen = fullscreen;
return schedule_xdg_surface_configure(surface);
}
uint32_t wlr_xdg_toplevel_set_resizing(struct wlr_xdg_surface *surface,
bool resizing) {
assert(surface->role == WLR_XDG_SURFACE_ROLE_TOPLEVEL);
surface->toplevel->server_pending.resizing = resizing;
return schedule_xdg_surface_configure(surface);
}
uint32_t wlr_xdg_toplevel_set_tiled(struct wlr_xdg_surface *surface,
uint32_t tiled) {
assert(surface->role == WLR_XDG_SURFACE_ROLE_TOPLEVEL);
surface->toplevel->server_pending.tiled = tiled;
return schedule_xdg_surface_configure(surface);
}
| 1 | 15,901 | This function is what **the compositor** calls when it wants to fullscreen a client, not what the client calls. | swaywm-wlroots | c |
@@ -98,12 +98,13 @@ func TestTraceflow(t *testing.T) {
t.Fatal(err)
}
- // Creates 4 traceflows:
+ // Creates 6 traceflows:
// 1. node1Pods[0] -> node1Pods[1], intra node1.
// 2. node1Pods[0] -> node2Pods[0], inter node1 and node2.
// 3. node1Pods[0] -> node1IPs[1], intra node1.
// 4. node1Pods[0] -> node2IPs[0], inter node1 and node2.
// 5. node1Pods[0] -> service, inter node1 and node2.
+ // 6. node1Pods[0] -> not existed Pod
testcases := []testcase{
{
name: "intraNodeTraceflow", | 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/vmware-tanzu/antrea/pkg/apis/ops/v1alpha1"
)
type testcase struct {
name string
tf *v1alpha1.Traceflow
expectedPhase v1alpha1.TraceflowPhase
expectedResults []v1alpha1.NodeResult
}
// TestTraceflow verifies that Traceflow can trace intra-Node and inter-Node traffic while NetworkPolicies are applied.
func TestTraceflow(t *testing.T) {
skipIfProviderIs(t, "kind", "Inter nodes test needs Geneve tunnel")
skipIfNumNodesLessThan(t, 2)
data, err := setupTest(t)
if err != nil {
t.Fatalf("Error when setting up test: %v", err)
}
defer teardownTest(t, data)
if err = data.enableTraceflow(t); err != nil {
t.Fatal("Error when enabling Traceflow")
}
node1 := nodeName(0)
node2 := nodeName(1)
node1Pods, node1IPs, node1CleanupFn := createTestBusyboxPods(t, data, 2, node1)
node2Pods, node2IPs, node2CleanupFn := createTestBusyboxPods(t, data, 1, node2)
defer node1CleanupFn()
defer node2CleanupFn()
require.NoError(t, data.createNginxPod("nginx", node2))
nginxIP, err := data.podWaitForIP(defaultTimeout, "nginx", testNamespace)
require.NoError(t, err)
svc, err := data.createNginxClusterIPService(false)
require.NoError(t, err)
	// Set up two NetworkPolicies:
	// 1. Allow all egress traffic.
	// 2. Deny all ingress traffic to the Pod labeled antrea-e2e=node1Pods[1], so the flow node1Pods[0] -> node1Pods[1] is dropped.
var allowAllEgress *networkingv1.NetworkPolicy
allowAllEgressName := "test-networkpolicy-allow-all-egress"
if allowAllEgress, err = data.createNPAllowAllEgress(allowAllEgressName); err != nil {
t.Fatalf("Error when creating network policy: %v", err)
}
defer func() {
if err = data.deleteNetworkpolicy(allowAllEgress); err != nil {
t.Errorf("Error when deleting network policy: %v", err)
}
}()
var denyAllIngress *networkingv1.NetworkPolicy
denyAllIngressName := "test-networkpolicy-deny-ingress"
if denyAllIngress, err = data.createNPDenyAllIngress("antrea-e2e", node1Pods[1], denyAllIngressName); err != nil {
t.Fatalf("Error when creating network policy: %v", err)
}
defer func() {
if err = data.deleteNetworkpolicy(denyAllIngress); err != nil {
t.Errorf("Error when deleting network policy: %v", err)
}
}()
antreaPod, err := data.getAntreaPodOnNode(node1)
if err = data.waitForNetworkpolicyRealized(antreaPod, allowAllEgressName); err != nil {
t.Fatal(err)
}
if err = data.waitForNetworkpolicyRealized(antreaPod, denyAllIngressName); err != nil {
t.Fatal(err)
}
// Creates 4 traceflows:
// 1. node1Pods[0] -> node1Pods[1], intra node1.
// 2. node1Pods[0] -> node2Pods[0], inter node1 and node2.
// 3. node1Pods[0] -> node1IPs[1], intra node1.
// 4. node1Pods[0] -> node2IPs[0], inter node1 and node2.
// 5. node1Pods[0] -> service, inter node1 and node2.
testcases := []testcase{
{
name: "intraNodeTraceflow",
tf: &v1alpha1.Traceflow{
ObjectMeta: metav1.ObjectMeta{
Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", testNamespace, node1Pods[0], testNamespace, node1Pods[1])),
},
Spec: v1alpha1.TraceflowSpec{
Source: v1alpha1.Source{
Namespace: testNamespace,
Pod: node1Pods[0],
},
Destination: v1alpha1.Destination{
Namespace: testNamespace,
Pod: node1Pods[1],
},
Packet: v1alpha1.Packet{
IPHeader: v1alpha1.IPHeader{
Protocol: 6,
},
TransportHeader: v1alpha1.TransportHeader{
TCP: &v1alpha1.TCPHeader{
DstPort: 80,
Flags: 2,
},
},
},
},
},
expectedPhase: v1alpha1.Succeeded,
expectedResults: []v1alpha1.NodeResult{
{
Node: node1,
Observations: []v1alpha1.Observation{
{
Component: v1alpha1.SpoofGuard,
Action: v1alpha1.Forwarded,
},
{
Component: v1alpha1.NetworkPolicy,
ComponentInfo: "EgressRule",
Action: v1alpha1.Forwarded,
},
{
Component: v1alpha1.NetworkPolicy,
ComponentInfo: "IngressDefaultRule",
Action: v1alpha1.Dropped,
},
},
},
},
},
{
name: "interNodeTraceflow",
tf: &v1alpha1.Traceflow{
ObjectMeta: metav1.ObjectMeta{
Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", testNamespace, node1Pods[0], testNamespace, node2Pods[0])),
},
Spec: v1alpha1.TraceflowSpec{
Source: v1alpha1.Source{
Namespace: testNamespace,
Pod: node1Pods[0],
},
Destination: v1alpha1.Destination{
Namespace: testNamespace,
Pod: node2Pods[0],
},
Packet: v1alpha1.Packet{
IPHeader: v1alpha1.IPHeader{
Protocol: 6,
},
TransportHeader: v1alpha1.TransportHeader{
TCP: &v1alpha1.TCPHeader{
DstPort: 80,
Flags: 2,
},
},
},
},
},
expectedPhase: v1alpha1.Succeeded,
expectedResults: []v1alpha1.NodeResult{
{
Node: node1,
Observations: []v1alpha1.Observation{
{
Component: v1alpha1.SpoofGuard,
Action: v1alpha1.Forwarded,
},
{
Component: v1alpha1.NetworkPolicy,
ComponentInfo: "EgressRule",
Action: v1alpha1.Forwarded,
},
{
Component: v1alpha1.Forwarding,
ComponentInfo: "Output",
Action: v1alpha1.Forwarded,
},
},
},
{
Node: node2,
Observations: []v1alpha1.Observation{
{
Component: v1alpha1.Forwarding,
ComponentInfo: "Classification",
Action: v1alpha1.Received,
},
{
Component: v1alpha1.Forwarding,
ComponentInfo: "Output",
Action: v1alpha1.Delivered,
},
},
},
},
},
{
name: "intraNodeUDPDstIPTraceflow",
tf: &v1alpha1.Traceflow{
ObjectMeta: metav1.ObjectMeta{
Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], node1IPs[1])),
},
Spec: v1alpha1.TraceflowSpec{
Source: v1alpha1.Source{
Namespace: testNamespace,
Pod: node1Pods[0],
},
Destination: v1alpha1.Destination{
IP: node1IPs[1],
},
Packet: v1alpha1.Packet{
IPHeader: v1alpha1.IPHeader{
Protocol: 17,
},
TransportHeader: v1alpha1.TransportHeader{
UDP: &v1alpha1.UDPHeader{
DstPort: 321,
},
},
},
},
},
expectedPhase: v1alpha1.Succeeded,
expectedResults: []v1alpha1.NodeResult{
{
Node: node1,
Observations: []v1alpha1.Observation{
{
Component: v1alpha1.SpoofGuard,
Action: v1alpha1.Forwarded,
},
{
Component: v1alpha1.NetworkPolicy,
ComponentInfo: "EgressRule",
Action: v1alpha1.Forwarded,
},
{
Component: v1alpha1.NetworkPolicy,
ComponentInfo: "IngressDefaultRule",
Action: v1alpha1.Dropped,
},
},
},
},
},
{
name: "interNodeICMPDstIPTraceflow",
tf: &v1alpha1.Traceflow{
ObjectMeta: metav1.ObjectMeta{
Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], node2IPs[0])),
},
Spec: v1alpha1.TraceflowSpec{
Source: v1alpha1.Source{
Namespace: testNamespace,
Pod: node1Pods[0],
},
Destination: v1alpha1.Destination{
IP: node2IPs[0],
},
Packet: v1alpha1.Packet{
IPHeader: v1alpha1.IPHeader{
Protocol: 1,
},
},
},
},
expectedPhase: v1alpha1.Succeeded,
expectedResults: []v1alpha1.NodeResult{
{
Node: node1,
Observations: []v1alpha1.Observation{
{
Component: v1alpha1.SpoofGuard,
Action: v1alpha1.Forwarded,
},
{
Component: v1alpha1.NetworkPolicy,
ComponentInfo: "EgressRule",
Action: v1alpha1.Forwarded,
},
{
Component: v1alpha1.Forwarding,
ComponentInfo: "Output",
Action: v1alpha1.Forwarded,
},
},
},
{
Node: node2,
Observations: []v1alpha1.Observation{
{
Component: v1alpha1.Forwarding,
ComponentInfo: "Classification",
Action: v1alpha1.Received,
},
{
Component: v1alpha1.Forwarding,
ComponentInfo: "Output",
Action: v1alpha1.Delivered,
},
},
},
},
},
{
name: "serviceTraceflow",
tf: &v1alpha1.Traceflow{
ObjectMeta: metav1.ObjectMeta{
Name: randName(fmt.Sprintf("%s-%s-to-svc-%s-", testNamespace, node1Pods[0], svc.Name)),
},
Spec: v1alpha1.TraceflowSpec{
Source: v1alpha1.Source{
Namespace: testNamespace,
Pod: node1Pods[0],
},
Destination: v1alpha1.Destination{
Namespace: testNamespace,
Service: svc.Name,
},
Packet: v1alpha1.Packet{
IPHeader: v1alpha1.IPHeader{
Protocol: 6,
},
TransportHeader: v1alpha1.TransportHeader{
TCP: &v1alpha1.TCPHeader{
DstPort: 80,
Flags: 2,
},
},
},
},
},
expectedPhase: v1alpha1.Succeeded,
expectedResults: []v1alpha1.NodeResult{
{
Node: node1,
Observations: []v1alpha1.Observation{
{
Component: v1alpha1.SpoofGuard,
Action: v1alpha1.Forwarded,
},
{
Component: v1alpha1.LB,
Pod: fmt.Sprintf("%s/%s", testNamespace, "nginx"),
TranslatedDstIP: nginxIP,
Action: v1alpha1.Forwarded,
},
{
Component: v1alpha1.NetworkPolicy,
ComponentInfo: "EgressRule",
Action: v1alpha1.Forwarded,
},
{
Component: v1alpha1.Forwarding,
ComponentInfo: "Output",
Action: v1alpha1.Forwarded,
},
},
},
{
Node: node2,
Observations: []v1alpha1.Observation{
{
Component: v1alpha1.Forwarding,
ComponentInfo: "Classification",
Action: v1alpha1.Received,
},
{
Component: v1alpha1.Forwarding,
ComponentInfo: "Output",
Action: v1alpha1.Delivered,
},
},
},
},
},
}
t.Run("traceflowGroupTest", func(t *testing.T) {
for _, tc := range testcases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
if _, err := data.crdClient.OpsV1alpha1().Traceflows().Create(context.TODO(), tc.tf, metav1.CreateOptions{}); err != nil {
t.Fatalf("Error when creating traceflow: %v", err)
}
defer func() {
if err := data.crdClient.OpsV1alpha1().Traceflows().Delete(context.TODO(), tc.tf.Name, metav1.DeleteOptions{}); err != nil {
t.Errorf("Error when deleting traceflow: %v", err)
}
}()
tf, err := data.waitForTraceflow(tc.tf.Name, tc.expectedPhase)
if err != nil {
t.Fatalf("Error: Get Traceflow failed: %v", err)
return
}
if len(tf.Status.Results) != len(tc.expectedResults) {
t.Fatalf("Error: Traceflow Results should be %v, but got %v", tc.expectedResults, tf.Status.Results)
return
}
if len(tc.expectedResults) == 1 {
if err = compareObservations(tc.expectedResults[0], tf.Status.Results[0]); err != nil {
t.Fatal(err)
return
}
} else {
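					// The per-Node results of an inter-Node traceflow can come back in
					// either order; the source Node's result always begins with a
					// SpoofGuard observation, so use that to pair results with the
					// expected values.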
if tf.Status.Results[0].Observations[0].Component == v1alpha1.SpoofGuard {
if err = compareObservations(tc.expectedResults[0], tf.Status.Results[0]); err != nil {
t.Fatal(err)
return
}
if err = compareObservations(tc.expectedResults[1], tf.Status.Results[1]); err != nil {
t.Fatal(err)
return
}
} else {
if err = compareObservations(tc.expectedResults[0], tf.Status.Results[1]); err != nil {
t.Fatal(err)
return
}
if err = compareObservations(tc.expectedResults[1], tf.Status.Results[0]); err != nil {
t.Fatal(err)
return
}
}
}
})
}
})
}
func (data *TestData) waitForTraceflow(name string, phase v1alpha1.TraceflowPhase) (*v1alpha1.Traceflow, error) {
var tf *v1alpha1.Traceflow
var err error
if err = wait.PollImmediate(1*time.Second, 15*time.Second, func() (bool, error) {
tf, err = data.crdClient.OpsV1alpha1().Traceflows().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil || tf.Status.Phase != phase {
return false, nil
}
return true, nil
}); err != nil {
return nil, err
}
return tf, nil
}
func (data *TestData) enableTraceflow(t *testing.T) error {
configMap, err := data.GetAntreaConfigMap(antreaNamespace)
if err != nil {
t.Fatalf("Failed to get ConfigMap: %v", err)
}
// Enable Traceflow in antrea-controller and antrea-agent ConfigMap.
// Use Geneve tunnel.
antreaControllerConf, _ := configMap.Data["antrea-controller.conf"]
antreaControllerConf = strings.Replace(antreaControllerConf, "# Traceflow: false", " Traceflow: true", 1)
configMap.Data["antrea-controller.conf"] = antreaControllerConf
antreaAgentConf, _ := configMap.Data["antrea-agent.conf"]
antreaAgentConf = strings.Replace(antreaAgentConf, "# Traceflow: false", " Traceflow: true", 1)
antreaAgentConf = strings.Replace(antreaAgentConf, "# AntreaProxy: false", " AntreaProxy: true", 1)
antreaAgentConf = strings.Replace(antreaAgentConf, "#tunnelType: geneve", "tunnelType: geneve", 1)
configMap.Data["antrea-agent.conf"] = antreaAgentConf
if _, err := data.clientset.CoreV1().ConfigMaps(antreaNamespace).Update(context.TODO(), configMap, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("failed to update ConfigMap %s: %v", configMap.Name, err)
}
_, err = data.restartAntreaControllerPod(defaultTimeout)
if err != nil {
return fmt.Errorf("error when restarting antrea-controller Pod: %v", err)
}
err = data.restartAntreaAgentPods(defaultTimeout)
if err != nil {
return fmt.Errorf("error when restarting antrea-agent Pod: %v", err)
}
return nil
}
// compareObservations compares expected results and actual results.
func compareObservations(expected v1alpha1.NodeResult, actual v1alpha1.NodeResult) error {
if expected.Node != actual.Node {
return fmt.Errorf("NodeResult should be on %s, but is on %s", expected.Node, actual.Node)
}
exObs := expected.Observations
acObs := actual.Observations
if len(exObs) != len(acObs) {
return fmt.Errorf("Observations should be %v, but got %v", exObs, acObs)
}
for i := 0; i < len(exObs); i++ {
if exObs[i].Component != acObs[i].Component ||
exObs[i].ComponentInfo != acObs[i].ComponentInfo ||
exObs[i].Pod != acObs[i].Pod ||
exObs[i].TranslatedDstIP != acObs[i].TranslatedDstIP ||
exObs[i].Action != acObs[i].Action {
return fmt.Errorf("Observations should be %v, but got %v", exObs, acObs)
}
}
return nil
}
// createNPDenyAllIngress creates a NetworkPolicy that denies all ingress traffic for Pods with the given label.
func (data *TestData) createNPDenyAllIngress(key string, value string, name string) (*networkingv1.NetworkPolicy, error) {
spec := &networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
key: value,
},
},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
}
return data.createNetworkPolicy(name, spec)
}
// createNPAllowAllEgress creates a NetworkPolicy that allows all egress traffic.
func (data *TestData) createNPAllowAllEgress(name string) (*networkingv1.NetworkPolicy, error) {
spec := &networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
Egress: []networkingv1.NetworkPolicyEgressRule{
{},
},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
}
return data.createNetworkPolicy(name, spec)
}
// waitForNetworkpolicyRealized waits for the NetworkPolicy to be realized by the antrea-agent Pod.
func (data *TestData) waitForNetworkpolicyRealized(pod string, networkpolicy string) error {
if err := wait.Poll(200*time.Millisecond, 5*time.Second, func() (bool, error) {
cmds := []string{"antctl", "get", "networkpolicy", networkpolicy, "-n", testNamespace}
if _, stderr, err := runAntctl(pod, cmds, data); err != nil {
if strings.Contains(stderr, "server could not find the requested resource") {
return false, nil
}
return false, err
}
return true, nil
}); err == wait.ErrWaitTimeout {
return fmt.Errorf("NetworkPolicy %s isn't realized in time", networkpolicy)
} else if err != nil {
return fmt.Errorf("Error when executing antctl get NetworkPolicy: %v", err)
}
return nil
}
| 1 | 24,044 | s/not existed Pod/non-existing Pod | antrea-io-antrea | go |
@@ -62,10 +62,10 @@ type ECSStatusDescriber struct {
env string
svc string
- svcDescriber serviceDescriber
- ecsSvcGetter ecsServiceGetter
- cwSvcGetter alarmStatusGetter
- aasSvcGetter autoscalingAlarmNamesGetter
+ ecsSvcDescriber serviceDescriber
+ ecsSvcGetter ecsServiceGetter
+ cwSvcGetter alarmStatusGetter
+ aasSvcGetter autoscalingAlarmNamesGetter
}
// AppRunnerStatusDescriber retrieves status of an AppRunner service. | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package describe
import (
"bytes"
"encoding/json"
"fmt"
"math"
"strings"
"text/tabwriter"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/aws/aas"
"github.com/aws/copilot-cli/internal/pkg/aws/apprunner"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudwatch"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudwatchlogs"
awsECS "github.com/aws/copilot-cli/internal/pkg/aws/ecs"
"github.com/aws/copilot-cli/internal/pkg/aws/sessions"
"github.com/aws/copilot-cli/internal/pkg/deploy"
"github.com/aws/copilot-cli/internal/pkg/ecs"
"github.com/aws/copilot-cli/internal/pkg/term/color"
)
const (
maxAlarmStatusColumnWidth = 30
fmtAppRunnerSvcLogGroupName = "/aws/apprunner/%s/%s/service"
defaultServiceLogsLimit = 10
)
type alarmStatusGetter interface {
AlarmsWithTags(tags map[string]string) ([]cloudwatch.AlarmStatus, error)
AlarmStatus(alarms []string) ([]cloudwatch.AlarmStatus, error)
}
type logGetter interface {
LogEvents(opts cloudwatchlogs.LogEventsOpts) (*cloudwatchlogs.LogEventsOutput, error)
}
type ecsServiceGetter interface {
ServiceTasks(clusterName, serviceName string) ([]*awsECS.Task, error)
Service(clusterName, serviceName string) (*awsECS.Service, error)
}
type serviceDescriber interface {
DescribeService(app, env, svc string) (*ecs.ServiceDesc, error)
}
type apprunnerServiceDescriber interface {
Service() (*apprunner.Service, error)
}
type autoscalingAlarmNamesGetter interface {
ECSServiceAlarmNames(cluster, service string) ([]string, error)
}
// ECSStatusDescriber retrieves status of an ECS service.
type ECSStatusDescriber struct {
app string
env string
svc string
svcDescriber serviceDescriber
ecsSvcGetter ecsServiceGetter
cwSvcGetter alarmStatusGetter
aasSvcGetter autoscalingAlarmNamesGetter
}
// AppRunnerStatusDescriber retrieves status of an AppRunner service.
type AppRunnerStatusDescriber struct {
app string
env string
svc string
svcDescriber apprunnerServiceDescriber
eventsGetter logGetter
}
// ecsServiceStatus contains the status for an ECS service.
type ecsServiceStatus struct {
Service awsECS.ServiceStatus
Tasks []awsECS.TaskStatus `json:"tasks"`
Alarms []cloudwatch.AlarmStatus `json:"alarms"`
}
// apprunnerServiceStatus contains the status for an AppRunner service.
type apprunnerServiceStatus struct {
Service apprunner.Service
LogEvents []*cloudwatchlogs.Event
}
// NewServiceStatusConfig contains the fields needed to initialize a service status describer.
type NewServiceStatusConfig struct {
App string
Env string
Svc string
ConfigStore ConfigStoreSvc
}
// NewECSStatusDescriber instantiates a new ECSStatusDescriber struct.
func NewECSStatusDescriber(opt *NewServiceStatusConfig) (*ECSStatusDescriber, error) {
env, err := opt.ConfigStore.GetEnvironment(opt.App, opt.Env)
if err != nil {
return nil, fmt.Errorf("get environment %s: %w", opt.Env, err)
}
sess, err := sessions.NewProvider().FromRole(env.ManagerRoleARN, env.Region)
if err != nil {
return nil, fmt.Errorf("session for role %s and region %s: %w", env.ManagerRoleARN, env.Region, err)
}
return &ECSStatusDescriber{
app: opt.App,
env: opt.Env,
svc: opt.Svc,
svcDescriber: ecs.New(sess),
cwSvcGetter: cloudwatch.New(sess),
ecsSvcGetter: awsECS.New(sess),
aasSvcGetter: aas.New(sess),
}, nil
}
// NewAppRunnerStatusDescriber instantiates a new AppRunnerStatusDescriber struct.
func NewAppRunnerStatusDescriber(opt *NewServiceStatusConfig) (*AppRunnerStatusDescriber, error) {
svcDescriber, err := NewAppRunnerServiceDescriber(NewServiceConfig{
App: opt.App,
Env: opt.Env,
Svc: opt.Svc,
ConfigStore: opt.ConfigStore,
})
if err != nil {
return nil, err
}
return &AppRunnerStatusDescriber{
app: opt.App,
env: opt.Env,
svc: opt.Svc,
svcDescriber: svcDescriber,
eventsGetter: cloudwatchlogs.New(svcDescriber.sess),
}, nil
}
// Describe returns status of an ECS service.
func (s *ECSStatusDescriber) Describe() (HumanJSONStringer, error) {
svcDesc, err := s.svcDescriber.DescribeService(s.app, s.env, s.svc)
if err != nil {
return nil, fmt.Errorf("get ECS service description for %s: %w", s.svc, err)
}
service, err := s.ecsSvcGetter.Service(svcDesc.ClusterName, svcDesc.Name)
if err != nil {
return nil, fmt.Errorf("get service %s: %w", svcDesc.Name, err)
}
var taskStatus []awsECS.TaskStatus
for _, task := range svcDesc.Tasks {
status, err := task.TaskStatus()
if err != nil {
return nil, fmt.Errorf("get status for task %s: %w", *task.TaskArn, err)
}
taskStatus = append(taskStatus, *status)
}
var alarms []cloudwatch.AlarmStatus
taggedAlarms, err := s.cwSvcGetter.AlarmsWithTags(map[string]string{
deploy.AppTagKey: s.app,
deploy.EnvTagKey: s.env,
deploy.ServiceTagKey: s.svc,
})
if err != nil {
return nil, fmt.Errorf("get tagged CloudWatch alarms: %w", err)
}
alarms = append(alarms, taggedAlarms...)
autoscalingAlarms, err := s.ecsServiceAutoscalingAlarms(svcDesc.ClusterName, svcDesc.Name)
if err != nil {
return nil, err
}
alarms = append(alarms, autoscalingAlarms...)
return &ecsServiceStatus{
Service: service.ServiceStatus(),
Tasks: taskStatus,
Alarms: alarms,
}, nil
}
// Describe returns status of an AppRunner service.
func (a *AppRunnerStatusDescriber) Describe() (HumanJSONStringer, error) {
svc, err := a.svcDescriber.Service()
if err != nil {
return nil, fmt.Errorf("get AppRunner service description for App Runner service %s in environment %s: %w", a.svc, a.env, err)
}
logGroupName := fmt.Sprintf(fmtAppRunnerSvcLogGroupName, svc.Name, svc.ID)
logEventsOpts := cloudwatchlogs.LogEventsOpts{
LogGroup: logGroupName,
Limit: aws.Int64(defaultServiceLogsLimit),
}
logEventsOutput, err := a.eventsGetter.LogEvents(logEventsOpts)
if err != nil {
return nil, fmt.Errorf("get log events for log group %s: %w", logGroupName, err)
}
return &apprunnerServiceStatus{
Service: *svc,
LogEvents: logEventsOutput.Events,
}, nil
}
func (s *ECSStatusDescriber) ecsServiceAutoscalingAlarms(cluster, service string) ([]cloudwatch.AlarmStatus, error) {
alarmNames, err := s.aasSvcGetter.ECSServiceAlarmNames(cluster, service)
if err != nil {
return nil, fmt.Errorf("retrieve auto scaling alarm names for ECS service %s/%s: %w", cluster, service, err)
}
alarms, err := s.cwSvcGetter.AlarmStatus(alarmNames)
if err != nil {
return nil, fmt.Errorf("get auto scaling CloudWatch alarms: %w", err)
}
return alarms, nil
}
// JSONString returns the ecsServiceStatus struct serialized as a JSON string.
func (s *ecsServiceStatus) JSONString() (string, error) {
b, err := json.Marshal(s)
if err != nil {
return "", fmt.Errorf("marshal services: %w", err)
}
return fmt.Sprintf("%s\n", b), nil
}
// JSONString returns the apprunnerServiceStatus struct serialized as a JSON string.
func (a *apprunnerServiceStatus) JSONString() (string, error) {
data := struct {
ARN string `json:"arn"`
Status string `json:"status"`
CreatedAt time.Time `json:"createdAt"`
UpdatedAt time.Time `json:"updatedAt"`
Source struct {
ImageID string `json:"imageId"`
} `json:"source"`
}{
ARN: a.Service.ServiceARN,
Status: a.Service.Status,
CreatedAt: a.Service.DateCreated,
UpdatedAt: a.Service.DateUpdated,
Source: struct {
ImageID string `json:"imageId"`
}{
ImageID: a.Service.ImageID,
},
}
b, err := json.Marshal(data)
if err != nil {
return "", fmt.Errorf("marshal services: %w", err)
}
return fmt.Sprintf("%s\n", b), nil
}
// HumanString returns the ecsServiceStatus struct in a human-readable format.
func (s *ecsServiceStatus) HumanString() string {
var b bytes.Buffer
writer := tabwriter.NewWriter(&b, minCellWidth, tabWidth, statusCellPaddingWidth, paddingChar, noAdditionalFormatting)
fmt.Fprint(writer, color.Bold.Sprint("Service Status\n\n"))
writer.Flush()
fmt.Fprintf(writer, " %s %v / %v running tasks (%v pending)\n", statusColor(s.Service.Status),
s.Service.RunningCount, s.Service.DesiredCount, s.Service.DesiredCount-s.Service.RunningCount)
fmt.Fprint(writer, color.Bold.Sprint("\nLast Deployment\n\n"))
writer.Flush()
fmt.Fprintf(writer, " %s\t%s\n", "Updated At", humanizeTime(s.Service.LastDeploymentAt))
fmt.Fprintf(writer, " %s\t%s\n", "Task Definition", s.Service.TaskDefinition)
fmt.Fprint(writer, color.Bold.Sprint("\nTask Status\n\n"))
writer.Flush()
headers := []string{"ID", "Image Digest", "Last Status", "Started At", "Stopped At", "Capacity Provider", "Health Status"}
fmt.Fprintf(writer, " %s\n", strings.Join(headers, "\t"))
fmt.Fprintf(writer, " %s\n", strings.Join(underline(headers), "\t"))
for _, task := range s.Tasks {
fmt.Fprint(writer, task.HumanString())
}
fmt.Fprint(writer, color.Bold.Sprint("\nAlarms\n\n"))
writer.Flush()
headers = []string{"Name", "Condition", "Last Updated", "Health"}
fmt.Fprintf(writer, " %s\n", strings.Join(headers, "\t"))
fmt.Fprintf(writer, " %s\n", strings.Join(underline(headers), "\t"))
for _, alarm := range s.Alarms {
updatedTimeSince := humanizeTime(alarm.UpdatedTimes)
printWithMaxWidth(writer, " %s\t%s\t%s\t%s\n", maxAlarmStatusColumnWidth, alarm.Name, alarm.Condition, updatedTimeSince, alarmHealthColor(alarm.Status))
fmt.Fprintf(writer, " %s\t%s\t%s\t%s\n", "", "", "", "")
}
writer.Flush()
return b.String()
}
// HumanString returns the apprunnerServiceStatus struct in a human-readable format.
func (a *apprunnerServiceStatus) HumanString() string {
var b bytes.Buffer
writer := tabwriter.NewWriter(&b, minCellWidth, tabWidth, statusCellPaddingWidth, paddingChar, noAdditionalFormatting)
fmt.Fprint(writer, color.Bold.Sprint("Service Status\n\n"))
writer.Flush()
fmt.Fprintf(writer, " Status %s \n", statusColor(a.Service.Status))
fmt.Fprint(writer, color.Bold.Sprint("\nLast Deployment\n\n"))
writer.Flush()
fmt.Fprintf(writer, " %s\t%s\n", "Updated At", humanizeTime(a.Service.DateUpdated))
serviceID := fmt.Sprintf("%s/%s", a.Service.Name, a.Service.ID)
fmt.Fprintf(writer, " %s\t%s\n", "Service ID", serviceID)
imageID := a.Service.ImageID
if strings.Contains(a.Service.ImageID, "/") {
imageID = strings.SplitAfterN(imageID, "/", 2)[1] // strip the registry.
}
fmt.Fprintf(writer, " %s\t%s\n", "Source", imageID)
writer.Flush()
fmt.Fprint(writer, color.Bold.Sprint("\nSystem Logs\n\n"))
writer.Flush()
lo, _ := time.LoadLocation("UTC")
for _, event := range a.LogEvents {
timestamp := time.Unix(event.Timestamp/1000, 0).In(lo)
fmt.Fprintf(writer, " %v\t%s\n", timestamp.Format(time.RFC3339), event.Message)
}
writer.Flush()
return b.String()
}
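// printWithMaxWidth writes members as one or more tabwriter rows, wrapping any
// member longer than width runes onto additional rows so long values do not
// stretch their column.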
func printWithMaxWidth(w *tabwriter.Writer, format string, width int, members ...string) {
columns := make([][]string, len(members))
maxNumOfLinesPerCol := 0
for ind, member := range members {
var column []string
builder := new(strings.Builder)
// https://stackoverflow.com/questions/25686109/split-string-by-length-in-golang
for i, r := range []rune(member) {
builder.WriteRune(r)
if i > 0 && (i+1)%width == 0 {
column = append(column, builder.String())
builder.Reset()
}
}
if builder.String() != "" {
column = append(column, builder.String())
}
maxNumOfLinesPerCol = int(math.Max(float64(len(column)), float64(maxNumOfLinesPerCol)))
columns[ind] = column
}
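	// Print the wrapped lines row by row; columns that have run out of lines
	// are padded with empty strings so the tabwriter keeps the rows aligned.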
for i := 0; i < maxNumOfLinesPerCol; i++ {
args := make([]interface{}, len(columns))
for ind, memberSlice := range columns {
if i >= len(memberSlice) {
args[ind] = ""
continue
}
args[ind] = memberSlice[i]
}
fmt.Fprintf(w, format, args...)
}
}
func alarmHealthColor(status string) string {
switch status {
case "OK":
return color.Green.Sprint(status)
case "ALARM":
return color.Red.Sprint(status)
case "INSUFFICIENT_DATA":
return color.Yellow.Sprint(status)
default:
return status
}
}
func statusColor(status string) string {
switch status {
case "ACTIVE":
return color.Green.Sprint(status)
case "DRAINING":
return color.Yellow.Sprint(status)
case "RUNNING":
return color.Green.Sprint(status)
case "UPDATING":
return color.Yellow.Sprint(status)
default:
return color.Red.Sprint(status)
}
}
| 1 | 17,548 | Why did we not leave this as just `svcDescriber`? | aws-copilot-cli | go |
@@ -11,12 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Provides the data access object (DAO) for Groups."""
+from Queue import Queue
+
from google.cloud.security.common.data_access import dao
from google.cloud.security.common.data_access.sql_queries import select_data
-from google.cloud.security.common.gcp_type import group_member
from google.cloud.security.common.util import log_util
LOGGER = log_util.get_logger(__name__) | 1 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides the data access object (DAO) for Groups."""
from google.cloud.security.common.data_access import dao
from google.cloud.security.common.data_access.sql_queries import select_data
from google.cloud.security.common.gcp_type import group_member
from google.cloud.security.common.util import log_util
LOGGER = log_util.get_logger(__name__)
class GroupDao(dao.Dao):
"""Data access object (DAO) for Groups."""
def get_group_users(self, resource_name, timestamp):
"""Get the group members who are users.
Args:
            resource_name: String name of the resource to query.
            timestamp: The timestamp of the snapshot.
        Returns:
            A dict keyed by group id, where each value is a list of that
            group's members (gcp_type.group_member) of user type.
"""
sql = select_data.GROUP_USERS.format(timestamp)
rows = self.execute_sql_with_fetch(resource_name, sql, None)
group_users = {}
for row in rows:
# TODO: Determine if parenting is needed here, similar to
# project IAM policies.
group_user = group_member.GroupMember(
row.get('member_role'),
row.get('member_type'),
row.get('member_email'))
if row.get('group_id') not in group_users:
group_users[row.get('group_id')] = [group_user]
else:
group_users[row.get('group_id')].append(group_user)
return group_users
| 1 | 25,803 | nit: I think we've been (in)consistent in leaving a blank line here. | forseti-security-forseti-security | py |
@@ -47,13 +47,15 @@ def define_violation(dbengine):
__tablename__ = violations_tablename
id = Column(Integer, primary_key=True)
- model_handle = Column(String(256))
+ inventory_index_id = Column(String(256))
resource_id = Column(String(256), nullable=False)
resource_type = Column(String(256), nullable=False)
+ full_name = Column(String(1024))
rule_name = Column(String(256))
rule_index = Column(Integer, default=0)
violation_type = Column(String(256), nullable=False)
data = Column(Text)
+ inventory_data = Column(Text)
def __repr__(self):
"""String representation. | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Database access objects for Forseti Scanner. """
import json
from sqlalchemy import Column
from sqlalchemy import String, Integer, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from google.cloud.forseti.services import db
# pylint: disable=no-member
def define_violation(dbengine):
"""Defines table class for violations.
A violation table will be created on a per-model basis.
Args:
dbengine (engine): sqlalchemy database engine
Returns:
        ViolationAccess: facade for accessing violations.
"""
base = declarative_base()
violations_tablename = 'violations'
class Violation(base):
"""Row entry for a violation."""
__tablename__ = violations_tablename
id = Column(Integer, primary_key=True)
model_handle = Column(String(256))
resource_id = Column(String(256), nullable=False)
resource_type = Column(String(256), nullable=False)
rule_name = Column(String(256))
rule_index = Column(Integer, default=0)
violation_type = Column(String(256), nullable=False)
data = Column(Text)
def __repr__(self):
"""String representation.
Returns:
str: string representation of the Violation row entry.
"""
string = ("<Violation(violation_type='{}', resource_type='{}' "
"rule_name='{}')>")
return string.format(
self.violation_type, self.resource_type, self.rule_name)
class ViolationAccess(object):
"""Facade for violations, implement APIs against violations table."""
TBL_VIOLATIONS = Violation
def __init__(self, dbengine):
"""Constructor for the Violation Access.
Args:
dbengine (engine): sqlalchemy database engine
"""
self.engine = dbengine
self.violationmaker = self._create_violation_session()
def _create_violation_session(self):
"""Create a session to read from the models table.
Returns:
ScopedSessionmaker: A scoped session maker that will create
a session that is automatically released.
"""
return db.ScopedSessionMaker(
sessionmaker(
bind=self.engine,
expire_on_commit=False),
auto_commit=True)
def create(self, violations, model_handle):
"""Save violations to the db table.
Args:
violations (list): A list of violations.
model_handle (str): Name of the model that the violations
originate from.
"""
with self.violationmaker() as session:
for violation in violations:
violation = self.TBL_VIOLATIONS(
model_handle=model_handle,
resource_id=violation.get('resource_id'),
resource_type=violation.get('resource_type'),
rule_name=violation.get('rule_name'),
rule_index=violation.get('rule_index'),
violation_type=violation.get('violation_type'),
data=json.dumps(violation.get('violation_data'))
)
session.add(violation)
def list(self):
"""List all violations from the db table.
Returns:
list: List of Violation row entry objects.
"""
with self.violationmaker() as session:
return session.query(self.TBL_VIOLATIONS).all()
base.metadata.create_all(dbengine)
return ViolationAccess
| 1 | 28,553 | Maybe we can change it to a more generic name like source_id. | forseti-security-forseti-security | py |
@@ -2019,7 +2019,7 @@ func testKeyManagerRekeyAddDeviceWithPromptViaFolderAccess(t *testing.T, ver Met
defer ops.mdWriterLock.Unlock(lState)
// Now cause a paper prompt unlock via a folder access
- errCh := make(chan error)
+ errCh := make(chan error, 1)
go func() {
_, err := GetRootNodeForTest(ctx, config2Dev2, name, false)
select { | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/tlf"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
)
type shimKMCrypto struct {
Crypto
pure cryptoPure
}
func (c shimKMCrypto) EncryptTLFCryptKeys(oldKeys []kbfscrypto.TLFCryptKey,
key kbfscrypto.TLFCryptKey) (EncryptedTLFCryptKeys, error) {
return c.pure.EncryptTLFCryptKeys(oldKeys, key)
}
func (c shimKMCrypto) DecryptTLFCryptKeys(
encKeys EncryptedTLFCryptKeys, key kbfscrypto.TLFCryptKey) (
[]kbfscrypto.TLFCryptKey, error) {
return c.pure.DecryptTLFCryptKeys(encKeys, key)
}
func (c shimKMCrypto) MakeTLFWriterKeyBundleID(
wkb TLFWriterKeyBundleV3) (TLFWriterKeyBundleID, error) {
return c.pure.MakeTLFWriterKeyBundleID(wkb)
}
func (c shimKMCrypto) MakeTLFReaderKeyBundleID(
wkb TLFReaderKeyBundleV3) (TLFReaderKeyBundleID, error) {
return c.pure.MakeTLFReaderKeyBundleID(wkb)
}
func (c shimKMCrypto) MaskTLFCryptKey(
serverHalf kbfscrypto.TLFCryptKeyServerHalf,
key kbfscrypto.TLFCryptKey) (kbfscrypto.TLFCryptKeyClientHalf, error) {
return c.pure.MaskTLFCryptKey(serverHalf, key)
}
func (c shimKMCrypto) UnmaskTLFCryptKey(
serverHalf kbfscrypto.TLFCryptKeyServerHalf,
clientHalf kbfscrypto.TLFCryptKeyClientHalf) (
kbfscrypto.TLFCryptKey, error) {
return c.pure.UnmaskTLFCryptKey(serverHalf, clientHalf)
}
func keyManagerInit(t *testing.T, ver MetadataVer) (mockCtrl *gomock.Controller,
config *ConfigMock, ctx context.Context) {
ctr := NewSafeTestReporter(t)
mockCtrl = gomock.NewController(ctr)
config = NewConfigMock(mockCtrl, ctr)
keyCache := NewKeyCacheStandard(100)
config.SetKeyCache(keyCache)
keyman := NewKeyManagerStandard(config)
config.SetKeyManager(keyman)
interposeDaemonKBPKI(config, "alice", "bob", "charlie", "dave")
ctx = context.Background()
codec := kbfscodec.NewMsgpack()
config.SetCodec(codec)
cryptoPure := MakeCryptoCommon(codec)
config.SetCrypto(shimKMCrypto{config.Crypto(), cryptoPure})
config.SetMetadataVersion(ver)
return
}
func keyManagerShutdown(mockCtrl *gomock.Controller, config *ConfigMock) {
config.ctr.CheckForFailures()
mockCtrl.Finish()
}
var serverHalf = kbfscrypto.MakeTLFCryptKeyServerHalf([32]byte{0x2})
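// expectUncachedGetTLFCryptKey sets up the mock expectations for fetching a
// TLF crypt key that is missing from the key cache; the expected calls differ
// depending on whether the metadata version stores historic TLF crypt keys.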
func expectUncachedGetTLFCryptKey(t *testing.T, config *ConfigMock, tlfID tlf.ID, keyGen, currKeyGen KeyGen,
uid keybase1.UID, subkey kbfscrypto.CryptPublicKey,
storesHistoric bool, tlfCryptKey, currTLFCryptKey kbfscrypto.TLFCryptKey) {
if keyGen == currKeyGen {
require.Equal(t, tlfCryptKey, currTLFCryptKey)
}
if storesHistoric {
if keyGen < currKeyGen {
config.mockKbpki.EXPECT().GetCurrentCryptPublicKey(
gomock.Any()).Return(subkey, nil)
}
clientHalf := kbfscrypto.MaskTLFCryptKey(
serverHalf, currTLFCryptKey)
// get the xor'd key out of the metadata
config.mockKbpki.EXPECT().GetCurrentCryptPublicKey(
gomock.Any()).Return(subkey, nil)
config.mockCrypto.EXPECT().DecryptTLFCryptKeyClientHalf(
gomock.Any(), kbfscrypto.TLFEphemeralPublicKey{},
gomock.Any()).Return(clientHalf, nil)
// get the server-side half and retrieve the real
// current secret key
config.mockKops.EXPECT().GetTLFCryptKeyServerHalf(gomock.Any(),
gomock.Any(), gomock.Any()).Return(serverHalf, nil)
} else {
clientHalf := kbfscrypto.MaskTLFCryptKey(
serverHalf, tlfCryptKey)
// get the xor'd key out of the metadata
config.mockKbpki.EXPECT().GetCurrentCryptPublicKey(
gomock.Any()).Return(subkey, nil)
config.mockCrypto.EXPECT().DecryptTLFCryptKeyClientHalf(
gomock.Any(), kbfscrypto.TLFEphemeralPublicKey{},
gomock.Any()).Return(clientHalf, nil)
// get the server-side half and retrieve the real secret key
config.mockKops.EXPECT().GetTLFCryptKeyServerHalf(gomock.Any(),
gomock.Any(), gomock.Any()).Return(serverHalf, nil)
}
}
func expectUncachedGetTLFCryptKeyAnyDevice(
config *ConfigMock, tlfID tlf.ID, keyGen KeyGen, uid keybase1.UID,
subkey kbfscrypto.CryptPublicKey, tlfCryptKey kbfscrypto.TLFCryptKey) {
clientHalf := kbfscrypto.MaskTLFCryptKey(serverHalf, tlfCryptKey)
// get the xor'd key out of the metadata
config.mockKbpki.EXPECT().GetCryptPublicKeys(gomock.Any(), uid).
Return([]kbfscrypto.CryptPublicKey{subkey}, nil)
config.mockCrypto.EXPECT().DecryptTLFCryptKeyClientHalfAny(gomock.Any(),
gomock.Any(), false).Return(clientHalf, 0, nil)
// get the server-side half and retrieve the real secret key
config.mockKops.EXPECT().GetTLFCryptKeyServerHalf(gomock.Any(),
gomock.Any(), gomock.Any()).Return(serverHalf, nil)
}
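// expectRekey registers the mock expectations for a rekey that provisions key
// halves for numDevices devices, optionally expecting a handle-conflict check
// (handleChange) and the creation of a new key generation (expectNewKeyGen).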
func expectRekey(config *ConfigMock, bh tlf.Handle, numDevices int,
handleChange, expectNewKeyGen bool,
tlfCryptKey kbfscrypto.TLFCryptKey) {
if handleChange {
// if the handle changes the key manager checks for a conflict
config.mockMdops.EXPECT().GetLatestHandleForTLF(gomock.Any(), gomock.Any()).
Return(bh, nil)
}
// generate new keys
config.mockCrypto.EXPECT().MakeRandomTLFEphemeralKeys().Return(
kbfscrypto.TLFEphemeralPublicKey{},
kbfscrypto.TLFEphemeralPrivateKey{}, nil)
if expectNewKeyGen {
config.mockCrypto.EXPECT().MakeRandomTLFKeys().Return(
kbfscrypto.TLFPublicKey{}, kbfscrypto.TLFPrivateKey{},
tlfCryptKey, nil)
}
config.mockCrypto.EXPECT().MakeRandomTLFCryptKeyServerHalf().Return(
serverHalf, nil).Times(numDevices)
subkey := kbfscrypto.MakeFakeCryptPublicKeyOrBust("crypt public key")
config.mockKbpki.EXPECT().GetCryptPublicKeys(gomock.Any(), gomock.Any()).
Return([]kbfscrypto.CryptPublicKey{subkey}, nil).Times(numDevices)
clientHalf := kbfscrypto.MaskTLFCryptKey(serverHalf, tlfCryptKey)
	// make keys for each of the numDevices devices
config.mockCrypto.EXPECT().EncryptTLFCryptKeyClientHalf(
kbfscrypto.TLFEphemeralPrivateKey{}, subkey, clientHalf).Return(
EncryptedTLFCryptKeyClientHalf{}, nil).Times(numDevices)
config.mockKops.EXPECT().PutTLFCryptKeyServerHalves(gomock.Any(), gomock.Any()).Return(nil)
config.mockCrypto.EXPECT().GetTLFCryptKeyServerHalfID(gomock.Any(), gomock.Any(), gomock.Any()).Return(TLFCryptKeyServerHalfID{}, nil).Times(numDevices)
// Ignore Notify and Flush calls for now
config.mockRep.EXPECT().Notify(gomock.Any(), gomock.Any()).AnyTimes()
config.mockKbs.EXPECT().FlushUserFromLocalCache(gomock.Any(),
gomock.Any()).AnyTimes()
}
func TestKeyManagerPublicTLFCryptKey(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerPublicTLFCryptKey)
}
func testKeyManagerPublicTLFCryptKey(t *testing.T, ver MetadataVer) {
mockCtrl, config, ctx := keyManagerInit(t, ver)
defer keyManagerShutdown(mockCtrl, config)
id := tlf.FakeID(1, true)
kmd := emptyKeyMetadata{id, 1}
tlfCryptKey, err := config.KeyManager().
GetTLFCryptKeyForEncryption(ctx, kmd)
if err != nil {
t.Error(err)
}
if tlfCryptKey != kbfscrypto.PublicTLFCryptKey {
t.Errorf("got %v, expected %v",
tlfCryptKey, kbfscrypto.PublicTLFCryptKey)
}
tlfCryptKey, err = config.KeyManager().
GetTLFCryptKeyForMDDecryption(ctx, kmd, kmd)
if err != nil {
t.Error(err)
}
if tlfCryptKey != kbfscrypto.PublicTLFCryptKey {
t.Errorf("got %v, expected %v",
tlfCryptKey, kbfscrypto.PublicTLFCryptKey)
}
tlfCryptKey, err = config.KeyManager().
GetTLFCryptKeyForBlockDecryption(ctx, kmd, BlockPointer{})
if err != nil {
t.Error(err)
}
if tlfCryptKey != kbfscrypto.PublicTLFCryptKey {
t.Errorf("got %v, expected %v",
tlfCryptKey, kbfscrypto.PublicTLFCryptKey)
}
}
func TestKeyManagerCachedSecretKeyForEncryptionSuccess(t *testing.T) {
runTestOverMetadataVers(t,
testKeyManagerCachedSecretKeyForEncryptionSuccess)
}
func testKeyManagerCachedSecretKeyForEncryptionSuccess(t *testing.T, ver MetadataVer) {
mockCtrl, config, ctx := keyManagerInit(t, ver)
defer keyManagerShutdown(mockCtrl, config)
id := tlf.FakeID(1, false)
kmd := emptyKeyMetadata{id, 1}
cachedTLFCryptKey := kbfscrypto.MakeTLFCryptKey([32]byte{0x1})
config.KeyCache().PutTLFCryptKey(id, 1, cachedTLFCryptKey)
tlfCryptKey, err := config.KeyManager().
GetTLFCryptKeyForEncryption(ctx, kmd)
require.NoError(t, err)
require.Equal(t, cachedTLFCryptKey, tlfCryptKey)
}
func TestKeyManagerCachedSecretKeyForMDDecryptionSuccess(t *testing.T) {
runTestOverMetadataVers(t,
testKeyManagerCachedSecretKeyForMDDecryptionSuccess)
}
func testKeyManagerCachedSecretKeyForMDDecryptionSuccess(t *testing.T, ver MetadataVer) {
mockCtrl, config, ctx := keyManagerInit(t, ver)
defer keyManagerShutdown(mockCtrl, config)
id := tlf.FakeID(1, false)
kmd := emptyKeyMetadata{id, 1}
cachedTLFCryptKey := kbfscrypto.MakeTLFCryptKey([32]byte{0x1})
config.KeyCache().PutTLFCryptKey(id, 1, cachedTLFCryptKey)
tlfCryptKey, err := config.KeyManager().
GetTLFCryptKeyForMDDecryption(ctx, kmd, kmd)
require.NoError(t, err)
require.Equal(t, cachedTLFCryptKey, tlfCryptKey)
}
func TestKeyManagerCachedSecretKeyForBlockDecryptionSuccess(t *testing.T) {
runTestOverMetadataVers(t,
testKeyManagerCachedSecretKeyForBlockDecryptionSuccess)
}
func testKeyManagerCachedSecretKeyForBlockDecryptionSuccess(t *testing.T, ver MetadataVer) {
mockCtrl, config, ctx := keyManagerInit(t, ver)
defer keyManagerShutdown(mockCtrl, config)
id := tlf.FakeID(1, false)
kmd := emptyKeyMetadata{id, 2}
cachedTLFCryptKey := kbfscrypto.MakeTLFCryptKey([32]byte{0x1})
config.KeyCache().PutTLFCryptKey(id, 1, cachedTLFCryptKey)
tlfCryptKey, err := config.KeyManager().GetTLFCryptKeyForBlockDecryption(
ctx, kmd, BlockPointer{KeyGen: 1})
require.NoError(t, err)
require.Equal(t, cachedTLFCryptKey, tlfCryptKey)
}
// makeDirWKeyInfoMap creates a new user device key info map with a writer key.
func makeDirWKeyInfoMap(uid keybase1.UID,
cryptPublicKey kbfscrypto.CryptPublicKey) UserDeviceKeyInfoMap {
return UserDeviceKeyInfoMap{
uid: {
cryptPublicKey: TLFCryptKeyInfo{
EPubKeyIndex: 0,
},
},
}
}
func TestKeyManagerUncachedSecretKeyForEncryptionSuccess(t *testing.T) {
runTestOverMetadataVers(t,
testKeyManagerUncachedSecretKeyForEncryptionSuccess)
}
func testKeyManagerUncachedSecretKeyForEncryptionSuccess(t *testing.T, ver MetadataVer) {
mockCtrl, config, ctx := keyManagerInit(t, ver)
defer keyManagerShutdown(mockCtrl, config)
id := tlf.FakeID(1, false)
h := parseTlfHandleOrBust(t, config, "alice", false)
uid := h.FirstResolvedWriter()
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
subkey := kbfscrypto.MakeFakeCryptPublicKeyOrBust("crypt public key")
storedTLFCryptKey := kbfscrypto.MakeTLFCryptKey([32]byte{0x1})
rmd.addKeyGenerationForTest(config.Codec(), config.Crypto(),
kbfscrypto.TLFCryptKey{}, storedTLFCryptKey,
makeDirWKeyInfoMap(uid, subkey), UserDeviceKeyInfoMap{})
storesHistoric := rmd.StoresHistoricTLFCryptKeys()
expectUncachedGetTLFCryptKey(t, config, rmd.TlfID(),
rmd.LatestKeyGeneration(), rmd.LatestKeyGeneration(), uid,
subkey, storesHistoric, storedTLFCryptKey, storedTLFCryptKey)
tlfCryptKey, err := config.KeyManager().
GetTLFCryptKeyForEncryption(ctx, rmd)
require.NoError(t, err)
require.Equal(t, storedTLFCryptKey, tlfCryptKey)
}
func TestKeyManagerUncachedSecretKeyForMDDecryptionSuccess(t *testing.T) {
runTestOverMetadataVers(t,
testKeyManagerUncachedSecretKeyForMDDecryptionSuccess)
}
func testKeyManagerUncachedSecretKeyForMDDecryptionSuccess(t *testing.T, ver MetadataVer) {
mockCtrl, config, ctx := keyManagerInit(t, ver)
defer keyManagerShutdown(mockCtrl, config)
id := tlf.FakeID(1, false)
h := parseTlfHandleOrBust(t, config, "alice", false)
uid := h.FirstResolvedWriter()
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
subkey := kbfscrypto.MakeFakeCryptPublicKeyOrBust("crypt public key")
storedTLFCryptKey := kbfscrypto.MakeTLFCryptKey([32]byte{0x1})
rmd.addKeyGenerationForTest(config.Codec(), config.Crypto(),
kbfscrypto.TLFCryptKey{}, storedTLFCryptKey,
makeDirWKeyInfoMap(uid, subkey), UserDeviceKeyInfoMap{})
expectUncachedGetTLFCryptKeyAnyDevice(
config, rmd.TlfID(), rmd.LatestKeyGeneration(), uid, subkey,
storedTLFCryptKey)
tlfCryptKey, err := config.KeyManager().
GetTLFCryptKeyForMDDecryption(ctx, rmd, rmd)
require.NoError(t, err)
require.Equal(t, storedTLFCryptKey, tlfCryptKey)
}
func TestKeyManagerUncachedSecretKeyForBlockDecryptionSuccess(t *testing.T) {
runTestOverMetadataVers(t,
testKeyManagerUncachedSecretKeyForBlockDecryptionSuccess)
}
func testKeyManagerUncachedSecretKeyForBlockDecryptionSuccess(t *testing.T, ver MetadataVer) {
mockCtrl, config, ctx := keyManagerInit(t, ver)
defer keyManagerShutdown(mockCtrl, config)
id := tlf.FakeID(1, false)
h := parseTlfHandleOrBust(t, config, "alice", false)
uid := h.FirstResolvedWriter()
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
subkey := kbfscrypto.MakeFakeCryptPublicKeyOrBust("crypt public key")
storedTLFCryptKey1 := kbfscrypto.MakeTLFCryptKey([32]byte{0x1})
storedTLFCryptKey2 := kbfscrypto.MakeTLFCryptKey([32]byte{0x2})
rmd.addKeyGenerationForTest(config.Codec(), config.Crypto(),
kbfscrypto.TLFCryptKey{}, storedTLFCryptKey1,
makeDirWKeyInfoMap(uid, subkey), UserDeviceKeyInfoMap{})
rmd.addKeyGenerationForTest(config.Codec(), config.Crypto(),
storedTLFCryptKey1, storedTLFCryptKey2,
makeDirWKeyInfoMap(uid, subkey), UserDeviceKeyInfoMap{})
keyGen := rmd.LatestKeyGeneration() - 1
storesHistoric := rmd.StoresHistoricTLFCryptKeys()
expectUncachedGetTLFCryptKey(t, config, rmd.TlfID(),
keyGen, rmd.LatestKeyGeneration(), uid, subkey,
storesHistoric, storedTLFCryptKey1, storedTLFCryptKey2)
tlfCryptKey, err := config.KeyManager().GetTLFCryptKeyForBlockDecryption(
ctx, rmd, BlockPointer{KeyGen: 1})
require.NoError(t, err)
require.Equal(t, storedTLFCryptKey1, tlfCryptKey)
}
func TestKeyManagerRekeySuccessPrivate(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerRekeySuccessPrivate)
}
func testKeyManagerRekeySuccessPrivate(t *testing.T, ver MetadataVer) {
mockCtrl, config, ctx := keyManagerInit(t, ver)
defer keyManagerShutdown(mockCtrl, config)
id := tlf.FakeID(1, false)
h := parseTlfHandleOrBust(t, config, "alice", false)
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
oldKeyGen := rmd.LatestKeyGeneration()
tlfCryptKey := kbfscrypto.MakeTLFCryptKey([32]byte{0x1})
expectRekey(config, h.ToBareHandleOrBust(), 1, false, true, tlfCryptKey)
if done, _, err := config.KeyManager().Rekey(ctx, rmd, false); !done || err != nil {
t.Errorf("Got error on rekey: %t, %v", done, err)
} else if rmd.LatestKeyGeneration() != oldKeyGen+1 {
t.Errorf("Bad key generation after rekey: %d", rmd.LatestKeyGeneration())
}
}
func TestKeyManagerRekeyResolveAgainSuccessPublic(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerRekeyResolveAgainSuccessPublic)
}
func testKeyManagerRekeyResolveAgainSuccessPublic(t *testing.T, ver MetadataVer) {
mockCtrl, config, ctx := keyManagerInit(t, ver)
defer keyManagerShutdown(mockCtrl, config)
id := tlf.FakeID(1, true)
h, err := ParseTlfHandle(
ctx, config.KBPKI(), "alice,bob@twitter", true)
require.NoError(t, err)
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
daemon := config.KeybaseService().(*KeybaseDaemonLocal)
daemon.addNewAssertionForTestOrBust("bob", "bob@twitter")
config.mockMdops.EXPECT().GetLatestHandleForTLF(gomock.Any(), gomock.Any()).
Return(rmd.tlfHandle.ToBareHandleOrBust(), nil)
done, cryptKey, err := config.KeyManager().Rekey(ctx, rmd, false)
require.True(t, done)
require.Nil(t, cryptKey)
require.NoError(t, err)
newH := rmd.GetTlfHandle()
require.Equal(t, CanonicalTlfName("alice,bob"), newH.GetCanonicalName())
// Also check MakeBareTlfHandle.
oldHandle := rmd.tlfHandle
rmd.tlfHandle = nil
newBareH, err := rmd.MakeBareTlfHandle()
require.NoError(t, err)
require.Equal(t, newH.ToBareHandleOrBust(), newBareH)
rmd.tlfHandle = oldHandle
// Rekey again, which shouldn't do anything.
done, cryptKey, err = config.KeyManager().Rekey(ctx, rmd, false)
require.False(t, done)
require.Nil(t, cryptKey)
require.NoError(t, err)
}
func TestKeyManagerRekeyResolveAgainSuccessPublicSelf(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerRekeyResolveAgainSuccessPublicSelf)
}
func testKeyManagerRekeyResolveAgainSuccessPublicSelf(t *testing.T, ver MetadataVer) {
mockCtrl, config, ctx := keyManagerInit(t, ver)
defer keyManagerShutdown(mockCtrl, config)
id := tlf.FakeID(1, true)
h, err := ParseTlfHandle(
ctx, config.KBPKI(), "alice@twitter,bob,charlie@twitter", true)
require.NoError(t, err)
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
daemon := config.KeybaseService().(*KeybaseDaemonLocal)
daemon.addNewAssertionForTestOrBust("alice", "alice@twitter")
daemon.addNewAssertionForTestOrBust("charlie", "charlie@twitter")
config.mockMdops.EXPECT().GetLatestHandleForTLF(gomock.Any(), gomock.Any()).
Return(rmd.tlfHandle.ToBareHandleOrBust(), nil)
done, cryptKey, err := config.KeyManager().Rekey(ctx, rmd, false)
require.True(t, done)
require.Nil(t, cryptKey)
require.NoError(t, err)
newH := rmd.GetTlfHandle()
require.Equal(t, CanonicalTlfName("alice,bob,charlie"), newH.GetCanonicalName())
// Also check MakeBareTlfHandle.
oldHandle := rmd.tlfHandle
rmd.tlfHandle = nil
newBareH, err := rmd.MakeBareTlfHandle()
require.NoError(t, err)
require.Equal(t, newH.ToBareHandleOrBust(), newBareH)
rmd.tlfHandle = oldHandle
}
func TestKeyManagerRekeyResolveAgainSuccessPrivate(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerRekeyResolveAgainSuccessPrivate)
}
func testKeyManagerRekeyResolveAgainSuccessPrivate(t *testing.T, ver MetadataVer) {
mockCtrl, config, ctx := keyManagerInit(t, ver)
defer keyManagerShutdown(mockCtrl, config)
id := tlf.FakeID(1, false)
h, err := ParseTlfHandle(
ctx, config.KBPKI(), "alice,bob@twitter,dave@twitter#charlie@twitter",
false)
if err != nil {
t.Fatal(err)
}
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
oldKeyGen := rmd.LatestKeyGeneration()
tlfCryptKey1 := kbfscrypto.MakeTLFCryptKey([32]byte{0x1})
expectRekey(config, h.ToBareHandleOrBust(), 3, true, true, tlfCryptKey1)
// Pretend that {bob,charlie}@twitter now resolve to {bob,charlie}.
daemon := config.KeybaseService().(*KeybaseDaemonLocal)
daemon.addNewAssertionForTestOrBust("bob", "bob@twitter")
daemon.addNewAssertionForTestOrBust("charlie", "charlie@twitter")
if done, _, err := config.KeyManager().Rekey(ctx, rmd, false); !done || err != nil {
t.Fatalf("Got error on rekey: %t, %v", done, err)
}
if rmd.LatestKeyGeneration() != oldKeyGen+1 {
t.Fatalf("Bad key generation after rekey: %d", rmd.LatestKeyGeneration())
}
newH := rmd.GetTlfHandle()
require.Equal(t, CanonicalTlfName("alice,bob,dave@twitter#charlie"),
newH.GetCanonicalName())
// Also check MakeBareTlfHandle.
oldHandle := rmd.tlfHandle
rmd.tlfHandle = nil
newBareH, err := rmd.MakeBareTlfHandle()
require.NoError(t, err)
require.Equal(t, newH.ToBareHandleOrBust(), newBareH)
rmd.tlfHandle = oldHandle
// Now resolve using only a device addition, which won't bump the
// generation number.
daemon.addNewAssertionForTestOrBust("dave", "dave@twitter")
oldKeyGen = rmd.LatestKeyGeneration()
tlfCryptKey2 := kbfscrypto.MakeTLFCryptKey([32]byte{0x2})
config.KeyCache().PutTLFCryptKey(id, 1, tlfCryptKey2)
expectRekey(config, oldHandle.ToBareHandleOrBust(), 1, true, false, tlfCryptKey2)
subkey := kbfscrypto.MakeFakeCryptPublicKeyOrBust("crypt public key")
config.mockKbpki.EXPECT().GetCryptPublicKeys(gomock.Any(), gomock.Any()).
Return([]kbfscrypto.CryptPublicKey{subkey}, nil).Times(3)
if done, _, err :=
config.KeyManager().Rekey(ctx, rmd, false); !done || err != nil {
t.Fatalf("Got error on rekey: %t, %v", done, err)
}
if rmd.LatestKeyGeneration() != oldKeyGen {
t.Fatalf("Bad key generation after rekey: %d",
rmd.LatestKeyGeneration())
}
newH = rmd.GetTlfHandle()
require.Equal(t, CanonicalTlfName("alice,bob,dave#charlie"),
newH.GetCanonicalName())
// Also check MakeBareTlfHandle.
rmd.tlfHandle = nil
newBareH, err = rmd.MakeBareTlfHandle()
require.NoError(t, err)
require.Equal(t, newH.ToBareHandleOrBust(), newBareH)
}
func TestKeyManagerPromoteReaderSuccessPrivate(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerPromoteReaderSuccessPrivate)
}
func testKeyManagerPromoteReaderSuccessPrivate(t *testing.T, ver MetadataVer) {
mockCtrl, config, ctx := keyManagerInit(t, ver)
defer keyManagerShutdown(mockCtrl, config)
id := tlf.FakeID(1, false)
h, err := ParseTlfHandle(ctx, config.KBPKI(),
"alice,bob@twitter#bob", false)
if err != nil {
t.Fatal(err)
}
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
oldKeyGen := rmd.LatestKeyGeneration()
tlfCryptKey := kbfscrypto.MakeTLFCryptKey([32]byte{0x1})
expectRekey(config, h.ToBareHandleOrBust(), 2, true, true, tlfCryptKey)
// Pretend that bob@twitter now resolves to bob.
daemon := config.KeybaseService().(*KeybaseDaemonLocal)
daemon.addNewAssertionForTestOrBust("bob", "bob@twitter")
// Make the first key generation
if done, _, err := config.KeyManager().Rekey(ctx, rmd, false); !done || err != nil {
t.Fatalf("Got error on rekey: %t, %v", done, err)
}
if rmd.LatestKeyGeneration() != oldKeyGen+1 {
t.Fatalf("Bad key generation after rekey: %d", rmd.LatestKeyGeneration())
}
newH := rmd.GetTlfHandle()
require.Equal(t,
CanonicalTlfName("alice,bob"),
newH.GetCanonicalName())
}
func TestKeyManagerReaderRekeyResolveAgainSuccessPrivate(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerReaderRekeyResolveAgainSuccessPrivate)
}
func testKeyManagerReaderRekeyResolveAgainSuccessPrivate(t *testing.T, ver MetadataVer) {
mockCtrl, config, ctx := keyManagerInit(t, ver)
defer keyManagerShutdown(mockCtrl, config)
id := tlf.FakeID(1, false)
h, err := ParseTlfHandle(ctx, config.KBPKI(),
"alice,dave@twitter#bob@twitter,charlie@twitter", false)
if err != nil {
t.Fatal(err)
}
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
oldKeyGen := rmd.LatestKeyGeneration()
tlfCryptKey1 := kbfscrypto.MakeTLFCryptKey([32]byte{0x1})
expectRekey(config, h.ToBareHandleOrBust(), 1, true, true, tlfCryptKey1)
// Make the first key generation
if done, _, err := config.KeyManager().Rekey(ctx, rmd, false); !done || err != nil {
t.Fatalf("Got error on rekey: %t, %v", done, err)
}
if rmd.LatestKeyGeneration() != oldKeyGen+1 {
t.Fatalf("Bad key generation after rekey: %d", rmd.LatestKeyGeneration())
}
newH := rmd.GetTlfHandle()
require.Equal(t,
CanonicalTlfName("alice,dave@twitter#bob@twitter,charlie@twitter"),
newH.GetCanonicalName())
// Now resolve everyone, but have reader bob do the rekey
daemon := config.KeybaseService().(*KeybaseDaemonLocal)
daemon.addNewAssertionForTestOrBust("bob", "bob@twitter")
daemon.addNewAssertionForTestOrBust("charlie", "charlie@twitter")
daemon.addNewAssertionForTestOrBust("dave", "dave@twitter")
_, bobUID, err := daemon.Resolve(ctx, "bob")
require.NoError(t, err)
daemon.setCurrentUID(bobUID)
// Now resolve using only a device addition, which won't bump the
// generation number.
oldKeyGen = rmd.LatestKeyGeneration()
// Pretend bob has the key in the cache (in reality it would be
// decrypted via bob's paper key)
tlfCryptKey2 := kbfscrypto.MakeTLFCryptKey([32]byte{0x2})
config.KeyCache().PutTLFCryptKey(rmd.TlfID(), oldKeyGen, tlfCryptKey2)
expectRekey(config, h.ToBareHandleOrBust(), 1, false, false, tlfCryptKey2)
subkey := kbfscrypto.MakeFakeCryptPublicKeyOrBust("crypt public key")
config.mockKbpki.EXPECT().GetCryptPublicKeys(gomock.Any(), gomock.Any()).
Return([]kbfscrypto.CryptPublicKey{subkey}, nil)
if done, _, err :=
config.KeyManager().Rekey(ctx, rmd, false); !done || err != nil {
t.Fatalf("Got error on rekey: %t, %v", done, err)
}
if rmd.LatestKeyGeneration() != oldKeyGen {
t.Fatalf("Bad key generation after rekey: %d",
rmd.LatestKeyGeneration())
}
// bob shouldn't have been able to resolve other users since he's
// just a reader.
newH = rmd.GetTlfHandle()
require.Equal(t, CanonicalTlfName("alice,dave@twitter#bob,charlie@twitter"),
newH.GetCanonicalName())
// Also check MakeBareTlfHandle.
rmd.tlfHandle = nil
newBareH, err := rmd.MakeBareTlfHandle()
require.NoError(t, err)
require.Equal(t, newH.ToBareHandleOrBust(), newBareH)
}
func TestKeyManagerRekeyResolveAgainNoChangeSuccessPrivate(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerRekeyResolveAgainNoChangeSuccessPrivate)
}
func testKeyManagerRekeyResolveAgainNoChangeSuccessPrivate(t *testing.T, ver MetadataVer) {
mockCtrl, config, ctx := keyManagerInit(t, ver)
defer keyManagerShutdown(mockCtrl, config)
id := tlf.FakeID(1, false)
h, err := ParseTlfHandle(ctx, config.KBPKI(), "alice,bob,bob@twitter",
false)
if err != nil {
t.Fatal(err)
}
rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
require.NoError(t, err)
oldKeyGen := rmd.LatestKeyGeneration()
tlfCryptKey1 := kbfscrypto.MakeTLFCryptKey([32]byte{0x1})
expectRekey(config, h.ToBareHandleOrBust(), 2, true, true, tlfCryptKey1)
// Make the first key generation
if done, _, err := config.KeyManager().Rekey(ctx, rmd, false); !done || err != nil {
t.Fatalf("Got error on rekey: %t, %v", done, err)
}
if rmd.LatestKeyGeneration() != oldKeyGen+1 {
t.Fatalf("Bad key generation after rekey: %d", rmd.LatestKeyGeneration())
}
newH := rmd.GetTlfHandle()
require.Equal(t,
CanonicalTlfName("alice,bob,bob@twitter"),
newH.GetCanonicalName())
// Now resolve everyone, but have reader bob to do the rekey
daemon := config.KeybaseService().(*KeybaseDaemonLocal)
daemon.addNewAssertionForTestOrBust("bob", "bob@twitter")
// Now resolve which gets rid of the unresolved writers, but
// doesn't otherwise change the handle since bob is already in it.
oldKeyGen = rmd.LatestKeyGeneration()
config.mockCrypto.EXPECT().MakeRandomTLFEphemeralKeys().Return(
kbfscrypto.TLFEphemeralPublicKey{},
kbfscrypto.TLFEphemeralPrivateKey{}, nil)
subkey := kbfscrypto.MakeFakeCryptPublicKeyOrBust("crypt public key")
config.mockKbpki.EXPECT().GetCryptPublicKeys(gomock.Any(), gomock.Any()).
Return([]kbfscrypto.CryptPublicKey{subkey}, nil).Times(2)
if done, _, err :=
config.KeyManager().Rekey(ctx, rmd, false); !done || err != nil {
t.Fatalf("Got error on rekey: %t, %v", done, err)
}
if rmd.LatestKeyGeneration() != oldKeyGen {
t.Fatalf("Bad key generation after rekey: %d",
rmd.LatestKeyGeneration())
}
// bob shouldn't have been able to resolve other users since he's
// just a reader.
newH = rmd.GetTlfHandle()
require.Equal(t, CanonicalTlfName("alice,bob"), newH.GetCanonicalName())
// Also check MakeBareTlfHandle.
rmd.tlfHandle = nil
newBareH, err := rmd.MakeBareTlfHandle()
require.NoError(t, err)
require.Equal(t, newH.ToBareHandleOrBust(), newBareH)
}
func TestKeyManagerRekeyAddAndRevokeDevice(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerRekeyAddAndRevokeDevice)
}
func testKeyManagerRekeyAddAndRevokeDevice(t *testing.T, ver MetadataVer) {
var u1, u2 libkb.NormalizedUsername = "u1", "u2"
config1, _, ctx, cancel := kbfsOpsConcurInit(t, u1, u2)
defer kbfsConcurTestShutdown(t, config1, ctx, cancel)
clock := newTestClockNow()
config1.SetClock(clock)
config1.SetMetadataVersion(ver)
config2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2)
_, uid2, err := config2.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
// Create a shared folder
name := u1.String() + "," + u2.String()
rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, false)
kbfsOps1 := config1.KBFSOps()
// user 1 creates a file
_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
rootNode2 := GetRootNodeOrBust(ctx, t, config2, name, false)
kbfsOps2 := config2.KBFSOps()
// user 2 creates a file
_, _, err = kbfsOps2.CreateFile(ctx, rootNode2, "b", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
config2Dev2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2Dev2)
// Now give u2 a new device. The configs don't share a Keybase
// Daemon so we have to do it in all places.
AddDeviceForLocalUserOrBust(t, config1, uid2)
AddDeviceForLocalUserOrBust(t, config2, uid2)
devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)
// user 2 should be unable to read the data now since its device
// wasn't registered when the folder was originally created.
_, err = GetRootNodeForTest(ctx, config2Dev2, name, false)
if _, ok := err.(NeedSelfRekeyError); !ok {
t.Fatalf("Got unexpected error when reading with new key: %v", err)
}
// Set the KBPKI so we can count the identify calls
countKBPKI := &identifyCountingKBPKI{
KBPKI: config1.KBPKI(),
}
config1.SetKBPKI(countKBPKI)
// Force the FBO to forget about its previous identify, so that we
// can make sure the rekey doesn't trigger a full identify.
kbfsOps1.(*KBFSOpsStandard).getOpsNoAdd(
rootNode1.GetFolderBranch()).identifyDone = false
// now user 1 should rekey
err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't rekey: %v", err)
}
// Only u2 should be identified as part of the rekey.
if g, e := countKBPKI.getIdentifyCalls(), 1; g != e {
t.Errorf("Expected %d identify calls, but got %d", e, g)
}
// u2 syncs after the rekey
if err := kbfsOps2.SyncFromServerForTesting(ctx,
rootNode2.GetFolderBranch()); err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
// user 2 creates another file
_, _, err = kbfsOps2.CreateFile(ctx, rootNode2, "c", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// add a third device for user 2
config2Dev3 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2Dev3)
defer config2Dev3.SetKeyCache(NewKeyCacheStandard(5000))
AddDeviceForLocalUserOrBust(t, config1, uid2)
AddDeviceForLocalUserOrBust(t, config2, uid2)
AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
devIndex = AddDeviceForLocalUserOrBust(t, config2Dev3, uid2)
SwitchDeviceForLocalUserOrBust(t, config2Dev3, devIndex)
// Now revoke the original user 2 device (the last writer)
clock.Add(1 * time.Minute)
RevokeDeviceForLocalUserOrBust(t, config1, uid2, 0)
RevokeDeviceForLocalUserOrBust(t, config2Dev2, uid2, 0)
RevokeDeviceForLocalUserOrBust(t, config2Dev3, uid2, 0)
// First request a rekey from the new device, which will only be
// able to set the rekey bit (copying the root MD).
err = config2Dev3.KBFSOps().Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't rekey: %v", err)
}
err = kbfsOps1.SyncFromServerForTesting(ctx, rootNode1.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
// rekey again
err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't rekey: %v", err)
}
// Only u2 should be identified again as part of the rekey.
if g, e := countKBPKI.getIdentifyCalls(), 2; g != e {
t.Errorf("Expected %d identify calls, but got %d", e, g)
}
// force re-encryption of the root dir
_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "d", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// this device should be able to read now
root2Dev2 := GetRootNodeOrBust(ctx, t, config2Dev2, name, false)
kbfsOps2Dev2 := config2Dev2.KBFSOps()
err = kbfsOps2Dev2.SyncFromServerForTesting(ctx, root2Dev2.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
// device 2 should still work
rootNode2Dev2 := GetRootNodeOrBust(ctx, t, config2Dev2, name, false)
children, err := kbfsOps2Dev2.GetDirChildren(ctx, rootNode2Dev2)
if err != nil {
t.Fatalf("Couldn't get children: %v", err)
}
if _, ok := children["d"]; !ok {
t.Fatalf("Device 2 couldn't see the new dir entry")
}
// But device 1 should now fail to see any updates. TODO: when a
// device sees it has been revoked from the TLF, we should delete
// all its cached data and refuse to serve any more. (However, in
// production the device's session would likely be revoked,
// probably leading to NoCurrentSession errors anyway.)
err = kbfsOps2.SyncFromServerForTesting(ctx, rootNode2.GetFolderBranch())
if err == nil {
// This is not expected to succeed; the node will be unable to
// deserialize the private MD.
t.Fatalf("Unexpectedly could sync from server")
}
// Should still be seeing the old children, since the updates from
// the latest revision were never applied.
children, err = kbfsOps2.GetDirChildren(ctx, rootNode2)
if err != nil {
t.Fatalf("Couldn't get children: %v", err)
}
if _, ok := children["d"]; ok {
t.Fatalf("Found d unexpectedly: %v", children)
}
// meanwhile, device 3 should be able to read both the new and the
// old files
rootNode2Dev3 := GetRootNodeOrBust(ctx, t, config2Dev3, name, false)
kbfsOps2Dev3 := config2Dev3.KBFSOps()
aNode, _, err := kbfsOps2Dev3.Lookup(ctx, rootNode2Dev3, "a")
if err != nil {
t.Fatalf("Device 3 couldn't lookup a: %v", err)
}
buf := []byte{0}
_, err = kbfsOps2Dev3.Read(ctx, aNode, buf, 0)
if err != nil {
t.Fatalf("Device 3 couldn't read a: %v", err)
}
bNode, _, err := kbfsOps2Dev3.Lookup(ctx, rootNode2Dev3, "b")
if err != nil {
t.Fatalf("Device 3 couldn't lookup b: %v", err)
}
_, err = kbfsOps2Dev3.Read(ctx, bNode, buf, 0)
if err != nil {
t.Fatalf("Device 3 couldn't read b: %v", err)
}
// Make sure the server-side keys for the revoked device are gone
// for all keygens.
rmd, err := config1.MDOps().GetForTLF(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't get latest md: %v", err)
}
currKeyGen := rmd.LatestKeyGeneration()
// clear the key cache
config2.SetKeyCache(NewKeyCacheStandard(5000))
km2, ok := config2.KeyManager().(*KeyManagerStandard)
if !ok {
t.Fatal("Wrong kind of key manager for config2")
}
for keyGen := FirstValidKeyGen; keyGen <= currKeyGen; keyGen++ {
_, err = km2.getTLFCryptKeyUsingCurrentDevice(ctx, rmd.ReadOnly(), keyGen, true)
if err == nil {
t.Errorf("User 2 could still fetch a key for keygen %d", keyGen)
}
}
}
func TestKeyManagerRekeyAddWriterAndReaderDevice(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerRekeyAddWriterAndReaderDevice)
}
func testKeyManagerRekeyAddWriterAndReaderDevice(t *testing.T, ver MetadataVer) {
var u1, u2, u3 libkb.NormalizedUsername = "u1", "u2", "u3"
config1, _, ctx, cancel := kbfsOpsConcurInit(t, u1, u2, u3)
defer kbfsConcurTestShutdown(t, config1, ctx, cancel)
config1.SetMetadataVersion(ver)
// Revoke user 3's device for now, to test the "other" rekey error.
_, uid3, err := config1.KBPKI().Resolve(ctx, u3.String())
if err != nil {
t.Fatalf("Couldn't resolve u3: %v", err)
}
RevokeDeviceForLocalUserOrBust(t, config1, uid3, 0)
config2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2)
_, uid2, err := config2.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
// Create a shared folder
name := u1.String() + "," + u2.String() + ReaderSep + u3.String()
rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, false)
kbfsOps1 := config1.KBFSOps()
// user 1 creates a file
_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
config2Dev2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2Dev2)
config3 := ConfigAsUser(config1, u3)
defer CheckConfigAndShutdown(t, config3)
// Now give u2 and u3 new devices. The configs don't share a
// Keybase Daemon so we have to do it in all places.
AddDeviceForLocalUserOrBust(t, config1, uid2)
AddDeviceForLocalUserOrBust(t, config2, uid2)
devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)
AddDeviceForLocalUserOrBust(t, config1, uid3)
AddDeviceForLocalUserOrBust(t, config2, uid3)
devIndex = AddDeviceForLocalUserOrBust(t, config3, uid3)
t.Logf("Switching to device %d", devIndex)
SwitchDeviceForLocalUserOrBust(t, config3, devIndex)
// Users 2 and 3 should be unable to read the data now since their
// devices weren't registered when the folder was originally
// created.
_, err = GetRootNodeForTest(ctx, config2Dev2, name, false)
if _, ok := err.(NeedSelfRekeyError); !ok {
t.Fatalf("Got unexpected error when reading with new key: %v", err)
}
_, err = GetRootNodeForTest(ctx, config3, name, false)
if _, ok := err.(NeedOtherRekeyError); !ok {
t.Fatalf("Got unexpected error when reading with new key: %v", err)
}
// Set the KBPKI so we can count the identify calls
countKBPKI := &identifyCountingKBPKI{
KBPKI: config1.KBPKI(),
}
config1.SetKBPKI(countKBPKI)
// Force the FBO to forget about its previous identify, so that we
// can make sure the rekey doesn't trigger a full identify.
kbfsOps1.(*KBFSOpsStandard).getOpsNoAdd(
rootNode1.GetFolderBranch()).identifyDone = false
// now user 1 should rekey
err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't rekey: %v", err)
}
// u2 and u3 should be identified as part of the rekey.
if g, e := countKBPKI.getIdentifyCalls(), 2; g != e {
t.Errorf("Expected %d identify calls, but got %d", e, g)
}
// The new devices should be able to read now.
_, err = GetRootNodeForTest(ctx, config2Dev2, name, false)
if err != nil {
t.Fatalf("Got unexpected error after rekey: %v", err)
}
_ = GetRootNodeOrBust(ctx, t, config3, name, false)
}
func TestKeyManagerSelfRekeyAcrossDevices(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerSelfRekeyAcrossDevices)
}
func testKeyManagerSelfRekeyAcrossDevices(t *testing.T, ver MetadataVer) {
var u1, u2 libkb.NormalizedUsername = "u1", "u2"
config1, _, ctx, cancel := kbfsOpsConcurInit(t, u1, u2)
defer kbfsConcurTestShutdown(t, config1, ctx, cancel)
config1.SetMetadataVersion(ver)
config2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2)
_, uid2, err := config2.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
t.Log("Create a shared folder")
name := u1.String() + "," + u2.String()
rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, false)
kbfsOps1 := config1.KBFSOps()
t.Log("User 1 creates a file")
_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
t.Log("User 2 adds a device")
// The configs don't share a Keybase Daemon so we have to do it in all
// places.
AddDeviceForLocalUserOrBust(t, config1, uid2)
devIndex := AddDeviceForLocalUserOrBust(t, config2, uid2)
config2Dev2 := ConfigAsUser(config2, u2)
defer CheckConfigAndShutdown(t, config2Dev2)
SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)
t.Log("Check that user 2 device 2 is unable to read the file")
// user 2 device 2 should be unable to read the data now since its device
// wasn't registered when the folder was originally created.
_, err = GetRootNodeForTest(ctx, config2Dev2, name, false)
if _, ok := err.(NeedSelfRekeyError); !ok {
t.Fatalf("Got unexpected error when reading with new key: %v", err)
}
t.Log("User 2 rekeys from device 1")
root2dev1 := GetRootNodeOrBust(ctx, t, config2, name, false)
kbfsOps2 := config2.KBFSOps()
err = kbfsOps2.Rekey(ctx, root2dev1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't rekey: %v", err)
}
t.Log("User 2 device 2 should be able to read now")
root2dev2 := GetRootNodeOrBust(ctx, t, config2Dev2, name, false)
t.Log("User 2 device 2 reads user 1's file")
kbfsOps2Dev2 := config2Dev2.KBFSOps()
children2, err := kbfsOps2Dev2.GetDirChildren(ctx, root2dev2)
if err != nil {
t.Fatalf("Couldn't get children: %v", err)
}
if _, ok := children2["a"]; !ok {
t.Fatalf("Device 2 couldn't see user 1's dir entry")
}
t.Log("User 2 device 2 creates a file")
_, _, err = kbfsOps2Dev2.CreateFile(ctx, root2dev2, "b", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
t.Log("User 1 syncs from the server")
err = kbfsOps1.SyncFromServerForTesting(ctx, rootNode1.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
t.Log("User 1 should be able to read the file that user 2 device 2 created")
children1, err := kbfsOps1.GetDirChildren(ctx, rootNode1)
if err != nil {
t.Fatalf("Couldn't get children: %v", err)
}
if _, ok := children1["b"]; !ok {
t.Fatalf("Device 1 couldn't see the new dir entry")
}
}
func TestKeyManagerReaderRekey(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerReaderRekey)
}
func testKeyManagerReaderRekey(t *testing.T, ver MetadataVer) {
var u1, u2 libkb.NormalizedUsername = "u1", "u2"
config1, _, ctx, cancel := kbfsOpsConcurInit(t, u1, u2)
defer kbfsConcurTestShutdown(t, config1, ctx, cancel)
_, uid1, err := config1.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
config1.SetMetadataVersion(ver)
config2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2)
_, uid2, err := config2.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
t.Log("Create a shared folder")
name := u1.String() + ReaderSep + u2.String()
rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, false)
kbfsOps1 := config1.KBFSOps()
t.Log("User 1 creates a file")
_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
t.Log("User 1 adds a device")
// The configs don't share a Keybase Daemon so we have to do it in all
// places.
AddDeviceForLocalUserOrBust(t, config1, uid1)
devIndex := AddDeviceForLocalUserOrBust(t, config2, uid1)
config1Dev2 := ConfigAsUser(config2, u1)
defer CheckConfigAndShutdown(t, config1Dev2)
SwitchDeviceForLocalUserOrBust(t, config1Dev2, devIndex)
t.Log("User 2 adds a device")
// The configs don't share a Keybase Daemon so we have to do it in all
// places.
AddDeviceForLocalUserOrBust(t, config1, uid2)
AddDeviceForLocalUserOrBust(t, config1Dev2, uid2)
devIndex = AddDeviceForLocalUserOrBust(t, config2, uid2)
config2Dev2 := ConfigAsUser(config2, u2)
defer CheckConfigAndShutdown(t, config2Dev2)
SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)
t.Log("Check that user 2 device 2 is unable to read the file")
// user 2 device 2 should be unable to read the data now since its device
// wasn't registered when the folder was originally created.
kbfsOps2Dev2 := config2Dev2.KBFSOps()
_, err = GetRootNodeForTest(ctx, config2Dev2, name, false)
if _, ok := err.(NeedSelfRekeyError); !ok {
t.Fatalf("Got unexpected error when reading with new key: %v", err)
}
t.Log("User 2 rekeys from device 1")
root2dev1 := GetRootNodeOrBust(ctx, t, config2, name, false)
kbfsOps2 := config2.KBFSOps()
err = kbfsOps2.Rekey(ctx, root2dev1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Expected reader rekey to partially complete. Actual error: %#v", err)
}
t.Log("User 2 device 2 should be able to read now")
root2dev2 := GetRootNodeOrBust(ctx, t, config2Dev2, name, false)
t.Log("User 1 device 2 should still be unable to read")
_, err = GetRootNodeForTest(ctx, config1Dev2, name, false)
if _, ok := err.(NeedSelfRekeyError); !ok {
t.Fatalf("Got unexpected error when reading with new key: %v", err)
}
t.Log("User 2 device 2 reads user 1's file")
children2, err := kbfsOps2Dev2.GetDirChildren(ctx, root2dev2)
if err != nil {
t.Fatalf("Couldn't get children: %v", err)
}
if _, ok := children2["a"]; !ok {
t.Fatalf("Device 2 couldn't see user 1's dir entry")
}
}
func TestKeyManagerReaderRekeyAndRevoke(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerReaderRekeyAndRevoke)
}
func testKeyManagerReaderRekeyAndRevoke(t *testing.T, ver MetadataVer) {
var u1, u2 libkb.NormalizedUsername = "u1", "u2"
config1, _, ctx, cancel := kbfsOpsConcurInit(t, u1, u2)
defer kbfsConcurTestShutdown(t, config1, ctx, cancel)
clock := newTestClockNow()
config1.SetClock(clock)
config1.SetMetadataVersion(ver)
config2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2)
_, uid2, err := config2.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
// The reader has a second device at the start.
AddDeviceForLocalUserOrBust(t, config1, uid2)
devIndex := AddDeviceForLocalUserOrBust(t, config2, uid2)
config2Dev2 := ConfigAsUser(config2, u2)
defer CheckConfigAndShutdown(t, config2Dev2)
SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)
t.Log("Create a shared folder")
name := u1.String() + ReaderSep + u2.String()
rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, false)
kbfsOps1 := config1.KBFSOps()
t.Log("User 1 creates a file")
_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
t.Log("User 2 adds a device")
// The configs don't share a Keybase Daemon so we have to do it in all
// places.
AddDeviceForLocalUserOrBust(t, config1, uid2)
AddDeviceForLocalUserOrBust(t, config2, uid2)
devIndex = AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
config2Dev3 := ConfigAsUser(config2, u2)
defer CheckConfigAndShutdown(t, config2Dev3)
SwitchDeviceForLocalUserOrBust(t, config2Dev3, devIndex)
// Revoke the original user 2 device
clock.Add(1 * time.Minute)
RevokeDeviceForLocalUserOrBust(t, config1, uid2, 0)
RevokeDeviceForLocalUserOrBust(t, config2Dev2, uid2, 0)
RevokeDeviceForLocalUserOrBust(t, config2Dev3, uid2, 0)
t.Log("Check that user 2 device 3 is unable to read the file")
// user 2 device 3 should be unable to read the data now since its device
// wasn't registered when the folder was originally created.
_, err = GetRootNodeForTest(ctx, config2Dev3, name, false)
if _, ok := err.(NeedSelfRekeyError); !ok {
t.Fatalf("Got unexpected error when reading with new key: %v", err)
}
t.Log("User 2 rekeys from device 2")
root2Dev2 := GetRootNodeOrBust(ctx, t, config2Dev2, name, false)
kbfsOps2Dev2 := config2Dev2.KBFSOps()
err = kbfsOps2Dev2.Rekey(ctx, root2Dev2.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Expected reader rekey to partially complete. "+
"Actual error: %#v", err)
}
t.Log("User 2 device 3 should be able to read now")
GetRootNodeOrBust(ctx, t, config2Dev3, name, false)
// A second rekey by the same reader shouldn't change the
// revision, since the rekey bit is already set, even though a
// rekey is still needed (due to the revoke, which has to be
// rekeyed by a writer).
ops := getOps(config2Dev2, root2Dev2.GetFolderBranch().Tlf)
rev1 := ops.head.Revision()
err = kbfsOps2Dev2.Rekey(ctx, root2Dev2.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Expected reader rekey to partially complete. "+
"Actual error: %#v", err)
}
rev2 := ops.head.Revision()
if rev1 != rev2 {
t.Fatalf("Reader rekey made two incomplete rekeys in a row.")
}
}
// This tests 2 variations of the situation where clients w/o the folder key set the rekey bit.
// In one case the client is a writer and in the other a reader. They both blindly copy the existing
// metadata and simply set the rekey bit. Then another participant rekeys the folder and they try to read.
func TestKeyManagerRekeyBit(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerRekeyBit)
}
func testKeyManagerRekeyBit(t *testing.T, ver MetadataVer) {
var u1, u2, u3 libkb.NormalizedUsername = "u1", "u2", "u3"
config1, _, ctx, cancel := kbfsOpsConcurInit(t, u1, u2, u3)
doShutdown1 := true
defer func() {
if doShutdown1 {
kbfsConcurTestShutdown(t, config1, ctx, cancel)
}
}()
config1.SetMetadataVersion(ver)
config2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2)
_, uid2, err := config2.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
config2.MDServer().DisableRekeyUpdatesForTesting()
config3 := ConfigAsUser(config1, u3)
defer CheckConfigAndShutdown(t, config3)
_, uid3, err := config3.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
config3.MDServer().DisableRekeyUpdatesForTesting()
// 2 writers 1 reader
name := u1.String() + "," + u2.String() + "#" + u3.String()
rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, false)
kbfsOps1 := config1.KBFSOps()
// user 1 creates a file
_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
config2Dev2 := ConfigAsUser(config1, u2)
// we don't check the config because this device can't read all of the md blocks.
defer config2Dev2.Shutdown()
config2Dev2.MDServer().DisableRekeyUpdatesForTesting()
// Now give u2 a new device. The configs don't share a Keybase
// Daemon so we have to do it in all places.
AddDeviceForLocalUserOrBust(t, config1, uid2)
AddDeviceForLocalUserOrBust(t, config2, uid2)
AddDeviceForLocalUserOrBust(t, config3, uid2)
devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)
// user 2 should be unable to read the data now since its device
// wasn't registered when the folder was originally created.
_, err = GetRootNodeForTest(ctx, config2Dev2, name, false)
if _, ok := err.(NeedSelfRekeyError); !ok {
t.Fatalf("Got unexpected error when reading with new key: %v", err)
}
// now user 2 should set the rekey bit
kbfsOps2Dev2 := config2Dev2.KBFSOps()
err = kbfsOps2Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't rekey: %v", err)
}
// user 1 syncs from server
err = kbfsOps1.SyncFromServerForTesting(ctx, rootNode1.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
// user 1 should try to rekey
err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't rekey: %v", err)
}
// user 2 syncs from server
err = kbfsOps2Dev2.SyncFromServerForTesting(ctx, rootNode1.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
// this device should be able to read now
rootNode2Dev2 := GetRootNodeOrBust(ctx, t, config2Dev2, name, false)
// look for the file
aNode, _, err := kbfsOps2Dev2.Lookup(ctx, rootNode2Dev2, "a")
if err != nil {
t.Fatalf("Device 2 couldn't lookup a: %v", err)
}
// read it
buf := []byte{0}
_, err = kbfsOps2Dev2.Read(ctx, aNode, buf, 0)
if err != nil {
t.Fatalf("Device 2 couldn't read a: %v", err)
}
config3Dev2 := ConfigAsUser(config1, u3)
// we don't check the config because this device can't read all of the md blocks.
defer config3Dev2.Shutdown()
config3Dev2.MDServer().DisableRekeyUpdatesForTesting()
// Now give u3 a new device.
AddDeviceForLocalUserOrBust(t, config1, uid3)
AddDeviceForLocalUserOrBust(t, config2, uid3)
AddDeviceForLocalUserOrBust(t, config2Dev2, uid3)
AddDeviceForLocalUserOrBust(t, config3, uid3)
devIndex = AddDeviceForLocalUserOrBust(t, config3Dev2, uid3)
SwitchDeviceForLocalUserOrBust(t, config3Dev2, devIndex)
// user 3 dev 2 should be unable to read the data now since its device
// wasn't registered when the folder was originally created.
_, err = GetRootNodeForTest(ctx, config3Dev2, name, false)
if _, ok := err.(NeedSelfRekeyError); !ok {
t.Fatalf("Got unexpected error when reading with new key: %v", err)
}
// now user 3 dev 2 should set the rekey bit
kbfsOps3Dev2 := config3Dev2.KBFSOps()
err = kbfsOps3Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't rekey: %v", err)
}
// user 2 dev 2 syncs from server
err = kbfsOps2Dev2.SyncFromServerForTesting(ctx, rootNode1.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
// user 2 dev 2 should try to rekey
err = kbfsOps2Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't rekey: %v", err)
}
// user 3 dev 2 syncs from server
err = kbfsOps3Dev2.SyncFromServerForTesting(ctx, rootNode1.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
// this device should be able to read now
rootNode3Dev2 := GetRootNodeOrBust(ctx, t, config3Dev2, name, false)
// look for the file
a2Node, _, err := kbfsOps3Dev2.Lookup(ctx, rootNode3Dev2, "a")
if err != nil {
t.Fatalf("Device 3 couldn't lookup a: %v", err)
}
// read it
buf = []byte{0}
_, err = kbfsOps3Dev2.Read(ctx, a2Node, buf, 0)
if err != nil {
t.Fatalf("Device 3 couldn't read a: %v", err)
}
// Explicitly run the checks with config1 before the deferred shutdowns begin.
// This way the shared mdserver hasn't been shutdown.
kbfsConcurTestShutdown(t, config1, ctx, cancel)
doShutdown1 = false
}
// Two devices conflict when revoking a 3rd device.
// Test that after this both can still read the latest version of the folder.
func TestKeyManagerRekeyAddAndRevokeDeviceWithConflict(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerRekeyAddAndRevokeDeviceWithConflict)
}
func testKeyManagerRekeyAddAndRevokeDeviceWithConflict(t *testing.T, ver MetadataVer) {
var u1, u2 libkb.NormalizedUsername = "u1", "u2"
config1, _, ctx, cancel := kbfsOpsConcurInit(t, u1, u2)
defer kbfsConcurTestShutdown(t, config1, ctx, cancel)
clock := newTestClockNow()
config1.SetClock(clock)
config1.SetMetadataVersion(ver)
config2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2)
_, uid2, err := config2.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
// create a shared folder
name := u1.String() + "," + u2.String()
rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, false)
kbfsOps1 := config1.KBFSOps()
// user 1 creates a file
_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
config2Dev2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2Dev2)
// give user 2 a new device
AddDeviceForLocalUserOrBust(t, config1, uid2)
AddDeviceForLocalUserOrBust(t, config2, uid2)
devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)
// user 2 should be unable to read the data now since its device
// wasn't registered when the folder was originally created.
kbfsOps2Dev2 := config2Dev2.KBFSOps()
_, err = GetRootNodeForTest(ctx, config2Dev2, name, false)
if _, ok := err.(NeedSelfRekeyError); !ok {
t.Fatalf("Got unexpected error when reading with new key: %v", err)
}
// now user 1 should rekey
err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't rekey: %v", err)
}
// this device should be able to read now
root2Dev2 := GetRootNodeOrBust(ctx, t, config2Dev2, name, false)
// Now revoke the original user 2 device
clock.Add(1 * time.Minute)
RevokeDeviceForLocalUserOrBust(t, config1, uid2, 0)
RevokeDeviceForLocalUserOrBust(t, config2Dev2, uid2, 0)
// Stall user 1's rekey, to ensure a conflict.
onPutStalledCh, putUnstallCh, putCtx :=
StallMDOp(ctx, config1, StallableMDPut, 1)
// Have user 1 also try to rekey but fail due to conflict
errChan := make(chan error, 1)
go func() {
errChan <- kbfsOps1.Rekey(putCtx, rootNode1.GetFolderBranch().Tlf)
}()
select {
case <-ctx.Done():
t.Fatal(ctx.Err())
case <-onPutStalledCh:
}
// rekey again but with user 2 device 2
err = kbfsOps2Dev2.Rekey(ctx, root2Dev2.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't rekey: %v", err)
}
// Make sure user 1's rekey failed.
select {
case <-ctx.Done():
t.Fatal(ctx.Err())
case putUnstallCh <- struct{}{}:
}
select {
case <-ctx.Done():
t.Fatal(ctx.Err())
case err = <-errChan:
}
if _, isConflict := err.(RekeyConflictError); !isConflict {
t.Fatalf("Expected failure due to conflict")
}
err = kbfsOps2Dev2.SyncFromServerForTesting(ctx, root2Dev2.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
// force re-encryption of the root dir
_, _, err = kbfsOps2Dev2.CreateFile(ctx, root2Dev2, "b", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// device 1 should still work
err = kbfsOps1.SyncFromServerForTesting(ctx, rootNode1.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
rootNode1 = GetRootNodeOrBust(ctx, t, config1, name, false)
children, err := kbfsOps1.GetDirChildren(ctx, rootNode1)
if _, ok := children["b"]; !ok {
t.Fatalf("Device 1 couldn't see the new dir entry")
}
}
// cryptoLocalTrapAny traps every DecryptTLFCryptKeyClientHalfAny
// call, and sends on the given channel whether each call had
// promptPaper set or not.
type cryptoLocalTrapAny struct {
Crypto
promptCh chan<- bool
cryptoToUse Crypto
}
func (clta *cryptoLocalTrapAny) DecryptTLFCryptKeyClientHalfAny(
ctx context.Context,
keys []EncryptedTLFCryptKeyClientAndEphemeral, promptPaper bool) (
kbfscrypto.TLFCryptKeyClientHalf, int, error) {
select {
case clta.promptCh <- promptPaper:
case <-ctx.Done():
return kbfscrypto.TLFCryptKeyClientHalf{}, 0, ctx.Err()
}
// Decrypt the key half with the given config object
return clta.cryptoToUse.DecryptTLFCryptKeyClientHalfAny(
ctx, keys, promptPaper)
}
func TestKeyManagerRekeyAddDeviceWithPrompt(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerRekeyAddDeviceWithPrompt)
}
func testKeyManagerRekeyAddDeviceWithPrompt(t *testing.T, ver MetadataVer) {
var u1, u2 libkb.NormalizedUsername = "u1", "u2"
config1, _, ctx, cancel := kbfsOpsConcurInit(t, u1, u2)
defer kbfsConcurTestShutdown(t, config1, ctx, cancel)
config1.SetMetadataVersion(ver)
config2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2)
_, uid2, err := config2.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
// Create a shared folder
name := u1.String() + "," + u2.String()
rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, false)
kbfsOps1 := config1.KBFSOps()
// user 1 creates a file
_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
config2Dev2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2Dev2)
// Now give u2 a new device. The configs don't share a Keybase
// Daemon so we have to do it in all places.
AddDeviceForLocalUserOrBust(t, config1, uid2)
AddDeviceForLocalUserOrBust(t, config2, uid2)
devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)
// The new device should be unable to rekey on its own, and will
// just set the rekey bit.
kbfsOps2Dev2 := config2Dev2.KBFSOps()
err = kbfsOps2Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("First rekey failed %v", err)
}
ops := getOps(config2Dev2, rootNode1.GetFolderBranch().Tlf)
rev1 := ops.head.Revision()
// Do it again, to simulate the mdserver sending back this node's
// own rekey request. This shouldn't increase the MD version.
err = kbfsOps2Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Second rekey failed %v", err)
}
rev2 := ops.head.Revision()
if rev1 != rev2 {
t.Errorf("Revision changed after second rekey: %v vs %v", rev1, rev2)
}
// Make sure just the rekey bit is set
if !ops.head.IsRekeySet() {
t.Fatalf("Couldn't set rekey bit")
}
c := make(chan bool)
// Use our other device as a standin for the paper key.
clta := &cryptoLocalTrapAny{config2Dev2.Crypto(), c, config2.Crypto()}
config2Dev2.SetCrypto(clta)
ops.rekeyWithPromptTimer.Reset(1 * time.Millisecond)
var promptPaper bool
select {
case promptPaper = <-c:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
if !promptPaper {
t.Fatalf("Didn't prompt paper")
}
// called a second time for decrypting the private data
select {
case <-c:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
// Take the mdWriterLock to ensure that the rekeyWithPrompt finishes.
lState := makeFBOLockState()
ops.mdWriterLock.Lock(lState)
ops.mdWriterLock.Unlock(lState)
config2Dev2.SetCrypto(clta.Crypto)
rootNode2Dev2 := GetRootNodeOrBust(ctx, t, config2Dev2, name, false)
kbfsOps2 := config2Dev2.KBFSOps()
children, err := kbfsOps2.GetDirChildren(ctx, rootNode2Dev2)
if err != nil {
t.Fatalf("Couldn't get children: %v", err)
}
if _, ok := children["a"]; !ok {
t.Fatalf("Device 2 couldn't see the dir entry after rekey")
}
// user 2 creates another file to make a new revision
_, _, err = kbfsOps2.CreateFile(ctx, rootNode2Dev2, "b", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
// device 1 should be able to read the new file
err = kbfsOps1.SyncFromServerForTesting(ctx, rootNode1.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
children, err = kbfsOps1.GetDirChildren(ctx, rootNode1)
if err != nil {
t.Fatalf("Couldn't get children: %v", err)
}
if _, ok := children["b"]; !ok {
t.Fatalf("Device 1 couldn't see the dir entry after rekey")
}
}
func TestKeyManagerRekeyAddDeviceWithPromptAfterRestart(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerRekeyAddDeviceWithPromptAfterRestart)
}
func testKeyManagerRekeyAddDeviceWithPromptAfterRestart(t *testing.T, ver MetadataVer) {
var u1, u2 libkb.NormalizedUsername = "u1", "u2"
config1, uid1, ctx, cancel := kbfsOpsConcurInit(t, u1, u2)
defer kbfsConcurTestShutdown(t, config1, ctx, cancel)
clock := newTestClockNow()
config1.SetClock(clock)
config1.SetMetadataVersion(ver)
config2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2)
_, uid2, err := config2.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
// Create a shared folder
name := u1.String() + "," + u2.String()
rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, false)
kbfsOps1 := config1.KBFSOps()
// user 1 creates a file
_, _, err = kbfsOps1.CreateFile(ctx, rootNode1, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
config2Dev2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2Dev2)
// Now give u2 a new device. The configs don't share a Keybase
// Daemon so we have to do it in all places.
AddDeviceForLocalUserOrBust(t, config1, uid2)
AddDeviceForLocalUserOrBust(t, config2, uid2)
devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)
// Revoke some previous device
clock.Add(1 * time.Minute)
RevokeDeviceForLocalUserOrBust(t, config2Dev2, uid1, 0)
// The new device should be unable to rekey on its own, and will
// just set the rekey bit.
kbfsOps2Dev2 := config2Dev2.KBFSOps()
err = kbfsOps2Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("First rekey failed %v", err)
}
ops := getOps(config2Dev2, rootNode1.GetFolderBranch().Tlf)
rev1 := ops.head.Revision()
// Do it again, to simulate the mdserver sending back this node's
// own rekey request. This shouldn't increase the MD version.
err = kbfsOps2Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Second rekey failed %v", err)
}
rev2 := ops.head.Revision()
if rev1 != rev2 {
t.Errorf("Revision changed after second rekey: %v vs %v", rev1, rev2)
}
// Make sure just the rekey bit is set
if !ops.head.IsRekeySet() {
t.Fatalf("Couldn't set rekey bit")
}
// Simulate a restart by clearing the timer after the rekey bit was set
ops.rekeyWithPromptTimer.Stop()
ops.rekeyWithPromptTimer = nil
// Try again, which should reset the timer (and so the Reset below
// will be on a non-nil timer).
err = kbfsOps2Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Third rekey failed %v", err)
}
c := make(chan bool)
// Use our other device as a standin for the paper key.
clta := &cryptoLocalTrapAny{config2Dev2.Crypto(), c, config2.Crypto()}
config2Dev2.SetCrypto(clta)
ops.rekeyWithPromptTimer.Reset(1 * time.Millisecond)
var promptPaper bool
select {
case promptPaper = <-c:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
if !promptPaper {
t.Fatalf("Didn't prompt paper")
}
// called a second time for decrypting the private data
select {
case <-c:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
// Take the mdWriterLock to ensure that the rekeyWithPrompt finishes.
lState := makeFBOLockState()
ops.mdWriterLock.Lock(lState)
ops.mdWriterLock.Unlock(lState)
config2Dev2.SetCrypto(clta.Crypto)
rootNode2Dev2 := GetRootNodeOrBust(ctx, t, config2Dev2, name, false)
kbfsOps2 := config2Dev2.KBFSOps()
children, err := kbfsOps2.GetDirChildren(ctx, rootNode2Dev2)
if err != nil {
t.Fatalf("Couldn't get children: %v", err)
}
if _, ok := children["a"]; !ok {
t.Fatalf("Device 2 couldn't see the dir entry after rekey")
}
// user 2 creates another file to make a new revision
_, _, err = kbfsOps2.CreateFile(ctx, rootNode2Dev2, "b", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
}
func TestKeyManagerRekeyAddDeviceWithPromptViaFolderAccess(t *testing.T) {
runTestOverMetadataVers(t, testKeyManagerRekeyAddDeviceWithPromptViaFolderAccess)
}
func testKeyManagerRekeyAddDeviceWithPromptViaFolderAccess(t *testing.T, ver MetadataVer) {
var u1, u2 libkb.NormalizedUsername = "u1", "u2"
config1, _, ctx, cancel := kbfsOpsConcurInit(t, u1, u2)
defer kbfsConcurTestShutdown(t, config1, ctx, cancel)
config1.SetMetadataVersion(ver)
config2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2)
_, uid2, err := config2.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
// Create a shared folder
name := u1.String() + "," + u2.String()
rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, false)
config2Dev2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2Dev2)
// Now give u2 a new device. The configs don't share a Keybase
// Daemon so we have to do it in all places.
AddDeviceForLocalUserOrBust(t, config1, uid2)
AddDeviceForLocalUserOrBust(t, config2, uid2)
devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)
// The new device should be unable to rekey on its own, and will
// just set the rekey bit.
kbfsOps2Dev2 := config2Dev2.KBFSOps()
err = kbfsOps2Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("First rekey failed %v", err)
}
ops := getOps(config2Dev2, rootNode1.GetFolderBranch().Tlf)
// Make sure just the rekey bit is set
if !ops.head.IsRekeySet() {
t.Fatalf("Couldn't set rekey bit")
}
// Allow the prompt rekey attempt to fail by using dev2's crypto
// (which still isn't keyed for)
c := make(chan bool)
// Use our other device as a standin for the paper key.
clta := &cryptoLocalTrapAny{config2Dev2.Crypto(), c, config2Dev2.Crypto()}
config2Dev2.SetCrypto(clta)
ops.rekeyWithPromptTimer.Reset(1 * time.Millisecond)
var promptPaper bool
select {
case promptPaper = <-c:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
if !promptPaper {
t.Fatalf("Didn't prompt paper")
}
// Make sure the rekey attempt is finished by taking the lock.
// Keep the lock for a while, to control when the second rekey starts.
lState := makeFBOLockState()
func() {
ops.mdWriterLock.Lock(lState)
defer ops.mdWriterLock.Unlock(lState)
// Now cause a paper prompt unlock via a folder access
errCh := make(chan error)
go func() {
_, err := GetRootNodeForTest(ctx, config2Dev2, name, false)
select {
case errCh <- err:
case <-ctx.Done():
errCh <- ctx.Err()
}
}()
// One failed decryption attempt
select {
case <-c:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
select {
case err = <-errCh:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
if _, ok := err.(NeedSelfRekeyError); !ok {
t.Fatalf("Got unexpected error when reading with new key: %v", err)
}
// Let the background rekeyer decrypt.
clta.cryptoToUse = config2.Crypto()
}()
select {
case promptPaper = <-c:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
if !promptPaper {
t.Fatalf("Didn't prompt paper")
}
// called a second time for decrypting the private data
select {
case <-c:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
// Make sure the rekey attempt is finished
ops.mdWriterLock.Lock(lState)
ops.mdWriterLock.Unlock(lState)
GetRootNodeOrBust(ctx, t, config2Dev2, name, false)
}
| 1 | 14,918 | What's the theory on why this could fix the hang? It seems like the call to `GetRootNodeForTest` will always need to call into the crypto object before returning an error, and so it should block on that `c` receive, after which the test immediately drains `errCh`. So I don't quite see how buffering would help... | keybase-kbfs | go |
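For context on the buffering question in the review message above: the behavior at issue is plain Go channel semantics — a send on an unbuffered channel blocks until a receiver is ready, while a send on a channel of capacity 1 completes immediately. The sketch below is a minimal, self-contained illustration with hypothetical names (it is not taken from either repository) showing why buffering only changes behavior when the receiver might never drain the channel, which is the point the reviewer is questioning.

package main

import (
	"fmt"
	"time"
)

func main() {
	unbuffered := make(chan error)  // like errCh := make(chan error)
	buffered := make(chan error, 1) // like errCh := make(chan error, 1)

	go func() {
		buffered <- nil // completes immediately: capacity 1, no receiver needed yet
		fmt.Println("buffered send returned")
	}()
	go func() {
		unbuffered <- nil // blocks here until the receive below happens
		fmt.Println("unbuffered send returned")
	}()

	time.Sleep(10 * time.Millisecond) // let both goroutines run; only the buffered send has returned

	<-buffered   // drain the buffered value
	<-unbuffered // this receive is what unblocks the second goroutine
	time.Sleep(10 * time.Millisecond)
}

Under these semantics, if the test always drains errCh right after the receive on c (as the reviewer argues), the buffered and unbuffered versions behave the same; buffering would only avoid a hang if the drain could be skipped.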
@@ -2003,8 +2003,16 @@ decode_operand(decode_info_t *di, byte optype, opnd_size_t opsize, opnd_t *opnd)
return true;
}
case TYPE_H: {
- /* part of AVX: vex.vvvv selects xmm/ymm register */
+ /* As part of AVX and AVX-512, vex.vvvv selects xmm/ymm/zmm register. Note that
+ * vex.vvvv and evex.vvvv are a union.
+ */
reg_id_t reg = (~di->vex_vvvv) & 0xf; /* bit-inverted */
+ if (TEST(PREFIX_EVEX_VV, di->prefixes)) {
+ /* This assumes that the register ranges of DR_REG_XMM, DR_REG_YMM, and
+ * DR_REG_ZMM are contiguous.
+ */
+ reg += 16;
+ }
*opnd = opnd_create_reg(((TEST(PREFIX_VEX_L, di->prefixes) &&
/* see .LIG notes above */
expand_subreg_size(opsize) != OPSZ_16) | 1 | /* **********************************************************
* Copyright (c) 2011-2019 Google, Inc. All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/* decode.c -- a full x86 decoder */
#include "../globals.h"
#include "arch.h"
#include "instr.h"
#include "decode.h"
#include "decode_fast.h"
#include "decode_private.h"
/*
* XXX i#431: consider cpuid features when deciding invalid instrs:
* for core DR, it doesn't really matter: the only bad thing is thinking
* a valid instr is invalid, esp decoding its size improperly.
* but for completeness and use as disassembly library might be nice.
*
* XXX (these are very old):
* 1) several opcodes gdb thinks bad/not bad -- changes to ISA?
* 2) why does gdb use Ex, Ed, & Ev for all float opcodes w/ modrm < 0xbf?
* a float instruction's modrm cannot specify a register, right?
* sizes are single-real => d, double-real => q, extended-real = 90 bits,
* 14/28 bytes, and 98/108 bytes!
* should address sizes all be 'v'?
* 3) there don't seem to be any immediate float values...?!?
* 4) fld (0xd9 0xc0-7) in Table A-10 has 2 operands in different order
* than expected, plus asm prints using just 2nd one
* 5) I don't see T...is there a T? gdb has it for 0f26mov
*/
/* N.B.: must justify each assert, since we do not want to assert on a bad
* instruction -- we want to fail gracefully and have the caller deal with it
*/
#ifdef DEBUG
/* case 10450: give messages to clients */
/* we can't undef ASSERT b/c of DYNAMO_OPTION */
# undef ASSERT_TRUNCATE
# undef ASSERT_BITFIELD_TRUNCATE
# undef ASSERT_NOT_REACHED
# define ASSERT_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_BITFIELD_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_NOT_REACHED DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
#endif
/* used for VEX decoding */
#define xx TYPE_NONE, OPSZ_NA
static const instr_info_t escape_instr = { ESCAPE, 0x000000, "(bad)", xx, xx, xx,
xx, xx, 0, 0, 0 };
static const instr_info_t escape_38_instr = {
ESCAPE_3BYTE_38, 0x000000, "(bad)", xx, xx, xx, xx, xx, 0, 0, 0
};
static const instr_info_t escape_3a_instr = {
ESCAPE_3BYTE_3a, 0x000000, "(bad)", xx, xx, xx, xx, xx, 0, 0, 0
};
/* used for XOP decoding */
static const instr_info_t xop_8_instr = { XOP_8_EXT, 0x000000, "(bad)", xx, xx, xx,
xx, xx, 0, 0, 0 };
static const instr_info_t xop_9_instr = { XOP_9_EXT, 0x000000, "(bad)", xx, xx, xx,
xx, xx, 0, 0, 0 };
static const instr_info_t xop_a_instr = { XOP_A_EXT, 0x000000, "(bad)", xx, xx, xx,
xx, xx, 0, 0, 0 };
#undef xx
bool
is_isa_mode_legal(dr_isa_mode_t mode)
{
#ifdef X64
return (mode == DR_ISA_IA32 || mode == DR_ISA_AMD64);
#else
return (mode == DR_ISA_IA32);
#endif
}
app_pc
canonicalize_pc_target(dcontext_t *dcontext, app_pc pc)
{
return pc;
}
#ifdef X64
bool
set_x86_mode(dcontext_t *dcontext, bool x86)
{
dr_isa_mode_t old_mode;
if (!dr_set_isa_mode(dcontext, x86 ? DR_ISA_IA32 : DR_ISA_AMD64, &old_mode))
return false;
return old_mode == DR_ISA_IA32;
}
bool
get_x86_mode(dcontext_t *dcontext)
{
return dr_get_isa_mode(dcontext) == DR_ISA_IA32;
}
#endif
/****************************************************************************
* All code below based on tables in the ``Intel Architecture Software
* Developer's Manual,'' Volume 2: Instruction Set Reference, 2001.
*/
#if defined(DEBUG) && !defined(STANDALONE_DECODER) /* currently only used in ASSERTs */
static bool
is_variable_size(opnd_size_t sz)
{
switch (sz) {
case OPSZ_2_short1:
case OPSZ_4_short2:
case OPSZ_4x8:
case OPSZ_4x8_short2:
case OPSZ_4x8_short2xi8:
case OPSZ_4_short2xi4:
case OPSZ_4_rex8_short2:
case OPSZ_4_rex8:
case OPSZ_6_irex10_short4:
case OPSZ_6x10:
case OPSZ_8_short2:
case OPSZ_8_short4:
case OPSZ_28_short14:
case OPSZ_108_short94:
case OPSZ_1_reg4:
case OPSZ_2_reg4:
case OPSZ_4_reg16:
case OPSZ_32_short16:
case OPSZ_8_rex16:
case OPSZ_8_rex16_short4:
case OPSZ_12_rex40_short6:
case OPSZ_16_vex32:
case OPSZ_16_vex32_evex64: return true;
default: return false;
}
}
#endif
opnd_size_t
resolve_var_reg_size(opnd_size_t sz, bool is_reg)
{
switch (sz) {
case OPSZ_1_reg4: return (is_reg ? OPSZ_4 : OPSZ_1);
case OPSZ_2_reg4: return (is_reg ? OPSZ_4 : OPSZ_2);
case OPSZ_4_reg16:
return (is_reg ? OPSZ_4 /* i#1382: we distinguish sub-xmm now */
: OPSZ_4);
}
return sz;
}
/* Like all our code, we assume cs specifies default data and address sizes.
* This routine assumes the size varies by data, NOT by address!
*/
opnd_size_t
resolve_variable_size(decode_info_t *di /*IN: x86_mode, prefixes*/, opnd_size_t sz,
bool is_reg)
{
switch (sz) {
case OPSZ_2_short1: return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_1 : OPSZ_2);
case OPSZ_4_short2: return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_2 : OPSZ_4);
case OPSZ_4x8: return (X64_MODE(di) ? OPSZ_8 : OPSZ_4);
case OPSZ_4x8_short2:
return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_2
: (X64_MODE(di) ? OPSZ_8 : OPSZ_4));
case OPSZ_4x8_short2xi8:
return (X64_MODE(di) ? (proc_get_vendor() == VENDOR_INTEL
? OPSZ_8
: (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_2 : OPSZ_8))
: (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_2 : OPSZ_4));
case OPSZ_4_short2xi4:
return ((X64_MODE(di) && proc_get_vendor() == VENDOR_INTEL)
? OPSZ_4
: (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_2 : OPSZ_4));
case OPSZ_4_rex8_short2: /* rex.w trumps data prefix */
return (TEST(PREFIX_REX_W, di->prefixes)
? OPSZ_8
: (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_2 : OPSZ_4));
case OPSZ_4_rex8: return (TEST(PREFIX_REX_W, di->prefixes) ? OPSZ_8 : OPSZ_4);
case OPSZ_6_irex10_short4: /* rex.w trumps data prefix, but is ignored on AMD */
DODEBUG({
/* less annoying than a CURIOSITY assert when testing */
if (TEST(PREFIX_REX_W, di->prefixes))
SYSLOG_INTERNAL_INFO_ONCE("curiosity: rex.w on OPSZ_6_irex10_short4!");
});
return ((TEST(PREFIX_REX_W, di->prefixes) && proc_get_vendor() != VENDOR_AMD)
? OPSZ_10
: (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_4 : OPSZ_6));
case OPSZ_6x10: return (X64_MODE(di) ? OPSZ_10 : OPSZ_6);
case OPSZ_8_short2: return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_2 : OPSZ_8);
case OPSZ_8_short4: return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_4 : OPSZ_8);
case OPSZ_8_rex16: return (TEST(PREFIX_REX_W, di->prefixes) ? OPSZ_16 : OPSZ_8);
case OPSZ_8_rex16_short4: /* rex.w trumps data prefix */
return (TEST(PREFIX_REX_W, di->prefixes)
? OPSZ_16
: (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_4 : OPSZ_8));
case OPSZ_12_rex40_short6: /* rex.w trumps data prefix */
return (TEST(PREFIX_REX_W, di->prefixes)
? OPSZ_40
: (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_6 : OPSZ_12));
case OPSZ_16_vex32: return (TEST(PREFIX_VEX_L, di->prefixes) ? OPSZ_32 : OPSZ_16);
case OPSZ_32_short16: return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_16 : OPSZ_32);
case OPSZ_28_short14: return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_14 : OPSZ_28);
case OPSZ_108_short94: return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_94 : OPSZ_108);
case OPSZ_1_reg4:
case OPSZ_2_reg4:
case OPSZ_4_reg16: return resolve_var_reg_size(sz, is_reg);
/* The _of_ types are not exposed to the user so convert here */
case OPSZ_1_of_16: return OPSZ_1;
case OPSZ_2_of_8:
case OPSZ_2_of_16: return OPSZ_2;
case OPSZ_4_of_8:
case OPSZ_4_of_16: return OPSZ_4;
case OPSZ_4_rex8_of_16: return (TEST(PREFIX_REX_W, di->prefixes) ? OPSZ_8 : OPSZ_4);
case OPSZ_8_of_16: return OPSZ_8;
case OPSZ_12_of_16: return OPSZ_12;
case OPSZ_12_rex8_of_16: return (TEST(PREFIX_REX_W, di->prefixes) ? OPSZ_8 : OPSZ_12);
case OPSZ_14_of_16: return OPSZ_14;
case OPSZ_15_of_16: return OPSZ_15;
case OPSZ_8_of_16_vex32: return (TEST(PREFIX_VEX_L, di->prefixes) ? OPSZ_32 : OPSZ_8);
case OPSZ_16_of_32: return OPSZ_16;
case OPSZ_16_vex32_evex64:
/* XXX i#1312: There may be a conflict since LL' is also used for rounding
* control in AVX-512 if used in combination.
*/
return (TEST(PREFIX_EVEX_LL, di->prefixes)
? OPSZ_64
: (TEST(PREFIX_VEX_L, di->prefixes) ? OPSZ_32 : OPSZ_16));
}
return sz;
}
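/* Worked example (illustrative only, not part of the original file; the prefix
 * combinations below are hypothetical inputs): how a few of the variable sizes above
 * resolve under different prefixes:
 *   OPSZ_4_rex8_short2 w/ rex.w set          -> OPSZ_8  (rex.w trumps 0x66)
 *   OPSZ_4_rex8_short2 w/ only 0x66 (data)   -> OPSZ_2
 *   OPSZ_16_vex32_evex64 w/ evex.LL' set     -> OPSZ_64
 *   OPSZ_16_vex32_evex64 w/ only vex.L set   -> OPSZ_32
 */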
opnd_size_t
expand_subreg_size(opnd_size_t sz)
{
switch (sz) {
case OPSZ_2_of_8:
case OPSZ_4_of_8: return OPSZ_8;
case OPSZ_1_of_16:
case OPSZ_2_of_16:
case OPSZ_4_of_16:
case OPSZ_4_rex8_of_16:
case OPSZ_8_of_16:
case OPSZ_12_of_16:
case OPSZ_12_rex8_of_16:
case OPSZ_14_of_16:
case OPSZ_15_of_16:
case OPSZ_4_reg16: return OPSZ_16;
case OPSZ_16_of_32: return OPSZ_32;
case OPSZ_8_of_16_vex32: return OPSZ_16_vex32;
}
return sz;
}
opnd_size_t
resolve_variable_size_dc(dcontext_t *dcontext, uint prefixes, opnd_size_t sz, bool is_reg)
{
decode_info_t di;
IF_X64(di.x86_mode = get_x86_mode(dcontext));
di.prefixes = prefixes;
return resolve_variable_size(&di, sz, is_reg);
}
opnd_size_t
resolve_addr_size(decode_info_t *di /*IN: x86_mode, prefixes*/)
{
if (TEST(PREFIX_ADDR, di->prefixes))
return (X64_MODE(di) ? OPSZ_4 : OPSZ_2);
else
return (X64_MODE(di) ? OPSZ_8 : OPSZ_4);
}
bool
optype_is_indir_reg(int optype)
{
switch (optype) {
case TYPE_INDIR_VAR_XREG:
case TYPE_INDIR_VAR_XREG_OFFS_1:
case TYPE_INDIR_VAR_XREG_OFFS_N:
case TYPE_INDIR_VAR_XIREG:
case TYPE_INDIR_VAR_XIREG_OFFS_1:
case TYPE_INDIR_VAR_REG:
case TYPE_INDIR_VAR_REG_OFFS_2:
case TYPE_INDIR_VAR_REG_SIZEx2:
case TYPE_INDIR_VAR_XREG_OFFS_8:
case TYPE_INDIR_VAR_XREG_SIZEx8:
case TYPE_INDIR_VAR_REG_SIZEx3x5: return true;
}
return false;
}
opnd_size_t
indir_var_reg_size(decode_info_t *di, int optype)
{
switch (optype) {
case TYPE_INDIR_VAR_XREG:
case TYPE_INDIR_VAR_XREG_OFFS_1:
case TYPE_INDIR_VAR_XREG_OFFS_N:
        /* a non-zero immed int adds an additional amount, but we require the client to handle that
* b/c our decoding and encoding can't see the rest of the operands
*/
return OPSZ_VARSTACK;
case TYPE_INDIR_VAR_XIREG:
case TYPE_INDIR_VAR_XIREG_OFFS_1: return OPSZ_ret;
case TYPE_INDIR_VAR_REG: return OPSZ_REXVARSTACK;
case TYPE_INDIR_VAR_REG_OFFS_2:
case TYPE_INDIR_VAR_REG_SIZEx2: return OPSZ_8_rex16_short4;
case TYPE_INDIR_VAR_XREG_OFFS_8:
case TYPE_INDIR_VAR_XREG_SIZEx8: return OPSZ_32_short16;
case TYPE_INDIR_VAR_REG_SIZEx3x5: return OPSZ_12_rex40_short6;
default: CLIENT_ASSERT(false, "internal error: invalid indir reg type");
}
return OPSZ_0;
}
/* Returns multiplier of the operand size to use as the base-disp offs */
int
indir_var_reg_offs_factor(int optype)
{
switch (optype) {
case TYPE_INDIR_VAR_XREG_OFFS_1:
case TYPE_INDIR_VAR_XREG_OFFS_8:
case TYPE_INDIR_VAR_XREG_OFFS_N:
case TYPE_INDIR_VAR_XIREG_OFFS_1:
case TYPE_INDIR_VAR_REG_OFFS_2: return -1;
}
return 0;
}
/****************************************************************************
* Reading all bytes of instruction
*/
static byte *
read_immed(byte *pc, decode_info_t *di, opnd_size_t size, ptr_int_t *result)
{
size = resolve_variable_size(di, size, false);
/* all data immediates are sign-extended. we use the compiler's casts with
* signed types to do our sign extensions for us.
*/
switch (size) {
case OPSZ_1:
*result = (ptr_int_t)(char)*pc; /* sign-extend */
pc++;
break;
case OPSZ_2:
*result = (ptr_int_t) * ((short *)pc); /* sign-extend */
pc += 2;
break;
case OPSZ_4:
*result = (ptr_int_t) * ((int *)pc); /* sign-extend */
pc += 4;
break;
case OPSZ_8:
CLIENT_ASSERT(X64_MODE(di), "decode immediate: invalid size");
CLIENT_ASSERT(sizeof(ptr_int_t) == 8, "decode immediate: internal size error");
*result = *((ptr_int_t *)pc);
pc += 8;
break;
default:
/* called internally w/ instr_info_t fields or hardcoded values,
* so ok to assert */
CLIENT_ASSERT(false, "decode immediate: unknown size");
}
return pc;
}
/* reads any trailing immed bytes */
static byte *
read_operand(byte *pc, decode_info_t *di, byte optype, opnd_size_t opsize)
{
ptr_int_t val = 0;
opnd_size_t size = opsize;
switch (optype) {
case TYPE_A: {
CLIENT_ASSERT(!X64_MODE(di), "x64 has no type A instructions");
/* ok b/c only instr_info_t fields passed */
CLIENT_ASSERT(opsize == OPSZ_6_irex10_short4, "decode A operand error");
if (TEST(PREFIX_DATA, di->prefixes)) {
/* 4-byte immed */
pc = read_immed(pc, di, OPSZ_4, &val);
#ifdef X64
if (!X64_MODE(di)) {
/* we do not want the sign extension that read_immed() applied */
val &= (ptr_int_t)0x00000000ffffffff;
}
#endif
/* ok b/c only instr_info_t fields passed */
CLIENT_ASSERT(di->size_immed == OPSZ_NA && di->size_immed2 == OPSZ_NA,
"decode A operand error");
di->size_immed = resolve_variable_size(di, opsize, false);
ASSERT(di->size_immed == OPSZ_4);
di->immed = val;
} else {
/* 6-byte immed */
ptr_int_t val2 = 0;
/* little-endian: segment comes last */
pc = read_immed(pc, di, OPSZ_4, &val2);
pc = read_immed(pc, di, OPSZ_2, &val);
#ifdef X64
if (!X64_MODE(di)) {
/* we do not want the sign extension that read_immed() applied */
val2 &= (ptr_int_t)0x00000000ffffffff;
}
#endif
/* ok b/c only instr_info_t fields passed */
CLIENT_ASSERT(di->size_immed == OPSZ_NA && di->size_immed2 == OPSZ_NA,
"decode A operand error");
di->size_immed = resolve_variable_size(di, opsize, false);
ASSERT(di->size_immed == OPSZ_6);
di->size_immed2 = resolve_variable_size(di, opsize, false);
di->immed = val;
di->immed2 = val2;
}
return pc;
}
case TYPE_I: {
pc = read_immed(pc, di, opsize, &val);
break;
}
case TYPE_J: {
byte *end_pc;
pc = read_immed(pc, di, opsize, &val);
if (di->orig_pc != di->start_pc) {
CLIENT_ASSERT(di->start_pc != NULL,
"internal decode error: start pc not set");
end_pc = di->orig_pc + (pc - di->start_pc);
} else
end_pc = pc;
/* convert from relative offset to absolute target pc */
val = ((ptr_int_t)end_pc) + val;
if ((!X64_MODE(di) || proc_get_vendor() != VENDOR_INTEL) &&
TEST(PREFIX_DATA, di->prefixes)) {
/* need to clear upper 16 bits */
val &= (ptr_int_t)0x0000ffff;
} /* for x64 Intel, always 64-bit addr ("f64" in Intel table) */
break;
}
case TYPE_L: {
/* part of AVX: top 4 bits of 8-bit immed select xmm/ymm register */
pc = read_immed(pc, di, OPSZ_1, &val);
break;
}
case TYPE_O: {
/* no modrm byte, offset follows directly. this is address-sized,
* so 64-bit for x64, and addr prefix affects it. */
size = resolve_addr_size(di);
pc = read_immed(pc, di, size, &val);
if (TEST(PREFIX_ADDR, di->prefixes)) {
/* need to clear upper bits */
if (X64_MODE(di))
val &= (ptr_int_t)0xffffffff;
else
val &= (ptr_int_t)0x0000ffff;
}
#ifdef X64
if (!X64_MODE(di)) {
/* we do not want the sign extension that read_immed() applied */
val &= (ptr_int_t)0x00000000ffffffff;
}
#endif
break;
}
default: return pc;
}
if (di->size_immed == OPSZ_NA) {
di->size_immed = size;
di->immed = val;
} else {
/* ok b/c only instr_info_t fields passed */
CLIENT_ASSERT(di->size_immed2 == OPSZ_NA, "decode operand error");
di->size_immed2 = size;
di->immed2 = val;
}
return pc;
}
/* reads the modrm byte and any following sib and offset bytes */
static byte *
read_modrm(byte *pc, decode_info_t *di)
{
byte modrm = *pc;
pc++;
di->modrm = modrm;
di->mod = (byte)((modrm >> 6) & 0x3); /* top 2 bits */
di->reg = (byte)((modrm >> 3) & 0x7); /* middle 3 bits */
di->rm = (byte)(modrm & 0x7); /* bottom 3 bits */
/* addr16 displacement */
if (!X64_MODE(di) && TEST(PREFIX_ADDR, di->prefixes)) {
di->has_sib = false;
if ((di->mod == 0 && di->rm == 6) || di->mod == 2) {
/* 2-byte disp */
di->has_disp = true;
if (di->mod == 0 && di->rm == 6) {
/* treat absolute addr as unsigned */
di->disp = (int)*((ushort *)pc); /* zero-extend */
} else {
/* treat relative addr as signed */
di->disp = (int)*((short *)pc); /* sign-extend */
}
pc += 2;
} else if (di->mod == 1) {
/* 1-byte disp */
di->has_disp = true;
di->disp = (int)(char)*pc; /* sign-extend */
pc++;
} else {
di->has_disp = false;
}
} else {
/* 32-bit, which sometimes has a SIB */
if (di->rm == 4 && di->mod != 3) {
/* need SIB */
byte sib = *pc;
pc++;
di->has_sib = true;
di->scale = (byte)((sib >> 6) & 0x3); /* top 2 bits */
di->index = (byte)((sib >> 3) & 0x7); /* middle 3 bits */
di->base = (byte)(sib & 0x7); /* bottom 3 bits */
} else {
di->has_sib = false;
}
/* displacement */
if ((di->mod == 0 && di->rm == 5) ||
(di->has_sib && di->mod == 0 && di->base == 5) || di->mod == 2) {
/* 4-byte disp */
di->has_disp = true;
di->disp = *((int *)pc);
IF_X64(di->disp_abs = pc); /* used to set instr->rip_rel_pos */
pc += 4;
} else if (di->mod == 1) {
/* 1-byte disp */
di->has_disp = true;
di->disp = (int)(char)*pc; /* sign-extend */
pc++;
} else {
di->has_disp = false;
}
}
return pc;
}
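/* Worked example (illustrative only, not part of the original file; the byte sequence
 * is hypothetical): how read_modrm() above parses the 32-bit-addressing bytes 44 88 10:
 *   modrm 0x44 -> mod=01, reg=000, rm=100 (rm==4 and mod!=3, so a SIB byte follows)
 *   sib   0x88 -> scale=10 (factor 4), index=001 (ecx), base=000 (eax)
 *   disp8 0x10 -> has_disp=true, disp=0x10 (sign-extended)
 * i.e., the memory operand is [eax + ecx*4 + 0x10].
 */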
/* Given the potential first vex byte at pc, reads any subsequent vex
* bytes (and any prefix bytes) and sets the appropriate prefix flags in di.
* Sets info to the entry for the first opcode byte, and pc to point past
* the first opcode byte.
* Also handles xop encodings, which are quite similar to vex.
*/
static byte *
read_vex(byte *pc, decode_info_t *di, byte instr_byte,
const instr_info_t **ret_info INOUT, bool *is_vex /*or xop*/)
{
int idx = 0;
const instr_info_t *info;
byte vex_last = 0, vex_pp;
ASSERT(ret_info != NULL && *ret_info != NULL && is_vex != NULL);
info = *ret_info;
if (info->type == VEX_PREFIX_EXT) {
/* If 32-bit mode and mod selects for memory, this is not vex */
if (X64_MODE(di) || TESTALL(MODRM_BYTE(3, 0, 0), *pc))
idx = 1;
else
idx = 0;
info = &vex_prefix_extensions[info->code][idx];
} else if (info->type == XOP_PREFIX_EXT) {
/* If m-mmm (what AMD calls "map_select") < 8, this is not vex */
if ((*pc & 0x1f) < 0x8)
idx = 0;
else
idx = 1;
info = &xop_prefix_extensions[info->code][idx];
} else
CLIENT_ASSERT(false, "internal vex decoding error");
if (idx == 0) {
/* not vex */
*ret_info = info;
*is_vex = false;
return pc;
}
*is_vex = true;
if (TESTANY(PREFIX_REX_ALL | PREFIX_LOCK, di->prefixes) || di->data_prefix ||
di->rep_prefix || di->repne_prefix) {
/* #UD if combined w/ VEX prefix */
*ret_info = &invalid_instr;
return pc;
}
/* read 2nd vex byte */
instr_byte = *pc;
pc++;
if (info->code == PREFIX_VEX_2B) {
CLIENT_ASSERT(info->type == PREFIX, "internal vex decoding error");
/* fields are: R, vvvv, L, PP. R is inverted. */
vex_last = instr_byte;
if (!TEST(0x80, vex_last))
di->prefixes |= PREFIX_REX_R;
/* 2-byte vex implies leading 0x0f */
*ret_info = &escape_instr;
/* rest are shared w/ 3-byte form's final byte */
} else if (info->code == PREFIX_VEX_3B || info->code == PREFIX_XOP) {
byte vex_mm;
CLIENT_ASSERT(info->type == PREFIX, "internal vex decoding error");
/* fields are: R, X, B, m-mmmm. R, X, and B are inverted. */
if (!TEST(0x80, instr_byte))
di->prefixes |= PREFIX_REX_R;
if (!TEST(0x40, instr_byte))
di->prefixes |= PREFIX_REX_X;
if (!TEST(0x20, instr_byte))
di->prefixes |= PREFIX_REX_B;
vex_mm = instr_byte & 0x1f;
/* our strategy is to decode through the regular tables w/ a vex-encoded
* flag, to match Intel manuals and vex implicit-prefix flags
*/
if (info->code == PREFIX_VEX_3B) {
if (vex_mm == 1) {
*ret_info = &escape_instr;
} else if (vex_mm == 2) {
*ret_info = &escape_38_instr;
} else if (vex_mm == 3) {
*ret_info = &escape_3a_instr;
} else {
/* #UD: reserved for future use */
*ret_info = &invalid_instr;
return pc;
}
} else {
/* xop */
if (vex_mm == 0x8) {
*ret_info = &xop_8_instr;
} else if (vex_mm == 0x9) {
*ret_info = &xop_9_instr;
} else if (vex_mm == 0xa) {
*ret_info = &xop_a_instr;
} else {
/* #UD: reserved for future use */
*ret_info = &invalid_instr;
return pc;
}
}
/* read 3rd vex byte */
vex_last = *pc;
pc++;
/* fields are: W, vvvv, L, PP */
/* Intel docs say vex.W1 behaves just like rex.w except in cases
* where rex.w is ignored, so no need for a PREFIX_VEX_W flag
*/
if (TEST(0x80, vex_last))
di->prefixes |= PREFIX_REX_W;
/* rest are shared w/ 2-byte form's final byte */
} else
CLIENT_ASSERT(false, "internal vex decoding error");
/* shared vex fields */
vex_pp = vex_last & 0x03;
di->vex_vvvv = (vex_last & 0x78) >> 3;
if (TEST(0x04, vex_last))
di->prefixes |= PREFIX_VEX_L;
if (vex_pp == 0x1)
di->data_prefix = true;
else if (vex_pp == 0x2)
di->rep_prefix = true;
else if (vex_pp == 0x3)
di->repne_prefix = true;
di->vex_encoded = true;
return pc;
}
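/* Worked example (illustrative only, not part of the original file; the byte sequence
 * is hypothetical): how read_vex() above parses the 3-byte prefix c4 e2 79, as used by,
 * e.g., vbroadcastss xmm, m32 (opcode 0x18):
 *   0xc4 -> 3-byte vex escape
 *   0xe2 -> R=1, X=1, B=1 (inverted, so no rex bits are added), m-mmmm=00010
 *           -> *ret_info = &escape_38_instr (0x0f 0x38 table)
 *   0x79 -> W=0, vvvv=1111 (inverted to 0, i.e., unused), L=0, pp=01
 *           -> di->data_prefix is set (implied 0x66)
 */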
/* Given the potential first evex byte at pc, reads any subsequent evex
* bytes (and any prefix bytes) and sets the appropriate prefix flags in di.
* Sets info to the entry for the first opcode byte, and pc to point past
* the first opcode byte.
*/
static byte *
read_evex(byte *pc, decode_info_t *di, byte instr_byte,
const instr_info_t **ret_info INOUT, bool *is_evex)
{
const instr_info_t *info;
byte prefix_byte = 0, evex_pp = 0;
ASSERT(ret_info != NULL && *ret_info != NULL && is_evex != NULL);
info = *ret_info;
CLIENT_ASSERT(info->type == EVEX_PREFIX_EXT, "internal evex decoding error");
/* If 32-bit mode and mod selects for memory, this is not evex */
if (X64_MODE(di) || TESTALL(MODRM_BYTE(3, 0, 0), *pc)) {
/* P[3:2] must be 0 and P[10] must be 1, otherwise #UD */
if (TEST(0xC, *pc) || !TEST(0x04, *(pc + 1))) {
*ret_info = &invalid_instr;
return pc;
}
*is_evex = true;
info = &evex_prefix_extensions[0][1];
} else {
/* not evex */
*is_evex = false;
*ret_info = &evex_prefix_extensions[0][0];
return pc;
}
CLIENT_ASSERT(info->code == PREFIX_EVEX, "internal evex decoding error");
/* read 2nd evex byte */
instr_byte = *pc;
prefix_byte = instr_byte;
pc++;
if (TESTANY(PREFIX_REX_ALL | PREFIX_LOCK, di->prefixes) || di->data_prefix ||
di->rep_prefix || di->repne_prefix) {
/* #UD if combined w/ EVEX prefix */
*ret_info = &invalid_instr;
return pc;
}
CLIENT_ASSERT(info->type == PREFIX, "internal evex decoding error");
/* Fields are: R, X, B, R', 00, mm. R, X, B and R' are inverted. Intel's
* Software Developer's Manual Vol-2A 2.6 AVX-512 ENCODING fails to mention
* explicitly the fact that the bits are inverted in order to make the prefix
     * distinct from the bound instruction in 32-bit mode. We confirmed this
     * experimentally.
*/
if (!TEST(0x80, prefix_byte))
di->prefixes |= PREFIX_REX_R;
if (!TEST(0x40, prefix_byte))
di->prefixes |= PREFIX_REX_X;
if (!TEST(0x20, prefix_byte))
di->prefixes |= PREFIX_REX_B;
if (!TEST(0x10, prefix_byte))
di->prefixes |= PREFIX_EVEX_RR;
byte evex_mm = instr_byte & 0x3;
if (evex_mm == 1) {
*ret_info = &escape_instr;
} else if (evex_mm == 2) {
*ret_info = &escape_38_instr;
} else if (evex_mm == 3) {
*ret_info = &escape_3a_instr;
} else {
/* #UD: reserved for future use */
*ret_info = &invalid_instr;
return pc;
}
/* read 3rd evex byte */
prefix_byte = *pc;
pc++;
/* fields are: W, vvvv, 1, PP */
if (TEST(0x80, prefix_byte)) {
di->prefixes |= PREFIX_REX_W;
}
evex_pp = prefix_byte & 0x03;
di->evex_vvvv = (prefix_byte & 0x78) >> 3;
if (evex_pp == 0x1)
di->data_prefix = true;
else if (evex_pp == 0x2)
di->rep_prefix = true;
else if (evex_pp == 0x3)
di->repne_prefix = true;
/* read 4th evex byte */
prefix_byte = *pc;
pc++;
/* fields are: z, L', L, b, V' and aaa */
if (TEST(0x80, prefix_byte))
di->prefixes |= PREFIX_EVEX_z;
if (TEST(0x40, prefix_byte))
di->prefixes |= PREFIX_EVEX_LL;
if (TEST(0x20, prefix_byte))
di->prefixes |= PREFIX_VEX_L;
if (TEST(0x10, prefix_byte))
di->prefixes |= PREFIX_EVEX_b;
if (TEST(0x08, prefix_byte))
di->prefixes |= PREFIX_EVEX_VV;
di->evex_aaa = prefix_byte & 0x07;
di->evex_encoded = true;
return pc;
}
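/* Worked example (illustrative only, not part of the original file; the byte sequence
 * is hypothetical): how read_evex() above parses the 4-byte prefix 62 f1 7c 48, as used
 * by, e.g., vmovaps zmm0, zmm1 when followed by opcode 0x28 and modrm 0xc1:
 *   0x62 -> evex escape
 *   0xf1 -> R=1, X=1, B=1, R'=1 (inverted, so no extension bits are added),
 *           bits 3:2 = 00 (required), mm=01 -> *ret_info = &escape_instr (0x0f table)
 *   0x7c -> W=0, vvvv=1111 (inverted to 0, i.e., unused), bit 2 = 1 (required), pp=00
 *   0x48 -> z=0, L'=1 (PREFIX_EVEX_LL, 512-bit), L=0, b=0, V'=1, aaa=000
 */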
/* Given an instr_info_t PREFIX_EXT entry, reads the next entry based on the prefixes.
 * Note that this function does not initialize the opcode field in \p di; the opcode is
 * instead given by \p info->type.
*/
static inline const instr_info_t *
read_prefix_ext(const instr_info_t *info, decode_info_t *di)
{
/* discard old info, get new one */
int code = (int)info->code;
int idx = (di->rep_prefix ? 1 : (di->data_prefix ? 2 : (di->repne_prefix ? 3 : 0)));
if (di->vex_encoded)
idx += 4;
else if (di->evex_encoded)
idx += 8;
info = &prefix_extensions[code][idx];
if (info->type == INVALID && !DYNAMO_OPTION(decode_strict)) {
/* i#1118: some of these seem to not be invalid with
* prefixes that land in blank slots in the decode tables.
* Though it seems to only be btc, bsf, and bsr (the SSE*
* instrs really do seem invalid when given unlisted prefixes),
* we'd rather err on the side of treating as valid, which is
* after all what gdb and dumpbin list. Even if these
* fault when executed, we know the length, so there's no
* downside to listing as valid, for DR anyway.
* Users of drdecodelib may want to be more aggressive: hence the
* -decode_strict option.
*/
/* Take the base entry w/o prefixes and keep the prefixes */
CLIENT_ASSERT(!di->evex_encoded, "TODO i#1312: decode error: unsupported yet.");
info = &prefix_extensions[code][0 + (di->vex_encoded ? 4 : 0)];
} else if (di->rep_prefix)
di->rep_prefix = false;
else if (di->repne_prefix)
di->repne_prefix = false;
if (di->data_prefix &&
/* Don't remove it if the entry doesn't list 0x66:
* e.g., OP_bsr (i#1118).
*/
(info->opcode >> 24) == DATA_PREFIX_OPCODE)
di->data_prefix = false;
if (info->type == REX_B_EXT) {
/* discard old info, get new one */
code = (int)info->code;
idx = (TEST(PREFIX_REX_B, di->prefixes) ? 1 : 0);
info = &rex_b_extensions[code][idx];
}
return info;
}
/* Disassembles the instruction at pc into the data structures ret_info
* and di. Does NOT set or read di->len.
* Returns a pointer to the pc of the next instruction.
* If just_opcode is true, does not decode the immeds and returns NULL
* (you must call decode_next_pc to get the next pc, but that's faster
 * than decoding the immeds).
 * Returns NULL on an invalid instruction.
*/
static byte *
read_instruction(byte *pc, byte *orig_pc, const instr_info_t **ret_info,
decode_info_t *di, bool just_opcode _IF_DEBUG(bool report_invalid))
{
DEBUG_DECLARE(byte *post_suffix_pc = NULL;)
byte instr_byte;
const instr_info_t *info;
bool vex_noprefix = false;
bool evex_noprefix = false;
/* initialize di */
/* though we only need di->start_pc for full decode rip-rel (and
* there only post-read_instruction()) and decode_from_copy(), and
* di->orig_pc only for decode_from_copy(), we assume that
* high-perf decoding uses decode_cti() and live w/ the extra
* writes here for decode_opcode() and decode_eflags_usage().
*/
di->start_pc = pc;
di->orig_pc = orig_pc;
di->size_immed = OPSZ_NA;
di->size_immed2 = OPSZ_NA;
di->seg_override = REG_NULL;
di->data_prefix = false;
di->rep_prefix = false;
di->repne_prefix = false;
di->vex_encoded = false;
di->evex_encoded = false;
/* FIXME: set data and addr sizes to current mode
* for now I assume always 32-bit mode (or 64 for X64_MODE(di))!
*/
di->prefixes = 0;
do {
instr_byte = *pc;
pc++;
info = &first_byte[instr_byte];
if (info->type == X64_EXT) {
/* discard old info, get new one */
info = &x64_extensions[info->code][X64_MODE(di) ? 1 : 0];
} else if (info->type == VEX_PREFIX_EXT || info->type == XOP_PREFIX_EXT) {
bool is_vex = false; /* or xop */
pc = read_vex(pc, di, instr_byte, &info, &is_vex);
/* if read_vex changes info, leave this loop */
if (info->type != VEX_PREFIX_EXT && info->type != XOP_PREFIX_EXT)
break;
else {
if (is_vex)
vex_noprefix = true; /* staying in loop, but ensure no prefixes */
continue;
}
} else if (info->type == EVEX_PREFIX_EXT) {
bool is_evex = false;
pc = read_evex(pc, di, instr_byte, &info, &is_evex);
/* if read_evex changes info, leave this loop */
if (info->type != EVEX_PREFIX_EXT)
break;
else {
if (is_evex)
evex_noprefix = true; /* staying in loop, but ensure no prefixes */
continue;
}
}
if (info->type == PREFIX) {
if (vex_noprefix || evex_noprefix) {
/* VEX/EVEX prefix must be last */
info = &invalid_instr;
break;
}
if (TESTANY(PREFIX_REX_ALL, di->prefixes)) {
/* rex.* must come after all other prefixes (including those that are
* part of the opcode, xref PR 271878): so discard them if before
* matching the behavior of decode_sizeof(). This in effect nops
* improperly placed rex prefixes which (xref PR 241563 and Intel Manual
* 2A 2.2.1) is the correct thing to do. NOTE - windbg shows early bytes
* as ??, objdump as their prefix names, separate from the next instr.
*/
di->prefixes &= ~PREFIX_REX_ALL;
}
if (info->code == PREFIX_REP) {
/* see if used as part of opcode before considering prefix */
di->rep_prefix = true;
} else if (info->code == PREFIX_REPNE) {
/* see if used as part of opcode before considering prefix */
di->repne_prefix = true;
} else if (REG_START_SEGMENT <= info->code &&
info->code <= REG_STOP_SEGMENT) {
CLIENT_ASSERT_TRUNCATE(di->seg_override, ushort, info->code,
"decode error: invalid segment override");
di->seg_override = (reg_id_t)info->code;
} else if (info->code == PREFIX_DATA) {
/* see if used as part of opcode before considering prefix */
di->data_prefix = true;
} else if (TESTANY(PREFIX_REX_ALL | PREFIX_ADDR | PREFIX_LOCK, info->code)) {
di->prefixes |= info->code;
}
} else
break;
} while (true);
if (info->type == ESCAPE) {
/* discard first byte, move to second */
instr_byte = *pc;
pc++;
info = &second_byte[instr_byte];
}
if (info->type == ESCAPE_3BYTE_38 || info->type == ESCAPE_3BYTE_3a) {
/* discard second byte, move to third */
instr_byte = *pc;
pc++;
if (info->type == ESCAPE_3BYTE_38)
info = &third_byte_38[third_byte_38_index[instr_byte]];
else
info = &third_byte_3a[third_byte_3a_index[instr_byte]];
} else if (info->type == XOP_8_EXT || info->type == XOP_9_EXT ||
info->type == XOP_A_EXT) {
/* discard second byte, move to third */
int idx = 0;
instr_byte = *pc;
pc++;
if (info->type == XOP_8_EXT)
idx = xop_8_index[instr_byte];
else if (info->type == XOP_9_EXT)
idx = xop_9_index[instr_byte];
else if (info->type == XOP_A_EXT)
idx = xop_a_index[instr_byte];
else
CLIENT_ASSERT(false, "internal invalid XOP type");
info = &xop_extensions[idx];
}
/* all FLOAT_EXT and PREFIX_EXT (except nop & pause) and EXTENSION need modrm,
* get it now
*/
if ((info->flags & HAS_MODRM) != 0)
pc = read_modrm(pc, di);
if (info->type == FLOAT_EXT) {
if (di->modrm <= 0xbf) {
int offs = (instr_byte - 0xd8) * 8 + di->reg;
info = &float_low_modrm[offs];
} else {
int offs1 = (instr_byte - 0xd8);
int offs2 = di->modrm - 0xc0;
info = &float_high_modrm[offs1][offs2];
}
} else if (info->type == REP_EXT) {
/* discard old info, get new one */
int code = (int)info->code;
int idx = (di->rep_prefix ? 2 : 0);
info = &rep_extensions[code][idx];
if (di->rep_prefix)
di->rep_prefix = false;
} else if (info->type == REPNE_EXT) {
/* discard old info, get new one */
int code = (int)info->code;
int idx = (di->rep_prefix ? 2 : (di->repne_prefix ? 4 : 0));
info = &repne_extensions[code][idx];
di->rep_prefix = false;
di->repne_prefix = false;
} else if (info->type == EXTENSION) {
/* discard old info, get new one */
info = &base_extensions[info->code][di->reg];
/* absurd cases of using prefix on top of reg opcode extension
* (pslldq, psrldq) => PREFIX_EXT can happen after here,
* and MOD_EXT after that
*/
} else if (info->type == SUFFIX_EXT) {
/* Discard old info, get new one for complete opcode, which includes
* a suffix byte where an immed would be (yes, ugly!).
* We should have already read in the modrm (+ sib).
*/
CLIENT_ASSERT(TEST(HAS_MODRM, info->flags), "decode error on 3DNow instr");
info = &suffix_extensions[suffix_index[*pc]];
pc++;
DEBUG_DECLARE(post_suffix_pc = pc;)
} else if (info->type == VEX_L_EXT) {
/* TODO i#1312: We probably need to extend this table for EVEX. In this case,
* rename to e_vex_L_extensions or set up a new table?
*/
/* discard old info, get new one */
int code = (int)info->code;
int idx = (di->vex_encoded) ? (TEST(PREFIX_VEX_L, di->prefixes) ? 2 : 1) : 0;
info = &vex_L_extensions[code][idx];
} else if (info->type == VEX_W_EXT) {
/* discard old info, get new one */
int code = (int)info->code;
int idx = (TEST(PREFIX_REX_W, di->prefixes) ? 1 : 0);
info = &vex_W_extensions[code][idx];
} else if (info->type == EVEX_W_EXT) {
/* discard old info, get new one */
int code = (int)info->code;
int idx = (TEST(PREFIX_REX_W, di->prefixes) ? 1 : 0);
info = &evex_W_extensions[code][idx];
}
/* can occur AFTER above checks (EXTENSION, in particular) */
if (info->type == PREFIX_EXT) {
/* discard old info, get new one */
info = read_prefix_ext(info, di);
}
/* can occur AFTER above checks (PREFIX_EXT, in particular) */
if (info->type == MOD_EXT) {
info = &mod_extensions[info->code][(di->mod == 3) ? 1 : 0];
/* Yes, we have yet another layer, thanks to Intel's poor choice
* in opcodes -- why didn't they fill out the PREFIX_EXT space?
*/
if (info->type == RM_EXT) {
info = &rm_extensions[info->code][di->rm];
}
/* We have to support prefix before mod, and mod before prefix */
if (info->type == PREFIX_EXT) {
info = read_prefix_ext(info, di);
}
}
/* can occur AFTER above checks (MOD_EXT, in particular) */
if (info->type == E_VEX_EXT) {
/* discard old info, get new one */
int code = (int)info->code;
int idx = 0;
if (di->vex_encoded)
idx = 1;
else if (di->evex_encoded)
idx = 2;
info = &e_vex_extensions[code][idx];
}
/* can occur AFTER above checks (EXTENSION, in particular) */
if (info->type == PREFIX_EXT) {
/* discard old info, get new one */
info = read_prefix_ext(info, di);
}
/* can occur AFTER above checks (MOD_EXT, in particular) */
if (info->type == REX_W_EXT) {
/* discard old info, get new one */
int code = (int)info->code;
int idx = (TEST(PREFIX_REX_W, di->prefixes) ? 1 : 0);
info = &rex_w_extensions[code][idx];
} else if (info->type == VEX_L_EXT) {
/* discard old info, get new one */
int code = (int)info->code;
int idx = (di->vex_encoded) ? (TEST(PREFIX_VEX_L, di->prefixes) ? 2 : 1) : 0;
info = &vex_L_extensions[code][idx];
} else if (info->type == VEX_W_EXT) {
/* discard old info, get new one */
int code = (int)info->code;
int idx = (TEST(PREFIX_REX_W, di->prefixes) ? 1 : 0);
info = &vex_W_extensions[code][idx];
} else if (info->type == EVEX_W_EXT) {
/* discard old info, get new one */
int code = (int)info->code;
int idx = (TEST(PREFIX_REX_W, di->prefixes) ? 1 : 0);
info = &evex_W_extensions[code][idx];
}
if (TEST(REQUIRES_PREFIX, info->flags)) {
byte required = (byte)(info->opcode >> 24);
bool *prefix_var = NULL;
if (required == 0) { /* cannot have a prefix */
if (prefix_var != NULL) {
/* invalid instr */
info = NULL;
}
} else {
CLIENT_ASSERT(info->opcode > 0xffffff, "decode error in SSSE3/SSE4 instr");
if (required == DATA_PREFIX_OPCODE)
prefix_var = &di->data_prefix;
else if (required == REPNE_PREFIX_OPCODE)
prefix_var = &di->repne_prefix;
else if (required == REP_PREFIX_OPCODE)
prefix_var = &di->rep_prefix;
else
CLIENT_ASSERT(false, "internal required-prefix error");
if (prefix_var == NULL || !*prefix_var) {
                /* Invalid instr. TODO: on a processor w/ SSE4, confirm that
* an exception really is raised.
*/
info = NULL;
} else
*prefix_var = false;
}
}
/* we go through regular tables for vex but only some are valid w/ vex */
if (info != NULL && di->vex_encoded) {
if (!TEST(REQUIRES_VEX, info->flags))
info = NULL; /* invalid encoding */
else if (TEST(REQUIRES_VEX_L_0, info->flags) && TEST(PREFIX_VEX_L, di->prefixes))
info = NULL;
} else if (info != NULL && !di->vex_encoded && TEST(REQUIRES_VEX, info->flags)) {
info = NULL; /* invalid encoding */
} else if (info != NULL && di->evex_encoded) {
if (!TEST(REQUIRES_EVEX, info->flags))
info = NULL; /* invalid encoding */
/* XXX i#1312: This might need checks for REQUIRES_EVEX_L_0 and
* REQUIRES_EVEX_LL_0.
*/
} else if (info != NULL && !di->evex_encoded && TEST(REQUIRES_EVEX, info->flags))
info = NULL; /* invalid encoding */
/* XXX: not currently marking these cases as invalid instructions:
* - if no TYPE_H:
* "Note: In VEX-encoded versions, VEX.vvvv is reserved and must be 1111b otherwise
* instructions will #UD."
* - "an attempt to execute VTESTPS with VEX.W=1 will cause #UD."
* and similar for VEX.W.
*/
/* at this point should be an instruction, so type should be an OP_ constant */
if (info == NULL || info == &invalid_instr || info->type < OP_FIRST ||
info->type > OP_LAST || (X64_MODE(di) && TEST(X64_INVALID, info->flags)) ||
(!X64_MODE(di) && TEST(X86_INVALID, info->flags))) {
/* invalid instruction: up to caller to decide what to do with it */
/* FIXME case 10672: provide a runtime option to specify new
* instruction formats */
DODEBUG({
/* don't report when decoding DR addresses, as we sometimes try to
* decode backward (e.g., interrupted_inlined_syscall(): PR 605161)
* XXX: better to pass in a flag when decoding that we are
* being speculative!
*/
if (report_invalid && !is_dynamo_address(di->start_pc)) {
SYSLOG_INTERNAL_WARNING_ONCE("Invalid opcode encountered");
if (info != NULL && info->type == INVALID) {
LOG(THREAD_GET, LOG_ALL, 1, "Invalid opcode @" PFX ": 0x%x\n",
di->start_pc, info->opcode);
} else {
int i;
dcontext_t *dcontext = get_thread_private_dcontext();
IF_X64(bool old_mode = set_x86_mode(dcontext, di->x86_mode);)
int sz = decode_sizeof(dcontext, di->start_pc, NULL _IF_X64(NULL));
IF_X64(set_x86_mode(dcontext, old_mode));
LOG(THREAD_GET, LOG_ALL, 1,
"Error decoding " PFX " == ", di->start_pc);
for (i = 0; i < sz; i++) {
LOG(THREAD_GET, LOG_ALL, 1, "0x%x ", *(di->start_pc + i));
}
LOG(THREAD_GET, LOG_ALL, 1, "\n");
}
}
});
*ret_info = &invalid_instr;
return NULL;
}
#ifdef INTERNAL
DODEBUG(
{ /* rep & repne should have been completely handled by now */
/* processor will typically ignore extra prefixes, but we log this internally
* in case it's our decode messing up instead of weird app instrs
*/
if (report_invalid &&
((di->rep_prefix &&
/* case 6861: AMD64 opt: "rep ret" used if br tgt or after cbr */
(pc != di->start_pc + 2 || *(di->start_pc + 1) != RAW_OPCODE_ret)) ||
(di->repne_prefix &&
/* i#1899: MPX puts repne prior to branches. We ignore here until we have
* full MPX decoding support (i#1312).
*/
info->type != OP_call && info->type != OP_call_ind &&
info->type != OP_ret && info->type != OP_jmp &&
info->type != OP_jmp_short && !opc_is_cbr_arch(info->type)))) {
char bytes[17 * 3];
int i;
dcontext_t *dcontext = get_thread_private_dcontext();
IF_X64(bool old_mode = set_x86_mode(dcontext, di->x86_mode);)
int sz = decode_sizeof(dcontext, di->start_pc, NULL _IF_X64(NULL));
IF_X64(set_x86_mode(dcontext, old_mode));
CLIENT_ASSERT(sz <= 17, "decode rep/repne error: unsupported opcode?");
for (i = 0; i < sz; i++)
snprintf(&bytes[i * 3], 3, "%02x ", *(di->start_pc + i));
bytes[sz * 3 - 1] = '\0'; /* -1 to kill trailing space */
SYSLOG_INTERNAL_WARNING_ONCE(
"spurious rep/repne prefix @" PFX " (%s): ", di->start_pc, bytes);
}
});
#endif
/* if just want opcode, stop here! faster for caller to
* separately call decode_next_pc than for us to decode immeds!
*/
if (just_opcode) {
*ret_info = info;
return NULL;
}
if (di->data_prefix) {
/* prefix was not part of opcode, it's a real prefix */
/* From Intel manual:
* "For non-byte operations: if a 66H prefix is used with
* prefix (REX.W = 1), 66H is ignored."
* That means non-byte-specific operations, for which 66H is
* ignored as well, right?
* Xref PR 593593.
* Note that this means we could assert or remove some of
* the "rex.w trumps data prefix" logic elsewhere in this file.
*/
if (TEST(PREFIX_REX_W, di->prefixes)) {
LOG(THREAD_GET, LOG_ALL, 3, "Ignoring 0x66 in presence of rex.w @" PFX "\n",
di->start_pc);
} else {
di->prefixes |= PREFIX_DATA;
}
}
if ((di->repne_prefix || di->rep_prefix) &&
(TEST(PREFIX_LOCK, di->prefixes) ||
/* xrelease can go on non-0xa3 mov_st w/o lock prefix */
(di->repne_prefix && info->type == OP_mov_st &&
(info->opcode & 0xa30000) != 0xa30000))) {
/* we don't go so far as to ensure the mov_st is of the right type */
if (di->repne_prefix)
di->prefixes |= PREFIX_XACQUIRE;
if (di->rep_prefix)
di->prefixes |= PREFIX_XRELEASE;
}
/* read any trailing immediate bytes */
if (info->dst1_type != TYPE_NONE)
pc = read_operand(pc, di, info->dst1_type, info->dst1_size);
if (info->dst2_type != TYPE_NONE)
pc = read_operand(pc, di, info->dst2_type, info->dst2_size);
if (info->src1_type != TYPE_NONE)
pc = read_operand(pc, di, info->src1_type, info->src1_size);
if (info->src2_type != TYPE_NONE)
pc = read_operand(pc, di, info->src2_type, info->src2_size);
if (info->src3_type != TYPE_NONE)
pc = read_operand(pc, di, info->src3_type, info->src3_size);
if (info->type == SUFFIX_EXT) {
/* Shouldn't be any more bytes (immed bytes) read after the modrm+suffix! */
DODEBUG({ CLIENT_ASSERT(pc == post_suffix_pc, "decode error on 3DNow instr"); });
}
/* return values */
*ret_info = info;
return pc;
}
/****************************************************************************
* Full decoding
*/
/* Caller must check for rex.{r,b} extensions before calling this routine */
static reg_id_t
reg8_alternative(decode_info_t *di, reg_id_t reg, uint prefixes)
{
if (X64_MODE(di) && reg >= REG_START_x86_8 && reg <= REG_STOP_x86_8 &&
TESTANY(PREFIX_REX_ALL, prefixes)) {
/* for x64, if any rex prefix exists, we use SPL...SDL instead of
* AH..BH (this seems to be the only use of 0x40 == PREFIX_REX_GENERAL)
*/
return (reg - REG_START_x86_8 + REG_START_x64_8);
}
return reg;
}
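/* Worked example (illustrative only, not part of the original file; the byte sequences
 * are hypothetical): 88 e0 decodes as "mov al, ah", while 40 88 e0 (any rex prefix
 * present) makes reg8_alternative() map the reg-field value 100 to spl instead of ah,
 * i.e., "mov al, spl".
 */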
/* which register within modrm, vex or evex we're decoding */
typedef enum {
DECODE_REG_REG,
DECODE_REG_BASE,
DECODE_REG_INDEX,
DECODE_REG_RM,
DECODE_REG_VEX,
DECODE_REG_EVEX,
DECODE_REG_OPMASK,
} decode_reg_t;
/* Pass in the raw opsize, NOT a size passed through resolve_variable_size(),
* to avoid allowing OPSZ_6_irex10_short4 w/ data16.
* To create a sub-sized register, caller must set size separately.
*/
static reg_id_t
decode_reg(decode_reg_t which_reg, decode_info_t *di, byte optype, opnd_size_t opsize)
{
bool extend = false;
bool avx512_extend = false;
byte reg = 0;
switch (which_reg) {
case DECODE_REG_REG:
reg = di->reg;
extend = X64_MODE(di) && TEST(PREFIX_REX_R, di->prefixes);
avx512_extend = TEST(PREFIX_EVEX_RR, di->prefixes);
break;
case DECODE_REG_BASE:
reg = di->base;
extend = X64_MODE(di) && TEST(PREFIX_REX_B, di->prefixes);
break;
case DECODE_REG_INDEX:
reg = di->index;
extend = X64_MODE(di) && TEST(PREFIX_REX_X, di->prefixes);
break;
case DECODE_REG_RM:
reg = di->rm;
extend = X64_MODE(di) && TEST(PREFIX_REX_B, di->prefixes);
if (di->evex_encoded)
avx512_extend = TEST(PREFIX_REX_X, di->prefixes);
break;
case DECODE_REG_VEX:
/* Part of XOP/AVX: vex.vvvv selects general-purpose register.
* It has 4 bits so no separate prefix bit is needed to extend.
*/
reg = (~di->vex_vvvv) & 0xf; /* bit-inverted */
extend = false;
avx512_extend = false;
break;
case DECODE_REG_EVEX:
/* Part of AVX-512: evex.vvvv selects general-purpose register.
* It has 4 bits so no separate prefix bit is needed to extend.
* Intel's Software Developer's Manual Vol-2A 2.6 AVX-512 ENCODING fails to
         * mention the fact that the bits are inverted in the EVEX prefix. This was
         * confirmed experimentally.
*/
reg = (~di->evex_vvvv) & 0xf; /* bit-inverted */
extend = false;
avx512_extend = !TEST(PREFIX_EVEX_VV, di->prefixes); /* bit-inverted */
break;
case DECODE_REG_OPMASK:
/* Part of AVX-512: evex.aaa selects opmask register. */
reg = di->evex_aaa & 0x7;
break;
default: CLIENT_ASSERT(false, "internal unknown reg error");
}
switch (optype) {
case TYPE_P:
case TYPE_Q:
case TYPE_P_MODRM: return (REG_START_MMX + reg); /* no x64 extensions */
case TYPE_V:
case TYPE_W:
case TYPE_V_MODRM:
case TYPE_VSIB: {
reg_id_t extend_reg = extend ? reg + 8 : reg;
extend_reg = avx512_extend ? extend_reg + 16 : extend_reg;
return (TEST(PREFIX_EVEX_LL, di->prefixes)
? (DR_REG_START_ZMM + extend_reg)
: ((TEST(PREFIX_VEX_L, di->prefixes) &&
/* Not only do we use this for VEX .LIG (where raw reg is either
* OPSZ_32 or OPSZ_16_vex32) but also for VSIB which currently
* does not get up to OPSZ_16 so we can use this negative
* check.
* XXX i#1312: vgather/vscatter VSIB addressing may be OPSZ_16?
* For EVEX .LIG, raw reg will be able to be OPSZ_64 or
* OPSZ_16_vex32_evex64.
*/
expand_subreg_size(opsize) != OPSZ_16)
? (REG_START_YMM + extend_reg)
: (REG_START_XMM + extend_reg)));
}
case TYPE_S:
if (reg >= 6)
return REG_NULL;
return (REG_START_SEGMENT + reg);
case TYPE_C: return (extend ? (REG_START_CR + 8 + reg) : (REG_START_CR + reg));
case TYPE_D: return (extend ? (REG_START_DR + 8 + reg) : (REG_START_DR + reg));
case TYPE_K_REG:
case TYPE_K_MODRM:
case TYPE_K_MODRM_R:
case TYPE_K_VEX:
case TYPE_K_EVEX: return DR_REG_START_OPMASK + reg;
case TYPE_E:
case TYPE_G:
case TYPE_R:
case TYPE_B:
case TYPE_M:
case TYPE_INDIR_E:
case TYPE_FLOATMEM:
/* GPR: fall-through since variable subset of full register */
break;
default: CLIENT_ASSERT(false, "internal unknown reg error");
}
/* Do not allow a register for 'p' or 'a' types. FIXME: maybe *_far_ind_* should
* use TYPE_INDIR_M instead of TYPE_INDIR_E? What other things are going to turn
* into asserts or crashes instead of invalid instrs based on events as fragile
* as these decode routines moving sizes around?
*/
if (opsize != OPSZ_6_irex10_short4 && opsize != OPSZ_8_short4)
opsize = resolve_variable_size(di, opsize, true);
switch (opsize) {
case OPSZ_1:
if (extend)
return (REG_START_8 + 8 + reg);
else
return reg8_alternative(di, REG_START_8 + reg, di->prefixes);
case OPSZ_2: return (extend ? (REG_START_16 + 8 + reg) : (REG_START_16 + reg));
case OPSZ_4: return (extend ? (REG_START_32 + 8 + reg) : (REG_START_32 + reg));
case OPSZ_8: return (extend ? (REG_START_64 + 8 + reg) : (REG_START_64 + reg));
case OPSZ_6:
case OPSZ_6_irex10_short4:
case OPSZ_8_short4:
/* invalid: no register of size p */
return REG_NULL;
default:
/* ok to assert since params controlled by us */
CLIENT_ASSERT(false, "decode error: unknown register size");
return REG_NULL;
}
}
static bool
decode_modrm(decode_info_t *di, byte optype, opnd_size_t opsize, opnd_t *reg_opnd,
opnd_t *rm_opnd)
{
/* for x64, addr prefix affects only base/index and truncates final addr:
* modrm + sib table is the same
*/
bool addr16 = !X64_MODE(di) && TEST(PREFIX_ADDR, di->prefixes);
if (reg_opnd != NULL) {
reg_id_t reg = decode_reg(DECODE_REG_REG, di, optype, opsize);
if (reg == REG_NULL)
return false;
*reg_opnd = opnd_create_reg(reg);
opnd_set_size(reg_opnd, resolve_variable_size(di, opsize, true /*is reg*/));
}
if (rm_opnd != NULL) {
reg_id_t base_reg = REG_NULL;
int disp = 0;
reg_id_t index_reg = REG_NULL;
int scale = 0;
char memtype = (optype == TYPE_VSIB ? TYPE_VSIB : TYPE_M);
opnd_size_t memsize = resolve_addr_size(di);
bool encode_zero_disp, force_full_disp;
if (di->has_disp)
disp = di->disp;
else
disp = 0;
if (di->has_sib) {
CLIENT_ASSERT(!addr16, "decode error: x86 addr16 cannot have a SIB byte");
if (di->index == 4 &&
/* rex.x enables r12 as index */
(!X64_MODE(di) || !TEST(PREFIX_REX_X, di->prefixes)) &&
optype != TYPE_VSIB) {
/* no scale/index */
index_reg = REG_NULL;
} else {
index_reg = decode_reg(DECODE_REG_INDEX, di, memtype, memsize);
if (index_reg == REG_NULL) {
CLIENT_ASSERT(false, "decode error: !index: internal modrm error");
return false;
}
if (di->scale == 0)
scale = 1;
else if (di->scale == 1)
scale = 2;
else if (di->scale == 2)
scale = 4;
else if (di->scale == 3)
scale = 8;
}
if (di->base == 5 && di->mod == 0) {
/* no base */
base_reg = REG_NULL;
} else {
base_reg = decode_reg(DECODE_REG_BASE, di, TYPE_M, memsize);
if (base_reg == REG_NULL) {
CLIENT_ASSERT(false, "decode error: internal modrm decode error");
return false;
}
}
} else {
if (optype == TYPE_VSIB)
return false; /* invalid w/o vsib byte */
if ((!addr16 && di->mod == 0 && di->rm == 5) ||
(addr16 && di->mod == 0 && di->rm == 6)) {
/* just absolute displacement, or rip-relative for x64 */
#ifdef X64
if (X64_MODE(di)) {
/* rip-relative: convert from relative offset to absolute target pc */
byte *addr;
CLIENT_ASSERT(di->start_pc != NULL,
"internal decode error: start pc not set");
if (di->orig_pc != di->start_pc)
addr = di->orig_pc + di->len + di->disp;
else
addr = di->start_pc + di->len + di->disp;
if (TEST(PREFIX_ADDR, di->prefixes)) {
/* Need to clear upper 32 bits.
* Debuggers do not display this truncation, though
* both Intel and AMD manuals describe it.
* I did verify it w/ actual execution.
*/
ASSERT_NOT_TESTED();
addr = (byte *)((ptr_uint_t)addr & 0xffffffff);
}
*rm_opnd = opnd_create_far_rel_addr(
di->seg_override, (void *)addr,
resolve_variable_size(di, opsize, false));
return true;
} else
#endif
base_reg = REG_NULL;
index_reg = REG_NULL;
} else if (di->mod == 3) {
/* register */
reg_id_t rm_reg = decode_reg(DECODE_REG_RM, di, optype, opsize);
if (rm_reg == REG_NULL) /* no assert since happens, e.g., ff d9 */
return false;
else {
*rm_opnd = opnd_create_reg(rm_reg);
opnd_set_size(rm_opnd,
resolve_variable_size(di, opsize, true /*is reg*/));
return true;
}
} else {
/* non-sib reg-based memory address */
if (addr16) {
/* funny order requiring custom decode */
switch (di->rm) {
case 0:
base_reg = REG_BX;
index_reg = REG_SI;
scale = 1;
break;
case 1:
base_reg = REG_BX;
index_reg = REG_DI;
scale = 1;
break;
case 2:
base_reg = REG_BP;
index_reg = REG_SI;
scale = 1;
break;
case 3:
base_reg = REG_BP;
index_reg = REG_DI;
scale = 1;
break;
case 4: base_reg = REG_SI; break;
case 5: base_reg = REG_DI; break;
case 6:
base_reg = REG_BP;
CLIENT_ASSERT(di->mod != 0,
"decode error: %bp cannot have mod 0");
break;
case 7: base_reg = REG_BX; break;
default:
CLIENT_ASSERT(false, "decode error: unknown modrm rm");
break;
}
} else {
/* single base reg */
base_reg = decode_reg(DECODE_REG_RM, di, memtype, memsize);
if (base_reg == REG_NULL) {
CLIENT_ASSERT(false,
"decode error: !base: internal modrm decode error");
return false;
}
}
}
}
/* We go ahead and preserve the force bools if the original really had a 0
* disp; up to user to unset bools when changing disp value (FIXME: should
* we auto-unset on first mod?)
*/
encode_zero_disp = di->has_disp && disp == 0 &&
/* there is no bp base without a disp */
(!addr16 || base_reg != REG_BP);
force_full_disp =
di->has_disp && disp >= INT8_MIN && disp <= INT8_MAX && di->mod == 2;
if (di->seg_override != REG_NULL) {
*rm_opnd = opnd_create_far_base_disp_ex(
di->seg_override, base_reg, index_reg, scale, disp,
resolve_variable_size(di, opsize, false), encode_zero_disp,
force_full_disp, TEST(PREFIX_ADDR, di->prefixes));
} else {
/* Note that OP_{jmp,call}_far_ind does NOT have a far base disp
* operand: it is a regular base disp containing 6 bytes that
* specify a segment selector and address. The opcode must be
* examined to know how to interpret those 6 bytes.
*/
*rm_opnd = opnd_create_base_disp_ex(base_reg, index_reg, scale, disp,
resolve_variable_size(di, opsize, false),
encode_zero_disp, force_full_disp,
TEST(PREFIX_ADDR, di->prefixes));
}
}
return true;
}
static ptr_int_t
get_immed(decode_info_t *di, opnd_size_t opsize)
{
ptr_int_t val = 0;
if (di->size_immed == OPSZ_NA) {
/* ok b/c only instr_info_t fields passed */
CLIENT_ASSERT(di->size_immed2 != OPSZ_NA, "decode immediate size error");
val = di->immed2;
di->size_immed2 = OPSZ_NA; /* mark as used up */
} else {
/* ok b/c only instr_info_t fields passed */
CLIENT_ASSERT(di->size_immed != OPSZ_NA, "decode immediate size error");
val = di->immed;
di->size_immed = OPSZ_NA; /* mark as used up */
}
return val;
}
/* Also takes in reg8 for TYPE_REG_EX mov_imm */
reg_id_t
resolve_var_reg(decode_info_t *di /*IN: x86_mode, prefixes*/, reg_id_t reg32, bool addr,
bool can_shrink _IF_X64(bool default_64) _IF_X64(bool can_grow)
_IF_X64(bool extendable))
{
#ifdef X64
if (extendable && X64_MODE(di) && di->prefixes != 0 /*optimization*/) {
/* Note that Intel's table 3-1 on +r possibilities is incorrect:
* it lists rex.r, while Table 2-4 lists rex.b which is correct.
*/
if (TEST(PREFIX_REX_B, di->prefixes))
reg32 = reg32 + 8;
else
reg32 = reg8_alternative(di, reg32, di->prefixes);
}
#endif
if (addr) {
#ifdef X64
if (X64_MODE(di)) {
CLIENT_ASSERT(default_64, "addr-based size must be default 64");
if (!can_shrink || !TEST(PREFIX_ADDR, di->prefixes))
return reg_32_to_64(reg32);
/* else leave 32 (it's addr32 not addr16) */
} else
#endif
if (can_shrink && TEST(PREFIX_ADDR, di->prefixes))
return reg_32_to_16(reg32);
} else {
#ifdef X64
/* rex.w trumps data prefix */
if (X64_MODE(di) &&
((can_grow && TEST(PREFIX_REX_W, di->prefixes)) ||
(default_64 && (!can_shrink || !TEST(PREFIX_DATA, di->prefixes)))))
return reg_32_to_64(reg32);
else
#endif
if (can_shrink && TEST(PREFIX_DATA, di->prefixes))
return reg_32_to_16(reg32);
}
return reg32;
}
static reg_id_t
ds_seg(decode_info_t *di)
{
if (di->seg_override != REG_NULL) {
#ifdef X64
/* Although the AMD docs say that es,cs,ss,ds prefixes are NOT treated as
* segment override prefixes and instead as NULL prefixes, Intel docs do not
* say that, and both gdb and windbg disassemble as though the prefixes are
* taking effect. We therefore do not suppress those prefixes.
*/
#endif
return di->seg_override;
}
return SEG_DS;
}
static bool
decode_operand(decode_info_t *di, byte optype, opnd_size_t opsize, opnd_t *opnd)
{
/* resolving here, for non-reg, makes for simpler code: though the
* most common types don't need this.
*/
opnd_size_t ressize = resolve_variable_size(di, opsize, false);
switch (optype) {
case TYPE_NONE: *opnd = opnd_create_null(); return true;
case TYPE_REG:
*opnd = opnd_create_reg(opsize);
/* here and below, for all TYPE_*REG*: no need to set size as it's a GPR */
return true;
case TYPE_XREG:
*opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
false /*!shrinkable*/
_IF_X64(true /*d64*/)
_IF_X64(false /*!growable*/)
_IF_X64(false /*!extendable*/)));
return true;
case TYPE_VAR_REG:
*opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
true /*shrinkable*/
_IF_X64(false /*d32*/)
_IF_X64(true /*growable*/)
_IF_X64(false /*!extendable*/)));
return true;
case TYPE_VARZ_REG:
*opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
true /*shrinkable*/
_IF_X64(false /*d32*/)
_IF_X64(false /*!growable*/)
_IF_X64(false /*!extendable*/)));
return true;
case TYPE_VAR_XREG:
*opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
true /*shrinkable*/
_IF_X64(true /*d64*/)
_IF_X64(false /*!growable*/)
_IF_X64(false /*!extendable*/)));
return true;
case TYPE_VAR_REGX:
*opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
false /*!shrinkable*/
_IF_X64(false /*!d64*/)
_IF_X64(true /*growable*/)
_IF_X64(false /*!extendable*/)));
return true;
case TYPE_VAR_ADDR_XREG:
*opnd = opnd_create_reg(resolve_var_reg(di, opsize, true /*addr*/,
true /*shrinkable*/
_IF_X64(true /*d64*/)
_IF_X64(false /*!growable*/)
_IF_X64(false /*!extendable*/)));
return true;
case TYPE_REG_EX:
*opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
false /*!shrink*/
_IF_X64(false /*d32*/)
_IF_X64(false /*!growable*/)
_IF_X64(true /*extendable*/)));
return true;
case TYPE_VAR_REG_EX:
*opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
true /*shrinkable*/
_IF_X64(false /*d32*/)
_IF_X64(true /*growable*/)
_IF_X64(true /*extendable*/)));
return true;
case TYPE_VAR_XREG_EX:
*opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
true /*shrinkable*/
_IF_X64(true /*d64*/)
_IF_X64(false /*!growable*/)
_IF_X64(true /*extendable*/)));
return true;
case TYPE_VAR_REGX_EX:
*opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
false /*!shrink*/
_IF_X64(false /*d64*/)
_IF_X64(true /*growable*/)
_IF_X64(true /*extendable*/)));
return true;
case TYPE_FLOATMEM:
case TYPE_M:
case TYPE_VSIB:
/* ensure referencing memory */
if (di->mod >= 3)
return false;
/* fall through */
case TYPE_E:
case TYPE_Q:
case TYPE_W: return decode_modrm(di, optype, opsize, NULL, opnd);
case TYPE_R:
case TYPE_P_MODRM:
case TYPE_V_MODRM:
/* ensure referencing a register */
if (di->mod != 3)
return false;
return decode_modrm(di, optype, opsize, NULL, opnd);
case TYPE_G:
case TYPE_P:
case TYPE_V:
case TYPE_S:
case TYPE_C:
case TYPE_D: return decode_modrm(di, optype, opsize, opnd, NULL);
case TYPE_I:
*opnd = opnd_create_immed_int(get_immed(di, opsize), ressize);
return true;
case TYPE_1:
CLIENT_ASSERT(opsize == OPSZ_0, "internal decode inconsistency");
*opnd = opnd_create_immed_int(1, ressize);
return true;
case TYPE_FLOATCONST:
CLIENT_ASSERT(opsize == OPSZ_0, "internal decode inconsistency");
/* i#386: avoid floating-point instructions */
*opnd = opnd_create_immed_float_for_opcode(di->opcode);
return true;
case TYPE_J:
if (di->seg_override == SEG_JCC_NOT_TAKEN || di->seg_override == SEG_JCC_TAKEN) {
/* SEG_DS - taken, pt */
/* SEG_CS - not taken, pn */
/* starting from RH9 I see code using this */
LOG(THREAD_GET, LOG_EMIT, 5, "disassemble: branch hint %s:\n",
di->seg_override == SEG_JCC_TAKEN ? "pt" : "pn");
if (di->seg_override == SEG_JCC_NOT_TAKEN)
di->prefixes |= PREFIX_JCC_NOT_TAKEN;
else
di->prefixes |= PREFIX_JCC_TAKEN;
di->seg_override = REG_NULL;
STATS_INC(num_branch_hints);
}
/* just ignore other segment prefixes -- don't assert */
*opnd = opnd_create_pc((app_pc)get_immed(di, opsize));
return true;
case TYPE_A: {
/* ok since instr_info_t fields */
CLIENT_ASSERT(!X64_MODE(di), "x64 has no type A instructions");
CLIENT_ASSERT(opsize == OPSZ_6_irex10_short4, "decode A operand error");
/* just ignore segment prefixes -- don't assert */
if (TEST(PREFIX_DATA, di->prefixes)) {
/* 4-byte immed */
ptr_int_t val = get_immed(di, opsize);
*opnd = opnd_create_far_pc((ushort)(((ptr_int_t)val & 0xffff0000) >> 16),
(app_pc)((ptr_int_t)val & 0x0000ffff));
} else {
/* 6-byte immed */
/* ok since instr_info_t fields */
CLIENT_ASSERT(di->size_immed == OPSZ_6 && di->size_immed2 == OPSZ_6,
"decode A operand 6-byte immed error");
ASSERT(CHECK_TRUNCATE_TYPE_short(di->immed));
*opnd = opnd_create_far_pc((ushort)(short)di->immed, (app_pc)di->immed2);
di->size_immed = OPSZ_NA;
di->size_immed2 = OPSZ_NA;
}
return true;
}
case TYPE_O: {
/* no modrm byte, offset follows directly */
ptr_int_t immed = get_immed(di, resolve_addr_size(di));
*opnd = opnd_create_far_abs_addr(di->seg_override, (void *)immed, ressize);
return true;
}
case TYPE_X:
/* this means the memory address DS:(E)SI */
if (!X64_MODE(di) && TEST(PREFIX_ADDR, di->prefixes)) {
*opnd =
opnd_create_far_base_disp(ds_seg(di), REG_SI, REG_NULL, 0, 0, ressize);
} else if (!X64_MODE(di) || TEST(PREFIX_ADDR, di->prefixes)) {
*opnd =
opnd_create_far_base_disp(ds_seg(di), REG_ESI, REG_NULL, 0, 0, ressize);
} else {
*opnd =
opnd_create_far_base_disp(ds_seg(di), REG_RSI, REG_NULL, 0, 0, ressize);
}
return true;
case TYPE_Y:
/* this means the memory address ES:(E)DI */
if (!X64_MODE(di) && TEST(PREFIX_ADDR, di->prefixes))
*opnd = opnd_create_far_base_disp(SEG_ES, REG_DI, REG_NULL, 0, 0, ressize);
else if (!X64_MODE(di) || TEST(PREFIX_ADDR, di->prefixes))
*opnd = opnd_create_far_base_disp(SEG_ES, REG_EDI, REG_NULL, 0, 0, ressize);
else
*opnd = opnd_create_far_base_disp(SEG_ES, REG_RDI, REG_NULL, 0, 0, ressize);
return true;
case TYPE_XLAT:
/* this means the memory address DS:(E)BX+AL */
if (!X64_MODE(di) && TEST(PREFIX_ADDR, di->prefixes))
*opnd = opnd_create_far_base_disp(ds_seg(di), REG_BX, REG_AL, 1, 0, ressize);
else if (!X64_MODE(di) || TEST(PREFIX_ADDR, di->prefixes))
*opnd = opnd_create_far_base_disp(ds_seg(di), REG_EBX, REG_AL, 1, 0, ressize);
else
*opnd = opnd_create_far_base_disp(ds_seg(di), REG_RBX, REG_AL, 1, 0, ressize);
return true;
case TYPE_MASKMOVQ:
/* this means the memory address DS:(E)DI */
if (!X64_MODE(di) && TEST(PREFIX_ADDR, di->prefixes)) {
*opnd =
opnd_create_far_base_disp(ds_seg(di), REG_DI, REG_NULL, 0, 0, ressize);
} else if (!X64_MODE(di) || TEST(PREFIX_ADDR, di->prefixes)) {
*opnd =
opnd_create_far_base_disp(ds_seg(di), REG_EDI, REG_NULL, 0, 0, ressize);
} else {
*opnd =
opnd_create_far_base_disp(ds_seg(di), REG_RDI, REG_NULL, 0, 0, ressize);
}
return true;
case TYPE_INDIR_REG:
/* FIXME: how do we know the data size? For now just use the reg size: our only use
* of this does not have a varying hardcoded reg, fortunately. */
*opnd = opnd_create_base_disp(opsize, REG_NULL, 0, 0, reg_get_size(opsize));
return true;
case TYPE_INDIR_VAR_XREG: /* indirect reg varies by ss only, base is 4x8,
* opsize varies by data16 */
case TYPE_INDIR_VAR_REG: /* indirect reg varies by ss only, base is 4x8,
* opsize varies by rex and data16 */
case TYPE_INDIR_VAR_XIREG: /* indirect reg varies by ss only, base is 4x8,
* opsize varies by data16 except on 64-bit Intel */
case TYPE_INDIR_VAR_XREG_OFFS_1: /* TYPE_INDIR_VAR_XREG + an offset */
case TYPE_INDIR_VAR_XREG_OFFS_8: /* TYPE_INDIR_VAR_XREG + an offset + scale */
case TYPE_INDIR_VAR_XREG_OFFS_N: /* TYPE_INDIR_VAR_XREG + an offset + scale */
case TYPE_INDIR_VAR_XIREG_OFFS_1: /* TYPE_INDIR_VAR_XIREG + an offset + scale */
case TYPE_INDIR_VAR_REG_OFFS_2: /* TYPE_INDIR_VAR_REG + offset + scale */
case TYPE_INDIR_VAR_XREG_SIZEx8: /* TYPE_INDIR_VAR_XREG + scale */
case TYPE_INDIR_VAR_REG_SIZEx2: /* TYPE_INDIR_VAR_REG + scale */
case TYPE_INDIR_VAR_REG_SIZEx3x5: /* TYPE_INDIR_VAR_REG + scale */
{
reg_id_t reg = resolve_var_reg(di, opsize, true /*doesn't matter*/,
false /*!shrinkable*/
_IF_X64(true /*d64*/) _IF_X64(false /*!growable*/)
_IF_X64(false /*!extendable*/));
opnd_size_t sz =
resolve_variable_size(di, indir_var_reg_size(di, optype), false /*not reg*/);
/* NOTE - needs to match size in opnd_type_ok() and instr_create.h */
*opnd = opnd_create_base_disp(
reg, REG_NULL, 0, indir_var_reg_offs_factor(optype) * opnd_size_in_bytes(sz),
sz);
return true;
}
case TYPE_INDIR_E:
/* how best to mark this as indirect?
* in current usage decode_modrm will be treated as indirect, becoming
* a base_disp operand, vs. an immed, which becomes a pc operand
* besides, Ap is just as indirect as i_Ep!
*/
return decode_operand(di, TYPE_E, opsize, opnd);
case TYPE_L: {
/* part of AVX: top 4 bits of 8-bit immed select xmm/ymm register */
ptr_int_t immed = get_immed(di, OPSZ_1);
reg_id_t reg = (reg_id_t)(immed & 0xf0) >> 4;
*opnd = opnd_create_reg(((TEST(PREFIX_VEX_L, di->prefixes) &&
/* see .LIG notes above */
expand_subreg_size(opsize) != OPSZ_16)
? REG_START_YMM
: REG_START_XMM) +
reg);
opnd_set_size(opnd, resolve_variable_size(di, opsize, true /*is reg*/));
return true;
}
case TYPE_H: {
/* part of AVX: vex.vvvv selects xmm/ymm register */
reg_id_t reg = (~di->vex_vvvv) & 0xf; /* bit-inverted */
*opnd = opnd_create_reg(((TEST(PREFIX_VEX_L, di->prefixes) &&
/* see .LIG notes above */
expand_subreg_size(opsize) != OPSZ_16)
? REG_START_YMM
: REG_START_XMM) +
reg);
opnd_set_size(opnd, resolve_variable_size(di, opsize, true /*is reg*/));
return true;
}
case TYPE_B: {
/* Part of XOP/AVX/AVX-512: vex.vvvv or evex.vvvv selects general-purpose
* register.
*/
if (di->evex_encoded)
*opnd = opnd_create_reg(decode_reg(DECODE_REG_EVEX, di, optype, opsize));
else
*opnd = opnd_create_reg(decode_reg(DECODE_REG_VEX, di, optype, opsize));
/* no need to set size as it's a GPR */
return true;
}
case TYPE_K_MODRM: {
/* part of AVX-512: modrm.rm selects opmask register or mem addr */
if (di->mod != 3) {
return decode_modrm(di, optype, opsize, NULL, opnd);
}
/* fall through */
}
case TYPE_K_MODRM_R: {
/* part of AVX-512: modrm.rm selects opmask register */
*opnd = opnd_create_reg(decode_reg(DECODE_REG_RM, di, optype, opsize));
return true;
}
case TYPE_K_REG: {
/* part of AVX-512: modrm.reg selects opmask register */
*opnd = opnd_create_reg(decode_reg(DECODE_REG_REG, di, optype, opsize));
return true;
}
case TYPE_K_VEX: {
/* part of AVX-512: vex.vvvv selects opmask register */
*opnd = opnd_create_reg(decode_reg(DECODE_REG_VEX, di, optype, opsize));
return true;
}
case TYPE_K_EVEX: {
/* part of AVX-512: evex.aaa selects opmask register */
*opnd = opnd_create_reg(decode_reg(DECODE_REG_OPMASK, di, optype, opsize));
return true;
}
default:
/* ok to assert, types coming only from instr_info_t */
CLIENT_ASSERT(false, "decode error: unknown operand type");
}
return false;
}
dr_pred_type_t
decode_predicate_from_instr_info(uint opcode, const instr_info_t *info)
{
if (TESTANY(HAS_PRED_CC | HAS_PRED_COMPLEX, info->flags)) {
if (TEST(HAS_PRED_CC, info->flags))
return DR_PRED_O + instr_cmovcc_to_jcc(opcode) - OP_jo;
else
return DR_PRED_COMPLEX;
}
return DR_PRED_NONE;
}
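/* Illustrative note (annotation, not part of the original source): for a
 * cc-predicated opcode such as OP_cmovo, instr_cmovcc_to_jcc maps it to the
 * matching jcc (OP_jo), so the value computed above is
 * DR_PRED_O + (OP_jo - OP_jo) == DR_PRED_O.
 */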
/****************************************************************************
* Exported routines
*/
/* Decodes only enough of the instruction at address pc to determine
* its eflags usage, which is returned in usage as EFLAGS_ constants
* or'ed together.
* This corresponds to halfway between Level 1 and Level 2: a Level 1 decoding
* plus eflags information (usually only at Level 2).
* Returns the address of the next byte after the decoded instruction.
* Returns NULL on decoding an invalid instruction.
*
* N.B.: an instruction that has an "undefined" effect on eflags is considered
* to write to eflags. This is fine since programs shouldn't be reading
* eflags after an undefined modification to them, but a weird program that
* relies on some undefined eflag thing might behave differently under dynamo
* than not!
*/
byte *
decode_eflags_usage(dcontext_t *dcontext, byte *pc, uint *usage,
dr_opnd_query_flags_t flags)
{
const instr_info_t *info;
decode_info_t di;
IF_X64(di.x86_mode = get_x86_mode(dcontext));
/* don't decode immeds, instead use decode_next_pc, it's faster */
read_instruction(pc, pc, &info, &di, true /* just opcode */ _IF_DEBUG(true));
*usage = instr_eflags_conditionally(
info->eflags, decode_predicate_from_instr_info(info->type, info), flags);
pc = decode_next_pc(dcontext, pc);
/* failure handled fine -- we'll go ahead and return the NULL */
return pc;
}
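/* Illustrative caller sketch (annotation, not part of the original source; the
 * query-flags value and the consumer routine are assumptions, not taken from
 * this file):
 *   uint usage;
 *   byte *next = decode_eflags_usage(dcontext, pc, &usage, DR_QUERY_DEFAULT);
 *   if (next != NULL && usage != 0)
 *       record_eflags_dependence(pc, usage); // hypothetical consumer
 */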
/* Decodes the opcode and eflags usage of instruction at address pc
* into instr.
* This corresponds to a Level 2 decoding.
* Assumes that instr is already initialized, but uses the x86/x64 mode
* for the current thread rather than that set in instr.
* If caller is re-using same instr struct over multiple decodings,
* should call instr_reset or instr_reuse.
* Returns the address of the next byte after the decoded instruction.
* Returns NULL on decoding an invalid instruction.
*/
byte *
decode_opcode(dcontext_t *dcontext, byte *pc, instr_t *instr)
{
const instr_info_t *info;
decode_info_t di;
int sz;
#ifdef X64
/* PR 251479: we need to know about all rip-relative addresses.
* Since change/setting raw bits invalidates, we must set this
* on every return. */
uint rip_rel_pos;
#endif
IF_X64(di.x86_mode = instr_get_x86_mode(instr));
/* when we pass true to read_instruction it doesn't decode immeds,
 * so we have to call decode_next_pc, but that ends up being faster
* than decoding immeds!
*/
read_instruction(pc, pc, &info, &di,
true /* just opcode */
_IF_DEBUG(!TEST(INSTR_IGNORE_INVALID, instr->flags)));
sz = decode_sizeof(dcontext, pc, NULL _IF_X64(&rip_rel_pos));
IF_X64(instr_set_x86_mode(instr, get_x86_mode(dcontext)));
instr_set_opcode(instr, info->type);
/* read_instruction sets opcode to OP_INVALID for illegal instr.
* decode_sizeof will return 0 for _some_ illegal instrs, so we
* check it first since it's faster than instr_valid, but we have to
* also check instr_valid to catch all illegal instrs.
*/
if (sz == 0 || !instr_valid(instr)) {
CLIENT_ASSERT(!instr_valid(instr), "decode_opcode: invalid instr");
return NULL;
}
instr->eflags = info->eflags;
instr_set_eflags_valid(instr, true);
/* operands are NOT set */
instr_set_operands_valid(instr, false);
/* raw bits are valid though and crucial for encoding */
instr_set_raw_bits(instr, pc, sz);
/* must set rip_rel_pos after setting raw bits */
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return pc + sz;
}
#if defined(DEBUG) && !defined(STANDALONE_DECODER)
/* PR 215143: we must resolve variable sizes at decode time */
static bool
check_is_variable_size(opnd_t op)
{
if (opnd_is_memory_reference(op) ||
/* reg_get_size() fails on fp registers since no OPSZ for them */
(opnd_is_reg(op) && !reg_is_fp(opnd_get_reg(op))))
return !is_variable_size(opnd_get_size(op));
/* else no legitimate size to check */
return true;
}
#endif
/* Decodes the instruction at address pc into instr, filling in the
* instruction's opcode, eflags usage, prefixes, and operands.
* This corresponds to a Level 3 decoding.
* Assumes that instr is already initialized, but uses the x86/x64 mode
* for the current thread rather than that set in instr.
* If caller is re-using same instr struct over multiple decodings,
* should call instr_reset or instr_reuse.
* Returns the address of the next byte after the decoded instruction.
* Returns NULL on decoding an invalid instruction.
*/
static byte *
decode_common(dcontext_t *dcontext, byte *pc, byte *orig_pc, instr_t *instr)
{
const instr_info_t *info;
decode_info_t di;
byte *next_pc;
int instr_num_dsts = 0, instr_num_srcs = 0;
opnd_t dsts[8];
opnd_t srcs[8];
CLIENT_ASSERT(instr->opcode == OP_INVALID || instr->opcode == OP_UNDECODED,
"decode: instr is already decoded, may need to call instr_reset()");
IF_X64(di.x86_mode = get_x86_mode(dcontext));
next_pc = read_instruction(pc, orig_pc, &info, &di,
false /* not just opcode,
decode operands too */
_IF_DEBUG(!TEST(INSTR_IGNORE_INVALID, instr->flags)));
instr_set_opcode(instr, info->type);
IF_X64(instr_set_x86_mode(instr, di.x86_mode));
/* failure up to this point handled fine -- we set opcode to OP_INVALID */
if (next_pc == NULL) {
LOG(THREAD, LOG_INTERP, 3, "decode: invalid instr at " PFX "\n", pc);
CLIENT_ASSERT(!instr_valid(instr), "decode: invalid instr");
return NULL;
}
instr->eflags = info->eflags;
instr_set_eflags_valid(instr, true);
/* since we don't use set_src/set_dst we must explicitly say they're valid */
instr_set_operands_valid(instr, true);
/* read_instruction doesn't set di.len since only needed for rip-relative opnds */
IF_X64(
CLIENT_ASSERT_TRUNCATE(di.len, int, next_pc - pc, "internal truncation error"));
di.len = (int)(next_pc - pc);
di.opcode = info->type; /* used for opnd_create_immed_float_for_opcode */
instr->prefixes |= di.prefixes;
/* operands */
do {
if (info->dst1_type != TYPE_NONE) {
if (!decode_operand(&di, info->dst1_type, info->dst1_size,
&(dsts[instr_num_dsts++])))
goto decode_invalid;
ASSERT(check_is_variable_size(dsts[instr_num_dsts - 1]));
}
if (info->dst2_type != TYPE_NONE) {
if (!decode_operand(&di, info->dst2_type, info->dst2_size,
&(dsts[instr_num_dsts++])))
goto decode_invalid;
ASSERT(check_is_variable_size(dsts[instr_num_dsts - 1]));
}
if (info->src1_type != TYPE_NONE) {
if (!decode_operand(&di, info->src1_type, info->src1_size,
&(srcs[instr_num_srcs++])))
goto decode_invalid;
ASSERT(check_is_variable_size(srcs[instr_num_srcs - 1]));
}
if (info->src2_type != TYPE_NONE) {
if (!decode_operand(&di, info->src2_type, info->src2_size,
&(srcs[instr_num_srcs++])))
goto decode_invalid;
ASSERT(check_is_variable_size(srcs[instr_num_srcs - 1]));
}
if (info->src3_type != TYPE_NONE) {
if (!decode_operand(&di, info->src3_type, info->src3_size,
&(srcs[instr_num_srcs++])))
goto decode_invalid;
ASSERT(check_is_variable_size(srcs[instr_num_srcs - 1]));
}
/* extra operands:
* we take advantage of the fact that all instructions that need extra
* operands have only one encoding, so the code field points to instr_info_t
* structures containing the extra operands
*/
if ((info->flags & HAS_EXTRA_OPERANDS) != 0) {
if ((info->flags & EXTRAS_IN_CODE_FIELD) != 0)
info = (const instr_info_t *)(info->code);
else /* extra operands are in next entry */
info = info + 1;
} else
break;
} while (true);
/* some operands add to di.prefixes so we copy again */
instr->prefixes |= di.prefixes;
if (di.seg_override == SEG_FS)
instr->prefixes |= PREFIX_SEG_FS;
if (di.seg_override == SEG_GS)
instr->prefixes |= PREFIX_SEG_GS;
/* now copy operands into their real slots */
instr_set_num_opnds(dcontext, instr, instr_num_dsts, instr_num_srcs);
if (instr_num_dsts > 0) {
memcpy(instr->dsts, dsts, instr_num_dsts * sizeof(opnd_t));
}
if (instr_num_srcs > 0) {
/* remember that src0 is static */
instr->src0 = srcs[0];
if (instr_num_srcs > 1) {
memcpy(instr->srcs, &(srcs[1]), (instr_num_srcs - 1) * sizeof(opnd_t));
}
}
if (TESTANY(HAS_PRED_CC | HAS_PRED_COMPLEX, info->flags))
instr_set_predicate(instr, decode_predicate_from_instr_info(di.opcode, info));
/* check for invalid prefixes that depend on operand types */
if (TEST(PREFIX_LOCK, di.prefixes)) {
/* check for invalid opcode, list on p3-397 of IA-32 vol 2 */
switch (instr_get_opcode(instr)) {
case OP_add:
case OP_adc:
case OP_and:
case OP_btc:
case OP_btr:
case OP_bts:
case OP_cmpxchg:
case OP_cmpxchg8b:
case OP_dec:
case OP_inc:
case OP_neg:
case OP_not:
case OP_or:
case OP_sbb:
case OP_sub:
case OP_xor:
case OP_xadd:
case OP_xchg: {
/* still illegal unless dest is mem op rather than src */
CLIENT_ASSERT(instr->num_dsts > 0, "internal lock prefix check error");
if (!opnd_is_memory_reference(instr->dsts[0])) {
LOG(THREAD, LOG_INTERP, 3, "decode: invalid lock prefix at " PFX "\n",
pc);
goto decode_invalid;
}
break;
}
default: {
LOG(THREAD, LOG_INTERP, 3, "decode: invalid lock prefix at " PFX "\n", pc);
goto decode_invalid;
}
}
}
/* PREFIX_XRELEASE is allowed w/o LOCK on mov_st, but use of it or PREFIX_XACQUIRE
* in other situations does not result in #UD so we ignore.
*/
if (orig_pc != pc) {
/* We do not want to copy when encoding and condone an invalid
* relative target
*/
instr_set_raw_bits_valid(instr, false);
instr_set_translation(instr, orig_pc);
} else {
/* we set raw bits AFTER setting all srcs and dsts b/c setting
* a src or dst marks instr as having invalid raw bits
*/
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(next_pc - pc)));
instr_set_raw_bits(instr, pc, (uint)(next_pc - pc));
#ifdef X64
if (X64_MODE(&di) && TEST(HAS_MODRM, info->flags) && di.mod == 0 && di.rm == 5) {
CLIENT_ASSERT(di.disp_abs > di.start_pc, "decode: internal rip-rel error");
CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_int(di.disp_abs - di.start_pc),
"decode: internal rip-rel error");
/* must do this AFTER setting raw bits to avoid being invalidated */
instr_set_rip_rel_pos(instr, (int)(di.disp_abs - di.start_pc));
}
#endif
}
return next_pc;
decode_invalid:
instr_set_operands_valid(instr, false);
instr_set_opcode(instr, OP_INVALID);
return NULL;
}
byte *
decode(dcontext_t *dcontext, byte *pc, instr_t *instr)
{
return decode_common(dcontext, pc, pc, instr);
}
byte *
decode_from_copy(dcontext_t *dcontext, byte *copy_pc, byte *orig_pc, instr_t *instr)
{
return decode_common(dcontext, copy_pc, orig_pc, instr);
}
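/* Illustrative usage sketch (annotation, not part of the original source): a
 * typical Level 3 decode loop over a code region, reusing one instr_t via
 * instr_reset as the comments above require:
 *   instr_t instr;
 *   instr_init(dcontext, &instr);
 *   for (byte *pc = start; pc != NULL && pc < end; ) {
 *       instr_reset(dcontext, &instr);
 *       pc = decode(dcontext, pc, &instr);
 *   }
 *   instr_free(dcontext, &instr);
 */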
const instr_info_t *
get_next_instr_info(const instr_info_t *info)
{
return (const instr_info_t *)(info->code);
}
byte
decode_first_opcode_byte(int opcode)
{
const instr_info_t *info = op_instr[opcode];
return (byte)((info->opcode & 0x00ff0000) >> 16);
}
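/* Annotation (not part of the original source): the mask and shift above pull
 * the first opcode byte out of the packed instr_info_t opcode field. */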
DR_API
const char *
decode_opcode_name(int opcode)
{
const instr_info_t *info = op_instr[opcode];
return info->name;
}
const instr_info_t *
opcode_to_encoding_info(uint opc, dr_isa_mode_t isa_mode)
{
return op_instr[opc];
}
app_pc
dr_app_pc_as_jump_target(dr_isa_mode_t isa_mode, app_pc pc)
{
return pc;
}
app_pc
dr_app_pc_as_load_target(dr_isa_mode_t isa_mode, app_pc pc)
{
return pc;
}
#ifdef DEBUG
void
decode_debug_checks_arch(void)
{
/* empty */
}
#endif
#ifdef DECODE_UNIT_TEST
# include "instr_create.h"
/* FIXME: Tried putting this inside a separate unit-decode.c file, but
* required creating a unit-decode_table.c file. Since the
* infrastructure is not fully set up, currently leaving this here
* FIXME: beef up to check if something went wrong
*/
static bool
unit_check_decode_ff_opcode()
{
static int do_once = 0;
instr_t instr;
byte modrm, sib;
byte raw_bytes[] = {
0xff, 0x0, 0x0, 0xaa, 0xbb, 0xcc, 0xdd, 0xee,
0xff, 0xab, 0xbc, 0xcd, 0xde, 0xef, 0xfa,
};
app_pc next_pc = NULL;
for (modrm = 0x0; modrm < 0xff; modrm++) {
raw_bytes[1] = modrm;
for (sib = 0x0; sib < 0xff; sib++) {
raw_bytes[2] = sib;
/* set up instr for decode_opcode */
instr_init(GLOBAL_DCONTEXT, &instr);
instr.bytes = raw_bytes;
instr.length = 15;
instr_set_raw_bits_valid(&instr, true);
instr_set_operands_valid(&instr, false);
next_pc = decode_opcode(GLOBAL_DCONTEXT, instr.bytes, &instr);
if (next_pc != NULL && instr.opcode != OP_INVALID &&
instr.opcode != OP_UNDECODED) {
print_file(STDERR, "## %02x %02x %02x len=%d\n", instr.bytes[0],
instr.bytes[1], instr.bytes[2], instr.length);
}
}
}
return 0;
}
/* Standalone building is still broken so I tested this by calling
* from a real DR build.
*/
# define CHECK_ENCODE_OPCODE(dcontext, instr, pc, opc, ...) \
instr = INSTR_CREATE_##opc(dcontext, ##__VA_ARGS__); \
instr_encode(dcontext, instr, pc); \
instr_reset(dcontext, instr); \
decode(dcontext, pc, instr); \
/* FIXME: use EXPECT */ \
CLIENT_ASSERT(instr_get_opcode(instr) == OP_##opc, "unit test"); \
instr_destroy(dcontext, instr);
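/* Annotation (not part of the original source): each CHECK_ENCODE_OPCODE use
 * round-trips one instruction -- create it with INSTR_CREATE_<opc>, encode it
 * into pc, reset and re-decode it, then assert the decoded opcode is OP_<opc>.
 */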
/* FIXME: case 8212: add checks for every single instr type */
static bool
unit_check_sse3()
{
dcontext_t *dcontext = get_thread_private_dcontext();
byte buf[32];
instr_t *instr;
CHECK_ENCODE_OPCODE(dcontext, instr, buf, mwait);
CHECK_ENCODE_OPCODE(dcontext, instr, buf, monitor);
CHECK_ENCODE_OPCODE(dcontext, instr, buf, haddpd, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, haddps, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, hsubpd, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, hsubps, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, addsubpd, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, addsubps, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, lddqu, opnd_create_reg(REG_XMM7),
opnd_create_base_disp(REG_NULL, REG_NULL, 0, 0, OPSZ_16));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, movsldup, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, movshdup, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, movddup, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
/* not sse3 but I fixed it at same time so here to test */
CHECK_ENCODE_OPCODE(dcontext, instr, buf, cmpxchg8b,
opnd_create_base_disp(REG_NULL, REG_NULL, 0, 0, OPSZ_8));
return true;
}
int
main()
{
bool res;
standalone_init();
res = unit_check_sse3();
res = unit_check_decode_ff_opcode() && res;
return res;
}
#endif /* DECODE_UNIT_TEST */
| 1 | 16,831 | Maybe add an assert in arch_init or somewhere? | DynamoRIO-dynamorio | c |
@@ -100,6 +100,7 @@ func TestcaseNetworkDelay(
err := cli.Create(ctx, networkDelay.DeepCopy())
framework.ExpectNoError(err, "create network chaos error")
+ // nolint
wait.Poll(time.Second, 15*time.Second, func() (done bool, err error) {
result = probeNetworkCondition(c, networkPeers, ports, false)
if len(result[networkConditionBlocked]) != 0 || len(result[networkConditionSlow]) != 3 { | 1 | // Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package networkchaos
import (
"context"
"net/http"
"time"
. "github.com/onsi/ginkgo"
corev1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/e2e-test/e2e/util"
)
func TestcaseNetworkDelay(
ns string,
cli client.Client,
networkPeers []*corev1.Pod,
ports []uint16,
c http.Client,
) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
By("prepare experiment playground")
for index := range networkPeers {
err := util.WaitE2EHelperReady(c, ports[index])
framework.ExpectNoError(err, "wait e2e helper ready error")
}
result := probeNetworkCondition(c, networkPeers, ports, false)
framework.ExpectEqual(len(result[networkConditionBlocked]), 0)
framework.ExpectEqual(len(result[networkConditionSlow]), 0)
var (
testDelayTcParam = v1alpha1.TcParameter{
Delay: &v1alpha1.DelaySpec{
Latency: "200ms",
Correlation: "25",
Jitter: "0ms",
},
}
testDelayTcParamEvenMoreComplicate = v1alpha1.TcParameter{
Delay: &v1alpha1.DelaySpec{
Latency: "200ms",
Correlation: "25",
Jitter: "0ms",
},
Loss: &v1alpha1.LossSpec{
Loss: "25",
Correlation: "25",
},
Duplicate: &v1alpha1.DuplicateSpec{
Duplicate: "25",
Correlation: "25",
},
Corrupt: &v1alpha1.CorruptSpec{
Corrupt: "25",
Correlation: "25",
},
}
testDelayDuration = pointer.StringPtr("9m")
testDelaySchedulerSpec = &v1alpha1.SchedulerSpec{
Cron: "@every 10m",
}
)
By("normal delay chaos")
networkDelay := makeNetworkDelayChaos(
ns, "network-chaos-1",
map[string]string{"app": "network-peer-0"},
nil, // no target specified
v1alpha1.OnePodMode,
v1alpha1.OnePodMode,
v1alpha1.To,
testDelayTcParam,
testDelayDuration,
testDelaySchedulerSpec,
)
By("Injecting delay for 0")
err := cli.Create(ctx, networkDelay.DeepCopy())
framework.ExpectNoError(err, "create network chaos error")
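// Annotation (not part of the original test): wait.Poll's returned error is not
// checked in these blocks; the framework.ExpectEqual assertions on `result`
// below are what actually verify the injected network condition.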
wait.Poll(time.Second, 15*time.Second, func() (done bool, err error) {
result = probeNetworkCondition(c, networkPeers, ports, false)
if len(result[networkConditionBlocked]) != 0 || len(result[networkConditionSlow]) != 3 {
return false, nil
}
return true, nil
})
framework.ExpectEqual(len(result[networkConditionBlocked]), 0)
framework.ExpectEqual(result[networkConditionSlow], [][]int{{0, 1}, {0, 2}, {0, 3}})
By("recover")
err = cli.Delete(ctx, networkDelay.DeepCopy())
framework.ExpectNoError(err, "delete network chaos error")
wait.Poll(time.Second, 15*time.Second, func() (done bool, err error) {
result = probeNetworkCondition(c, networkPeers, ports, false)
if len(result[networkConditionBlocked]) != 0 || len(result[networkConditionSlow]) != 0 {
return false, nil
}
return true, nil
})
framework.ExpectEqual(len(result[networkConditionBlocked]), 0)
framework.ExpectEqual(len(result[networkConditionSlow]), 0)
networkDelayWithTarget := makeNetworkDelayChaos(
ns, "network-chaos-1",
map[string]string{"app": "network-peer-0"},
map[string]string{"app": "network-peer-1"}, // 0 -> 1 add delays
v1alpha1.OnePodMode,
v1alpha1.OnePodMode,
v1alpha1.To,
testDelayTcParam,
testDelayDuration,
testDelaySchedulerSpec,
)
By("Injecting delay for 0 -> 1")
err = cli.Create(ctx, networkDelayWithTarget.DeepCopy())
framework.ExpectNoError(err, "create network chaos error")
wait.Poll(time.Second, 15*time.Second, func() (done bool, err error) {
result = probeNetworkCondition(c, networkPeers, ports, false)
if len(result[networkConditionBlocked]) != 0 || len(result[networkConditionSlow]) != 1 {
return false, nil
}
return true, nil
})
framework.ExpectEqual(len(result[networkConditionBlocked]), 0)
framework.ExpectEqual(result[networkConditionSlow], [][]int{{0, 1}})
err = cli.Delete(ctx, networkDelayWithTarget.DeepCopy())
framework.ExpectNoError(err, "delete network chaos error")
wait.Poll(time.Second, 15*time.Second, func() (done bool, err error) {
result = probeNetworkCondition(c, networkPeers, ports, false)
if len(result[networkConditionBlocked]) != 0 || len(result[networkConditionSlow]) != 0 {
return false, nil
}
return true, nil
})
framework.ExpectEqual(len(result[networkConditionBlocked]), 0)
framework.ExpectEqual(len(result[networkConditionSlow]), 0)
evenNetworkDelay := makeNetworkDelayChaos(
ns, "network-chaos-2",
map[string]string{"app": "network-peer-0"},
map[string]string{"partition": "0"}, // 0 -> even its partition (idx % 2)
v1alpha1.OnePodMode,
v1alpha1.AllPodMode,
v1alpha1.To,
testDelayTcParam,
testDelayDuration,
testDelaySchedulerSpec,
)
By("Injecting delay for 0 -> even partition")
err = cli.Create(ctx, evenNetworkDelay.DeepCopy())
framework.ExpectNoError(err, "create network chaos error")
wait.Poll(time.Second, 15*time.Second, func() (done bool, err error) {
result = probeNetworkCondition(c, networkPeers, ports, false)
if len(result[networkConditionBlocked]) != 0 || len(result[networkConditionSlow]) != 1 {
return false, nil
}
return true, nil
})
framework.ExpectEqual(len(result[networkConditionBlocked]), 0)
framework.ExpectEqual(result[networkConditionSlow], [][]int{{0, 2}})
By("Injecting delay for 0 -> 1")
err = cli.Create(ctx, networkDelayWithTarget.DeepCopy())
framework.ExpectNoError(err, "create network chaos error")
wait.Poll(time.Second, 15*time.Second, func() (done bool, err error) {
result = probeNetworkCondition(c, networkPeers, ports, false)
if len(result[networkConditionBlocked]) != 0 || len(result[networkConditionSlow]) != 2 {
return false, nil
}
return true, nil
})
framework.ExpectEqual(len(result[networkConditionBlocked]), 0)
framework.ExpectEqual(result[networkConditionSlow], [][]int{{0, 1}, {0, 2}})
err = cli.Delete(ctx, networkDelayWithTarget.DeepCopy())
framework.ExpectNoError(err, "delete network chaos error")
wait.Poll(time.Second, 15*time.Second, func() (done bool, err error) {
result = probeNetworkCondition(c, networkPeers, ports, false)
if len(result[networkConditionBlocked]) != 0 || len(result[networkConditionSlow]) != 1 {
return false, nil
}
return true, nil
})
framework.ExpectEqual(len(result[networkConditionBlocked]), 0)
framework.ExpectEqual(result[networkConditionSlow], [][]int{{0, 2}})
err = cli.Delete(ctx, evenNetworkDelay.DeepCopy())
framework.ExpectNoError(err, "delete network chaos error")
wait.Poll(time.Second, 15*time.Second, func() (done bool, err error) {
result = probeNetworkCondition(c, networkPeers, ports, false)
if len(result[networkConditionBlocked]) != 0 || len(result[networkConditionSlow]) != 0 {
return false, nil
}
return true, nil
})
framework.ExpectEqual(len(result[networkConditionBlocked]), 0)
framework.ExpectEqual(len(result[networkConditionSlow]), 0)
complicateNetem := makeNetworkDelayChaos(
ns, "network-chaos-3",
map[string]string{"app": "network-peer-0"},
nil, // no target specified
v1alpha1.OnePodMode,
v1alpha1.OnePodMode,
v1alpha1.To,
testDelayTcParamEvenMoreComplicate,
testDelayDuration,
testDelaySchedulerSpec,
)
By("Injecting complicate chaos for 0")
err = cli.Create(ctx, complicateNetem.DeepCopy())
framework.ExpectNoError(err, "create network chaos error")
wait.Poll(time.Second, 15*time.Second, func() (done bool, err error) {
result = probeNetworkCondition(c, networkPeers, ports, false)
if len(result[networkConditionBlocked]) != 0 || len(result[networkConditionSlow]) != 3 {
return false, nil
}
return true, nil
})
framework.ExpectEqual(len(result[networkConditionBlocked]), 0)
framework.ExpectEqual(result[networkConditionSlow], [][]int{{0, 1}, {0, 2}, {0, 3}})
By("recover")
err = cli.Delete(ctx, complicateNetem.DeepCopy())
framework.ExpectNoError(err, "delete network chaos error")
wait.Poll(time.Second, 15*time.Second, func() (done bool, err error) {
result = probeNetworkCondition(c, networkPeers, ports, false)
if len(result[networkConditionBlocked]) != 0 || len(result[networkConditionSlow]) != 0 {
return false, nil
}
return true, nil
})
framework.ExpectEqual(len(result[networkConditionBlocked]), 0)
framework.ExpectEqual(len(result[networkConditionSlow]), 0)
bothDirectionNetem := makeNetworkDelayChaos(
ns, "network-chaos-4",
map[string]string{"app": "network-peer-0"},
map[string]string{"partition": "0"}, // 0 -> even its partition (idx % 2)
v1alpha1.OnePodMode,
v1alpha1.AllPodMode,
v1alpha1.Both,
testDelayTcParam,
testDelayDuration,
testDelaySchedulerSpec,
)
By("Injecting both direction chaos for 0")
err = cli.Create(ctx, bothDirectionNetem.DeepCopy())
framework.ExpectNoError(err, "create network chaos error")
wait.Poll(time.Second, 15*time.Second, func() (done bool, err error) {
result = probeNetworkCondition(c, networkPeers, ports, true)
if len(result[networkConditionBlocked]) != 0 || len(result[networkConditionSlow]) != 2 {
return false, nil
}
return true, nil
})
framework.ExpectEqual(len(result[networkConditionBlocked]), 0)
framework.ExpectEqual(result[networkConditionSlow], [][]int{{0, 2}, {2, 0}})
By("recover")
err = cli.Delete(ctx, bothDirectionNetem.DeepCopy())
framework.ExpectNoError(err, "delete network chaos error")
wait.Poll(time.Second, 15*time.Second, func() (done bool, err error) {
result = probeNetworkCondition(c, networkPeers, ports, true)
if len(result[networkConditionBlocked]) != 0 || len(result[networkConditionSlow]) != 0 {
return false, nil
}
return true, nil
})
framework.ExpectEqual(len(result[networkConditionBlocked]), 0)
framework.ExpectEqual(len(result[networkConditionSlow]), 0)
}
| 1 | 21,522 | I wonder what does this `// nolint` ignore? And what are the standards that you choose which error to fix and which to ignore? | chaos-mesh-chaos-mesh | go |
@@ -441,7 +441,10 @@ public class HttpSolrCall {
} else {
if (!retry) {
// we couldn't find a core to work with, try reloading aliases & this collection
- cores.getZkController().getZkStateReader().aliasesManager.update();
+ if(!cores.getZkController().getZkStateReader().aliasesManager.update()) {
+ //no change. go back
+ return;
+ }
cores.getZkController().zkStateReader.forceUpdateCollection(collectionName);
action = RETRY;
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.servlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.UnsupportedEncodingException;
import java.lang.invoke.MethodHandles;
import java.security.Principal;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import io.opentracing.Span;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.Header;
import org.apache.http.HeaderIterator;
import org.apache.http.HttpEntity;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpOptions;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.entity.InputStreamEntity;
import org.apache.solr.api.ApiBag;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.annotation.SolrThreadSafe;
import org.apache.solr.common.cloud.Aliases;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.MapSolrParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.CommandOperation;
import org.apache.solr.common.util.ContentStream;
import org.apache.solr.common.util.JsonSchemaValidator;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.StrUtils;
import org.apache.solr.common.util.TimeSource;
import org.apache.solr.common.util.Utils;
import org.apache.solr.common.util.ValidatingJsonMap;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrConfig;
import org.apache.solr.core.SolrCore;
import org.apache.solr.handler.ContentStreamHandlerBase;
import org.apache.solr.logging.MDCLoggingContext;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.request.SolrQueryRequestBase;
import org.apache.solr.request.SolrRequestHandler;
import org.apache.solr.request.SolrRequestInfo;
import org.apache.solr.response.QueryResponseWriter;
import org.apache.solr.response.QueryResponseWriterUtil;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.security.AuditEvent;
import org.apache.solr.security.AuditEvent.EventType;
import org.apache.solr.security.AuthenticationPlugin;
import org.apache.solr.security.AuthorizationContext;
import org.apache.solr.security.AuthorizationContext.CollectionRequest;
import org.apache.solr.security.AuthorizationContext.RequestType;
import org.apache.solr.security.AuthorizationResponse;
import org.apache.solr.security.PublicKeyHandler;
import org.apache.solr.servlet.SolrDispatchFilter.Action;
import org.apache.solr.servlet.cache.HttpCacheHeaderUtil;
import org.apache.solr.servlet.cache.Method;
import org.apache.solr.update.processor.DistributingUpdateProcessorFactory;
import org.apache.solr.util.RTimerTree;
import org.apache.solr.util.TimeOut;
import org.apache.solr.util.tracing.GlobalTracer;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MarkerFactory;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
import static org.apache.solr.common.params.CollectionAdminParams.SYSTEM_COLL;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.RELOAD;
import static org.apache.solr.common.params.CommonParams.NAME;
import static org.apache.solr.common.params.CoreAdminParams.ACTION;
import static org.apache.solr.servlet.SolrDispatchFilter.Action.ADMIN;
import static org.apache.solr.servlet.SolrDispatchFilter.Action.FORWARD;
import static org.apache.solr.servlet.SolrDispatchFilter.Action.PASSTHROUGH;
import static org.apache.solr.servlet.SolrDispatchFilter.Action.PROCESS;
import static org.apache.solr.servlet.SolrDispatchFilter.Action.REMOTEQUERY;
import static org.apache.solr.servlet.SolrDispatchFilter.Action.RETRY;
import static org.apache.solr.servlet.SolrDispatchFilter.Action.RETURN;
/**
* This class represents a call made to Solr
**/
@SolrThreadSafe
public class HttpSolrCall {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final String ORIGINAL_USER_PRINCIPAL_HEADER = "originalUserPrincipal";
public static final String INTERNAL_REQUEST_COUNT = "_forwardedCount";
public static final Random random;
static {
// We try to make things reproducible in the context of our tests by initializing the random instance
// based on the current seed
String seed = System.getProperty("tests.seed");
if (seed == null) {
random = new Random();
} else {
random = new Random(seed.hashCode());
}
}
protected final SolrDispatchFilter solrDispatchFilter;
protected final CoreContainer cores;
protected final HttpServletRequest req;
protected final HttpServletResponse response;
protected final boolean retry;
protected SolrCore core = null;
protected SolrQueryRequest solrReq = null;
private boolean mustClearSolrRequestInfo = false;
protected SolrRequestHandler handler = null;
protected final SolrParams queryParams;
protected String path;
protected Action action;
protected String coreUrl;
protected SolrConfig config;
protected Map<String, Integer> invalidStates;
//The states of client that is invalid in this request
protected String origCorename; // What's in the URL path; might reference a collection/alias or a Solr core name
protected List<String> collectionsList; // The list of SolrCloud collections if in SolrCloud (usually 1)
public RequestType getRequestType() {
return requestType;
}
protected RequestType requestType;
public HttpSolrCall(SolrDispatchFilter solrDispatchFilter, CoreContainer cores,
HttpServletRequest request, HttpServletResponse response, boolean retry) {
this.solrDispatchFilter = solrDispatchFilter;
this.cores = cores;
this.req = request;
this.response = response;
this.retry = retry;
this.requestType = RequestType.UNKNOWN;
req.setAttribute(HttpSolrCall.class.getName(), this);
queryParams = SolrRequestParsers.parseQueryString(req.getQueryString());
// set a request timer which can be reused by requests if needed
req.setAttribute(SolrRequestParsers.REQUEST_TIMER_SERVLET_ATTRIBUTE, new RTimerTree());
// put the core container in request attribute
req.setAttribute("org.apache.solr.CoreContainer", cores);
path = ServletUtils.getPathAfterContext(req);
}
public String getPath() {
return path;
}
public HttpServletRequest getReq() {
return req;
}
public SolrCore getCore() {
return core;
}
public SolrParams getQueryParams() {
return queryParams;
}
protected Aliases getAliases() {
return cores.isZooKeeperAware() ? cores.getZkController().getZkStateReader().getAliases() : Aliases.EMPTY;
}
/** The collection(s) referenced in this request. Populated in {@link #init()}. Not null. */
public List<String> getCollectionsList() {
return collectionsList != null ? collectionsList : Collections.emptyList();
}
protected void init() throws Exception {
// check for management path
String alternate = cores.getManagementPath();
if (alternate != null && path.startsWith(alternate)) {
path = path.substring(0, alternate.length());
}
// unused feature ?
int idx = path.indexOf(':');
if (idx > 0) {
// save the portion after the ':' for a 'handler' path parameter
path = path.substring(0, idx);
}
// Check for container handlers
handler = cores.getRequestHandler(path);
if (handler != null) {
solrReq = SolrRequestParsers.DEFAULT.parse(null, path, req);
solrReq.getContext().put(CoreContainer.class.getName(), cores);
requestType = RequestType.ADMIN;
action = ADMIN;
return;
}
// Parse a core or collection name from the path and attempt to see if it's a core name
idx = path.indexOf("/", 1);
if (idx > 1) {
origCorename = path.substring(1, idx);
// Try to resolve a Solr core name
core = cores.getCore(origCorename);
if (core != null) {
path = path.substring(idx);
} else {
if (cores.isCoreLoading(origCorename)) { // extra mem barriers, so don't look at this before trying to get core
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "SolrCore is loading");
}
// the core may have just finished loading
core = cores.getCore(origCorename);
if (core != null) {
path = path.substring(idx);
} else {
if (!cores.isZooKeeperAware()) {
core = cores.getCore("");
}
}
}
}
if (cores.isZooKeeperAware()) {
// init collectionList (usually one name but not when there are aliases)
String def = core != null ? core.getCoreDescriptor().getCollectionName() : origCorename;
collectionsList = resolveCollectionListOrAlias(queryParams.get(COLLECTION_PROP, def)); // &collection= takes precedence
if (core == null) {
// lookup core from collection, or route away if need to
String collectionName = collectionsList.isEmpty() ? null : collectionsList.get(0); // route to 1st
//TODO try the other collections if we can't find a local replica of the first? (and do the same in V2HttpSolrCall)
boolean isPreferLeader = (path.endsWith("/update") || path.contains("/update/"));
core = getCoreByCollection(collectionName, isPreferLeader); // find a local replica/core for the collection
if (core != null) {
if (idx > 0) {
path = path.substring(idx);
}
} else {
// if we couldn't find it locally, look on other nodes
if (idx > 0) {
extractRemotePath(collectionName, origCorename);
if (action == REMOTEQUERY) {
path = path.substring(idx);
return;
}
}
//core is not available locally or remotely
autoCreateSystemColl(collectionName);
if (action != null) return;
}
}
}
// With a valid core...
if (core != null) {
config = core.getSolrConfig();
// get or create/cache the parser for the core
SolrRequestParsers parser = config.getRequestParsers();
// Determine the handler from the url path if not set
// (we might already have selected the cores handler)
extractHandlerFromURLPath(parser);
if (action != null) return;
// With a valid handler and a valid core...
if (handler != null) {
// if not a /select, create the request
if (solrReq == null) {
solrReq = parser.parse(core, path, req);
}
invalidStates = checkStateVersionsAreValid(solrReq.getParams().get(CloudSolrClient.STATE_VERSION));
addCollectionParamIfNeeded(getCollectionsList());
action = PROCESS;
return; // we are done with a valid handler
}
}
log.debug("no handler or core retrieved for {}, follow through...", path);
action = PASSTHROUGH;
}
protected void autoCreateSystemColl(String corename) throws Exception {
if (core == null &&
SYSTEM_COLL.equals(corename) &&
"POST".equals(req.getMethod()) &&
!cores.getZkController().getClusterState().hasCollection(SYSTEM_COLL)) {
log.info("Going to auto-create {} collection", SYSTEM_COLL);
SolrQueryResponse rsp = new SolrQueryResponse();
String repFactor = String.valueOf(Math.min(3, cores.getZkController().getClusterState().getLiveNodes().size()));
cores.getCollectionsHandler().handleRequestBody(new LocalSolrQueryRequest(null,
new ModifiableSolrParams()
.add(ACTION, CREATE.toString())
.add( NAME, SYSTEM_COLL)
.add(REPLICATION_FACTOR, repFactor)), rsp);
if (rsp.getValues().get("success") == null) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Could not auto-create " + SYSTEM_COLL + " collection: "+ Utils.toJSONString(rsp.getValues()));
}
TimeOut timeOut = new TimeOut(3, TimeUnit.SECONDS, TimeSource.NANO_TIME);
for (; ; ) {
if (cores.getZkController().getClusterState().getCollectionOrNull(SYSTEM_COLL) != null) {
break;
} else {
if (timeOut.hasTimedOut()) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Could not find " + SYSTEM_COLL + " collection even after 3 seconds");
}
timeOut.sleep(50);
}
}
action = RETRY;
}
}
/**
* Resolves the parameter as a potential comma delimited list of collections, and resolves aliases too.
* One level of aliases pointing to another alias is supported.
* De-duplicates and retains the order.
* {@link #getCollectionsList()}
*/
protected List<String> resolveCollectionListOrAlias(String collectionStr) {
if (collectionStr == null || collectionStr.trim().isEmpty()) {
return Collections.emptyList();
}
List<String> result = null;
LinkedHashSet<String> uniqueList = null;
Aliases aliases = getAliases();
List<String> inputCollections = StrUtils.splitSmart(collectionStr, ",", true);
if (inputCollections.size() > 1) {
uniqueList = new LinkedHashSet<>();
}
for (String inputCollection : inputCollections) {
List<String> resolvedCollections = aliases.resolveAliases(inputCollection);
if (uniqueList != null) {
uniqueList.addAll(resolvedCollections);
} else {
result = resolvedCollections;
}
}
if (uniqueList != null) {
return new ArrayList<>(uniqueList);
} else {
return result;
}
}
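// Illustrative example (annotation, not part of the original source): with an
// alias "logs" -> "logs_2020,logs_2021", resolveCollectionListOrAlias("logs,other")
// returns ["logs_2020", "logs_2021", "other"], de-duplicated and in input order.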
/**
* Extract handler from the URL path if not set.
*/
protected void extractHandlerFromURLPath(SolrRequestParsers parser) throws Exception {
if (handler == null && path.length() > 1) { // don't match "" or "/" as valid path
handler = core.getRequestHandler(path);
// no handler yet but <requestDispatcher> allows us to handle /select with a 'qt' param
if (handler == null && parser.isHandleSelect()) {
if ("/select".equals(path) || "/select/".equals(path)) {
solrReq = parser.parse(core, path, req);
String qt = solrReq.getParams().get(CommonParams.QT);
handler = core.getRequestHandler(qt);
if (handler == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "unknown handler: " + qt);
}
if (qt != null && qt.startsWith("/") && (handler instanceof ContentStreamHandlerBase)) {
//For security reasons it's a bad idea to allow a leading '/', ex: /select?qt=/update see SOLR-3161
//There was no restriction from Solr 1.4 thru 3.5 and it's not supported for update handlers.
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid Request Handler ('qt'). Do not use /select to access: " + qt);
}
}
}
}
}
protected void extractRemotePath(String collectionName, String origCorename) throws UnsupportedEncodingException, KeeperException, InterruptedException, SolrException {
assert core == null;
coreUrl = getRemoteCoreUrl(collectionName, origCorename);
// don't proxy for internal update requests
invalidStates = checkStateVersionsAreValid(queryParams.get(CloudSolrClient.STATE_VERSION));
if (coreUrl != null
&& queryParams.get(DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM) == null) {
if (invalidStates != null) {
//it does not make sense to send the request to a remote node
throw new SolrException(SolrException.ErrorCode.INVALID_STATE, new String(Utils.toJSON(invalidStates), org.apache.lucene.util.IOUtils.UTF_8));
}
action = REMOTEQUERY;
} else {
if (!retry) {
// we couldn't find a core to work with, try reloading aliases & this collection
cores.getZkController().getZkStateReader().aliasesManager.update();
cores.getZkController().zkStateReader.forceUpdateCollection(collectionName);
action = RETRY;
}
}
}
Action authorize() throws IOException {
AuthorizationContext context = getAuthCtx();
log.debug("AuthorizationContext : {}", context);
AuthorizationResponse authResponse = cores.getAuthorizationPlugin().authorize(context);
int statusCode = authResponse.statusCode;
if (statusCode == AuthorizationResponse.PROMPT.statusCode) {
@SuppressWarnings({"unchecked"})
Map<String, String> headers = (Map) getReq().getAttribute(AuthenticationPlugin.class.getName());
if (headers != null) {
for (Map.Entry<String, String> e : headers.entrySet()) response.setHeader(e.getKey(), e.getValue());
}
if (log.isDebugEnabled()) {
log.debug("USER_REQUIRED {} {}", req.getHeader("Authorization"), req.getUserPrincipal());
}
sendError(statusCode,
"Authentication failed, Response code: " + statusCode);
if (shouldAudit(EventType.REJECTED)) {
cores.getAuditLoggerPlugin().doAudit(new AuditEvent(EventType.REJECTED, req, context));
}
return RETURN;
}
if (statusCode == AuthorizationResponse.FORBIDDEN.statusCode) {
if (log.isDebugEnabled()) {
log.debug("UNAUTHORIZED auth header {} context : {}, msg: {}", req.getHeader("Authorization"), context, authResponse.getMessage()); // nowarn
}
sendError(statusCode,
"Unauthorized request, Response code: " + statusCode);
if (shouldAudit(EventType.UNAUTHORIZED)) {
cores.getAuditLoggerPlugin().doAudit(new AuditEvent(EventType.UNAUTHORIZED, req, context));
}
return RETURN;
}
if (!(statusCode == HttpStatus.SC_ACCEPTED) && !(statusCode == HttpStatus.SC_OK)) {
log.warn("ERROR {} during authentication: {}", statusCode, authResponse.getMessage()); // nowarn
sendError(statusCode,
"ERROR during authorization, Response code: " + statusCode);
if (shouldAudit(EventType.ERROR)) {
cores.getAuditLoggerPlugin().doAudit(new AuditEvent(EventType.ERROR, req, context));
}
return RETURN;
}
if (shouldAudit(EventType.AUTHORIZED)) {
cores.getAuditLoggerPlugin().doAudit(new AuditEvent(EventType.AUTHORIZED, req, context));
}
return null;
}
/**
* This method processes the request.
*/
public Action call() throws IOException {
MDCLoggingContext.reset();
Span activeSpan = GlobalTracer.getTracer().activeSpan();
if (activeSpan != null) {
MDCLoggingContext.setTracerId(activeSpan.context().toTraceId());
}
MDCLoggingContext.setNode(cores);
if (cores == null) {
sendError(503, "Server is shutting down or failed to initialize");
return RETURN;
}
if (solrDispatchFilter.abortErrorMessage != null) {
sendError(500, solrDispatchFilter.abortErrorMessage);
if (shouldAudit(EventType.ERROR)) {
cores.getAuditLoggerPlugin().doAudit(new AuditEvent(EventType.ERROR, getReq()));
}
return RETURN;
}
try {
init();
// Perform authorization here, if:
// (a) Authorization is enabled, and
// (b) The requested resource is not a known static file
// (c) And this request should be handled by this node (see NOTE below)
// NOTE: If the query is to be handled by another node, then let that node do the authorization.
// In case of authentication using BasicAuthPlugin, for example, the internode request
// is secured using PKI authentication and the internode request here will contain the
// original user principal as a payload/header, using which the receiving node should be
// able to perform the authorization.
if (cores.getAuthorizationPlugin() != null && shouldAuthorize()
&& !(action == REMOTEQUERY || action == FORWARD)) {
Action authorizationAction = authorize();
if (authorizationAction != null) return authorizationAction;
}
HttpServletResponse resp = response;
switch (action) {
case ADMIN:
handleAdminRequest();
return RETURN;
case REMOTEQUERY:
SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, new SolrQueryResponse(), action));
mustClearSolrRequestInfo = true;
remoteQuery(coreUrl + path, resp);
return RETURN;
case PROCESS:
final Method reqMethod = Method.getMethod(req.getMethod());
HttpCacheHeaderUtil.setCacheControlHeader(config, resp, reqMethod);
// unless we have been explicitly told not to, do cache validation
// if we fail cache validation, execute the query
if (config.getHttpCachingConfig().isNever304() ||
!HttpCacheHeaderUtil.doCacheHeaderValidation(solrReq, req, reqMethod, resp)) {
SolrQueryResponse solrRsp = new SolrQueryResponse();
/* even for HEAD requests, we need to execute the handler to
* ensure we don't get an error (and to make sure the correct
* QueryResponseWriter is selected and we get the correct
* Content-Type)
*/
SolrRequestInfo.setRequestInfo(new SolrRequestInfo(solrReq, solrRsp, action));
mustClearSolrRequestInfo = true;
execute(solrRsp);
if (shouldAudit()) {
EventType eventType = solrRsp.getException() == null ? EventType.COMPLETED : EventType.ERROR;
if (shouldAudit(eventType)) {
cores.getAuditLoggerPlugin().doAudit(
new AuditEvent(eventType, req, getAuthCtx(), solrReq.getRequestTimer().getTime(), solrRsp.getException()));
}
}
HttpCacheHeaderUtil.checkHttpCachingVeto(solrRsp, resp, reqMethod);
Iterator<Map.Entry<String, String>> headers = solrRsp.httpHeaders();
while (headers.hasNext()) {
Map.Entry<String, String> entry = headers.next();
resp.addHeader(entry.getKey(), entry.getValue());
}
QueryResponseWriter responseWriter = getResponseWriter();
if (invalidStates != null) solrReq.getContext().put(CloudSolrClient.STATE_VERSION, invalidStates);
writeResponse(solrRsp, responseWriter, reqMethod);
}
return RETURN;
default: return action;
}
} catch (Throwable ex) {
if (shouldAudit(EventType.ERROR)) {
cores.getAuditLoggerPlugin().doAudit(new AuditEvent(EventType.ERROR, ex, req));
}
sendError(ex);
// walk the the entire cause chain to search for an Error
Throwable t = ex;
while (t != null) {
if (t instanceof Error) {
if (t != ex) {
log.error("An Error was wrapped in another exception - please report complete stacktrace on SOLR-6161", ex);
}
throw (Error) t;
}
t = t.getCause();
}
return RETURN;
}
}
private boolean shouldAudit() {
return cores.getAuditLoggerPlugin() != null;
}
private boolean shouldAudit(AuditEvent.EventType eventType) {
return shouldAudit() && cores.getAuditLoggerPlugin().shouldLog(eventType);
}
private boolean shouldAuthorize() {
if(PublicKeyHandler.PATH.equals(path)) return false;
// admin/info/key is the path where the public key is exposed; it is always unsecured
if ("/".equals(path) || "/solr/".equals(path)) return false; // Static Admin UI files must always be served
if (cores.getPkiAuthenticationPlugin() != null && req.getUserPrincipal() != null) {
boolean b = cores.getPkiAuthenticationPlugin().needsAuthorization(req);
log.debug("PkiAuthenticationPlugin says authorization required : {} ", b);
return b;
}
return true;
}
void destroy() {
try {
if (solrReq != null) {
log.debug("Closing out SolrRequest: {}", solrReq);
solrReq.close();
}
} finally {
try {
if (core != null) core.close();
} finally {
if (mustClearSolrRequestInfo) {
SolrRequestInfo.clearRequestInfo();
}
}
AuthenticationPlugin authcPlugin = cores.getAuthenticationPlugin();
if (authcPlugin != null) authcPlugin.closeRequest();
}
}
//TODO using Http2Client
private void remoteQuery(String coreUrl, HttpServletResponse resp) throws IOException {
HttpRequestBase method;
HttpEntity httpEntity = null;
ModifiableSolrParams updatedQueryParams = new ModifiableSolrParams(queryParams);
int forwardCount = queryParams.getInt(INTERNAL_REQUEST_COUNT, 0) + 1;
updatedQueryParams.set(INTERNAL_REQUEST_COUNT, forwardCount);
String queryStr = updatedQueryParams.toQueryString();
try {
String urlstr = coreUrl + queryStr;
boolean isPostOrPutRequest = "POST".equals(req.getMethod()) || "PUT".equals(req.getMethod());
if ("GET".equals(req.getMethod())) {
method = new HttpGet(urlstr);
} else if ("HEAD".equals(req.getMethod())) {
method = new HttpHead(urlstr);
} else if (isPostOrPutRequest) {
HttpEntityEnclosingRequestBase entityRequest =
"POST".equals(req.getMethod()) ? new HttpPost(urlstr) : new HttpPut(urlstr);
InputStream in = req.getInputStream();
HttpEntity entity = new InputStreamEntity(in, req.getContentLength());
entityRequest.setEntity(entity);
method = entityRequest;
} else if ("DELETE".equals(req.getMethod())) {
method = new HttpDelete(urlstr);
} else if ("OPTIONS".equals(req.getMethod())) {
method = new HttpOptions(urlstr);
} else {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Unexpected method type: " + req.getMethod());
}
for (Enumeration<String> e = req.getHeaderNames(); e.hasMoreElements(); ) {
String headerName = e.nextElement();
if (!"host".equalsIgnoreCase(headerName)
&& !"authorization".equalsIgnoreCase(headerName)
&& !"accept".equalsIgnoreCase(headerName)) {
method.addHeader(headerName, req.getHeader(headerName));
}
}
// These headers not supported for HttpEntityEnclosingRequests
if (method instanceof HttpEntityEnclosingRequest) {
method.removeHeaders(TRANSFER_ENCODING_HEADER);
method.removeHeaders(CONTENT_LENGTH_HEADER);
}
final HttpResponse response
= solrDispatchFilter.httpClient.execute(method, HttpClientUtil.createNewHttpClientRequestContext());
int httpStatus = response.getStatusLine().getStatusCode();
httpEntity = response.getEntity();
resp.setStatus(httpStatus);
for (HeaderIterator responseHeaders = response.headerIterator(); responseHeaders.hasNext(); ) {
Header header = responseHeaders.nextHeader();
// We pull out these two headers below because they can cause chunked
// encoding issues with Tomcat
if (header != null && !header.getName().equalsIgnoreCase(TRANSFER_ENCODING_HEADER)
&& !header.getName().equalsIgnoreCase(CONNECTION_HEADER)) {
// NOTE: explicitly using 'setHeader' instead of 'addHeader' so that
// the remote node's values for any response headers will override any that
// may have already been set locally (ex: by the local jetty's RewriteHandler config)
resp.setHeader(header.getName(), header.getValue());
}
}
if (httpEntity != null) {
if (httpEntity.getContentEncoding() != null)
resp.setHeader(httpEntity.getContentEncoding().getName(), httpEntity.getContentEncoding().getValue());
if (httpEntity.getContentType() != null) resp.setContentType(httpEntity.getContentType().getValue());
InputStream is = httpEntity.getContent();
OutputStream os = resp.getOutputStream();
IOUtils.copyLarge(is, os);
}
} catch (IOException e) {
sendError(new SolrException(
SolrException.ErrorCode.SERVER_ERROR,
"Error trying to proxy request for url: " + coreUrl + " with _forwardCount: " + forwardCount, e));
} finally {
Utils.consumeFully(httpEntity);
}
}
protected void sendError(Throwable ex) throws IOException {
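// Render the error through the normal Solr response-writer path if possible; if that itself fails,
// fall back to the plain servlet sendError(code, message) in the finally block.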
Exception exp = null;
SolrCore localCore = null;
try {
SolrQueryResponse solrResp = new SolrQueryResponse();
if (ex instanceof Exception) {
solrResp.setException((Exception) ex);
} else {
solrResp.setException(new RuntimeException(ex));
}
localCore = core;
if (solrReq == null) {
final SolrParams solrParams;
if (req != null) {
// use GET parameters if available:
solrParams = SolrRequestParsers.parseQueryString(req.getQueryString());
} else {
// we have no params at all, use empty ones:
solrParams = new MapSolrParams(Collections.emptyMap());
}
solrReq = new SolrQueryRequestBase(core, solrParams) {
};
}
QueryResponseWriter writer = getResponseWriter();
writeResponse(solrResp, writer, Method.GET);
} catch (Exception e) { // This error really does not matter
exp = e;
} finally {
try {
if (exp != null) {
@SuppressWarnings({"rawtypes"})
SimpleOrderedMap info = new SimpleOrderedMap();
int code = ResponseUtils.getErrorInfo(ex, info, log);
sendError(code, info.toString());
}
} finally {
if (core == null && localCore != null) {
localCore.close();
}
}
}
}
protected void sendError(int code, String message) throws IOException {
try {
response.sendError(code, message);
} catch (EOFException e) {
log.info("Unable to write error response, client closed connection or we are shutting down", e);
}
}
protected void execute(SolrQueryResponse rsp) {
// a custom filter could add more stuff to the request before passing it on.
// for example: sreq.getContext().put( "HttpServletRequest", req );
// used for logging query stats in SolrCore.execute()
solrReq.getContext().put("webapp", req.getContextPath());
solrReq.getCore().execute(handler, solrReq, rsp);
}
private void handleAdminRequest() throws IOException {
SolrQueryResponse solrResp = new SolrQueryResponse();
SolrCore.preDecorateResponse(solrReq, solrResp);
handleAdmin(solrResp);
SolrCore.postDecorateResponse(handler, solrReq, solrResp);
if (solrResp.getToLog().size() > 0) {
if (log.isInfoEnabled()) { // has to come second and in its own if to keep ./gradlew check happy.
log.info(handler != null ? MarkerFactory.getMarker(handler.getClass().getName()) : MarkerFactory.getMarker(HttpSolrCall.class.getName()), solrResp.getToLogAsString("[admin]"));
}
}
QueryResponseWriter respWriter = SolrCore.DEFAULT_RESPONSE_WRITERS.get(solrReq.getParams().get(CommonParams.WT));
if (respWriter == null) respWriter = getResponseWriter();
writeResponse(solrResp, respWriter, Method.getMethod(req.getMethod()));
if (shouldAudit()) {
EventType eventType = solrResp.getException() == null ? EventType.COMPLETED : EventType.ERROR;
if (shouldAudit(eventType)) {
cores.getAuditLoggerPlugin().doAudit(
new AuditEvent(eventType, req, getAuthCtx(), solrReq.getRequestTimer().getTime(), solrResp.getException()));
}
}
}
/**
* Returns {@link QueryResponseWriter} to be used.
* When {@link CommonParams#WT} is not specified in the request, or the specified value has no
* corresponding {@link QueryResponseWriter}, the default query response writer is returned.
* Note: This method must not return null
*/
protected QueryResponseWriter getResponseWriter() {
String wt = solrReq.getParams().get(CommonParams.WT);
if (core != null) {
return core.getQueryResponseWriter(wt);
} else {
return SolrCore.DEFAULT_RESPONSE_WRITERS.getOrDefault(wt,
SolrCore.DEFAULT_RESPONSE_WRITERS.get("standard"));
}
}
protected void handleAdmin(SolrQueryResponse solrResp) {
handler.handleRequest(solrReq, solrResp);
}
/**
* Sets the "collection" parameter on the request to the list of alias-resolved collections for this request.
* The parameter is skipped when it would be redundant, e.g. when the targeted core already corresponds to the single resolved collection.
* Note: {@link org.apache.solr.handler.component.HttpShardHandler} processes this param.
* @see #getCollectionsList()
*/
protected void addCollectionParamIfNeeded(List<String> collections) {
if (collections.isEmpty()) {
return;
}
assert cores.isZooKeeperAware();
String collectionParam = queryParams.get(COLLECTION_PROP);
// if there is no existing collection param and the core we go to is for the expected collection,
// then we needn't add a collection param
if (collectionParam == null && // if collection param already exists, we may need to over-write it
core != null && collections.equals(Collections.singletonList(core.getCoreDescriptor().getCollectionName()))) {
return;
}
String newCollectionParam = StrUtils.join(collections, ',');
if (newCollectionParam.equals(collectionParam)) {
return;
}
// TODO add a SolrRequest.getModifiableParams ?
ModifiableSolrParams params = new ModifiableSolrParams(solrReq.getParams());
params.set(COLLECTION_PROP, newCollectionParam);
solrReq.setParams(params);
}
private void writeResponse(SolrQueryResponse solrRsp, QueryResponseWriter responseWriter, Method reqMethod)
throws IOException {
try {
Object invalidStates = solrReq.getContext().get(CloudSolrClient.STATE_VERSION);
//This is the last item added to the response and the client would expect it that way.
//If that assumption is changed, it would fail. This is done to avoid an O(n) scan on
// the response for each request
if (invalidStates != null) solrRsp.add(CloudSolrClient.STATE_VERSION, invalidStates);
// Now write it out
final String ct = responseWriter.getContentType(solrReq, solrRsp);
// don't call setContentType on null
if (null != ct) response.setContentType(ct);
if (solrRsp.getException() != null) {
@SuppressWarnings({"rawtypes"})
NamedList info = new SimpleOrderedMap();
int code = ResponseUtils.getErrorInfo(solrRsp.getException(), info, log);
solrRsp.add("error", info);
response.setStatus(code);
}
if (Method.HEAD != reqMethod) {
OutputStream out = response.getOutputStream();
QueryResponseWriterUtil.writeQueryResponse(out, responseWriter, solrReq, solrRsp, ct);
}
//else http HEAD request, nothing to write out, waited this long just to get ContentType
} catch (EOFException e) {
log.info("Unable to write response, client closed connection or we are shutting down", e);
}
}
/** Returns null if the state ({@link CloudSolrClient#STATE_VERSION}) is good; otherwise returns state problems. */
private Map<String, Integer> checkStateVersionsAreValid(String stateVer) {
Map<String, Integer> result = null;
String[] pairs;
if (stateVer != null && !stateVer.isEmpty() && cores.isZooKeeperAware()) {
// may have multiple collections separated by |
pairs = StringUtils.split(stateVer, '|');
for (String pair : pairs) {
String[] pcs = StringUtils.split(pair, ':');
if (pcs.length == 2 && !pcs[0].isEmpty() && !pcs[1].isEmpty()) {
Integer status = cores.getZkController().getZkStateReader().compareStateVersions(pcs[0], Integer.parseInt(pcs[1]));
if (status != null) {
if (result == null) result = new HashMap<>();
result.put(pcs[0], status);
}
}
}
}
return result;
}
protected SolrCore getCoreByCollection(String collectionName, boolean isPreferLeader) {
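// When isPreferLeader is set, first try a leader replica hosted on this node; otherwise fall back
// to any active replica of the collection that lives on this node.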
ZkStateReader zkStateReader = cores.getZkController().getZkStateReader();
ClusterState clusterState = zkStateReader.getClusterState();
DocCollection collection = clusterState.getCollectionOrNull(collectionName, true);
if (collection == null) {
return null;
}
Set<String> liveNodes = clusterState.getLiveNodes();
if (isPreferLeader) {
List<Replica> leaderReplicas = collection.getLeaderReplicas(cores.getZkController().getNodeName());
SolrCore core = randomlyGetSolrCore(liveNodes, leaderReplicas);
if (core != null) return core;
}
List<Replica> replicas = collection.getReplicas(cores.getZkController().getNodeName());
return randomlyGetSolrCore(liveNodes, replicas);
}
private SolrCore randomlyGetSolrCore(Set<String> liveNodes, List<Replica> replicas) {
if (replicas != null) {
RandomIterator<Replica> it = new RandomIterator<>(random, replicas);
while (it.hasNext()) {
Replica replica = it.next();
if (liveNodes.contains(replica.getNodeName()) && replica.getState() == Replica.State.ACTIVE) {
SolrCore core = checkProps(replica);
if (core != null) return core;
}
}
}
return null;
}
private SolrCore checkProps(ZkNodeProps zkProps) {
String corename;
SolrCore core = null;
if (cores.getZkController().getNodeName().equals(zkProps.getStr(NODE_NAME_PROP))) {
corename = zkProps.getStr(CORE_NAME_PROP);
core = cores.getCore(corename);
}
return core;
}
private void getSlicesForCollections(ClusterState clusterState,
Collection<Slice> slices, boolean activeSlices) {
if (activeSlices) {
for (Map.Entry<String, DocCollection> entry : clusterState.getCollectionsMap().entrySet()) {
final Slice[] activeCollectionSlices = entry.getValue().getActiveSlicesArr();
if (activeCollectionSlices != null) {
Collections.addAll(slices, activeCollectionSlices);
}
}
} else {
for (Map.Entry<String, DocCollection> entry : clusterState.getCollectionsMap().entrySet()) {
final Collection<Slice> collectionSlices = entry.getValue().getSlices();
if (collectionSlices != null) {
slices.addAll(collectionSlices);
}
}
}
}
protected String getRemoteCoreUrl(String collectionName, String origCorename) throws SolrException {
ClusterState clusterState = cores.getZkController().getClusterState();
final DocCollection docCollection = clusterState.getCollectionOrNull(collectionName);
Slice[] slices = (docCollection != null) ? docCollection.getActiveSlicesArr() : null;
List<Slice> activeSlices = new ArrayList<>();
boolean byCoreName = false;
int totalReplicas = 0;
if (slices == null) {
byCoreName = true;
activeSlices = new ArrayList<>();
getSlicesForCollections(clusterState, activeSlices, true);
if (activeSlices.isEmpty()) {
getSlicesForCollections(clusterState, activeSlices, false);
}
} else {
Collections.addAll(activeSlices, slices);
}
for (Slice s: activeSlices) {
totalReplicas += s.getReplicas().size();
}
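// totalReplicas caps how many times this request may be proxied before we assume a forwarding loop and fail fast.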
if (activeSlices.isEmpty()) {
return null;
}
// XXX (ab) most likely this is not needed? it seems all code paths
// XXX already make sure the collectionName is on the list
if (!collectionsList.contains(collectionName)) {
collectionsList = new ArrayList<>(collectionsList);
collectionsList.add(collectionName);
}
// Avoid getting into a recursive loop of requests being forwarded by
// stopping forwarding and erroring out after (totalReplicas) forwards
if (queryParams.getInt(INTERNAL_REQUEST_COUNT, 0) > totalReplicas){
throw new SolrException(SolrException.ErrorCode.INVALID_STATE,
"No active replicas found for collection: " + collectionName);
}
String coreUrl = getCoreUrl(collectionName, origCorename, clusterState,
activeSlices, byCoreName, true);
if (coreUrl == null) {
coreUrl = getCoreUrl(collectionName, origCorename, clusterState,
activeSlices, byCoreName, false);
}
return coreUrl;
}
private String getCoreUrl(String collectionName,
String origCorename, ClusterState clusterState, List<Slice> slices,
boolean byCoreName, boolean activeReplicas) {
String coreUrl;
Set<String> liveNodes = clusterState.getLiveNodes();
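// Shuffle slices and replicas so remote requests are spread across the cluster instead of always targeting the same core.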
Collections.shuffle(slices, random);
for (Slice slice : slices) {
List<Replica> randomizedReplicas = new ArrayList<>(slice.getReplicas());
Collections.shuffle(randomizedReplicas, random);
for (Replica replica : randomizedReplicas) {
if (!activeReplicas || (liveNodes.contains(replica.getNodeName())
&& replica.getState() == Replica.State.ACTIVE)) {
if (byCoreName && !origCorename.equals(replica.getStr(CORE_NAME_PROP))) {
// if it's by core name, make sure they match
continue;
}
if (replica.getBaseUrl().equals(cores.getZkController().getBaseUrl())) {
// don't count a local core
continue;
}
if (origCorename != null) {
coreUrl = replica.getBaseUrl() + "/" + origCorename;
} else {
coreUrl = replica.getCoreUrl();
if (coreUrl.endsWith("/")) {
coreUrl = coreUrl.substring(0, coreUrl.length() - 1);
}
}
return coreUrl;
}
}
}
return null;
}
protected Object _getHandler(){
return handler;
}
private AuthorizationContext getAuthCtx() {
String resource = getPath();
SolrParams params = getQueryParams();
final ArrayList<CollectionRequest> collectionRequests = new ArrayList<>();
for (String collection : getCollectionsList()) {
collectionRequests.add(new CollectionRequest(collection));
}
// Extract collection name from the params in case of a Collection Admin request
if (getPath().equals("/admin/collections")) {
if (CREATE.isEqual(params.get("action"))||
RELOAD.isEqual(params.get("action"))||
DELETE.isEqual(params.get("action")))
collectionRequests.add(new CollectionRequest(params.get("name")));
else if (params.get(COLLECTION_PROP) != null)
collectionRequests.add(new CollectionRequest(params.get(COLLECTION_PROP)));
}
// Populate the request type if the request is select or update
if(requestType == RequestType.UNKNOWN) {
if(resource.startsWith("/select") || resource.startsWith("/get"))
requestType = RequestType.READ;
if(resource.startsWith("/update"))
requestType = RequestType.WRITE;
}
return new AuthorizationContext() {
@Override
public SolrParams getParams() {
return null == solrReq ? null : solrReq.getParams();
}
@Override
public Principal getUserPrincipal() {
return getReq().getUserPrincipal();
}
@Override
public String getHttpHeader(String s) {
return getReq().getHeader(s);
}
@Override
public Enumeration<String> getHeaderNames() {
return getReq().getHeaderNames();
}
@Override
public List<CollectionRequest> getCollectionRequests() {
return collectionRequests;
}
@Override
public RequestType getRequestType() {
return requestType;
}
public String getResource() {
return path;
}
@Override
public String getHttpMethod() {
return getReq().getMethod();
}
@Override
public Object getHandler() {
return _getHandler();
}
@Override
public String toString() {
StringBuilder response = new StringBuilder("userPrincipal: [").append(getUserPrincipal()).append("]")
.append(" type: [").append(requestType.toString()).append("], collections: [");
for (CollectionRequest collectionRequest : collectionRequests) {
response.append(collectionRequest.collectionName).append(", ");
}
if(collectionRequests.size() > 0)
response.delete(response.length() - 1, response.length());
response.append("], Path: [").append(resource).append("]");
response.append(" path : ").append(path).append(" params :").append(getParams());
return response.toString();
}
@Override
public String getRemoteAddr() {
return getReq().getRemoteAddr();
}
@Override
public String getRemoteHost() {
return getReq().getRemoteHost();
}
};
}
static final String CONNECTION_HEADER = "Connection";
static final String TRANSFER_ENCODING_HEADER = "Transfer-Encoding";
static final String CONTENT_LENGTH_HEADER = "Content-Length";
List<CommandOperation> parsedCommands;
public List<CommandOperation> getCommands(boolean validateInput) {
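// Parse the command operations from the first content stream once and cache them; callers always receive defensive copies.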
if (parsedCommands == null) {
Iterable<ContentStream> contentStreams = solrReq.getContentStreams();
if (contentStreams == null) parsedCommands = Collections.emptyList();
else {
parsedCommands = ApiBag.getCommandOperations(contentStreams.iterator().next(), getValidators(), validateInput);
}
}
return CommandOperation.clone(parsedCommands);
}
protected ValidatingJsonMap getSpec() {
return null;
}
@SuppressWarnings({"unchecked"})
protected Map<String, JsonSchemaValidator> getValidators(){
return Collections.EMPTY_MAP;
}
/**
* A faster method for randomly picking items when you do not need to
* consume all items.
*/
private static class RandomIterator<E> implements Iterator<E> {
private Random rand;
private ArrayList<E> elements;
private int size;
public RandomIterator(Random rand, Collection<E> elements) {
this.rand = rand;
this.elements = new ArrayList<>(elements);
this.size = elements.size();
}
@Override
public boolean hasNext() {
return size > 0;
}
@Override
public E next() {
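// Pick a random element and swap the last unconsumed element into its slot, so each element is
// returned at most once with O(1) bookkeeping.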
int idx = rand.nextInt(size);
E e1 = elements.get(idx);
E e2 = elements.get(size-1);
elements.set(idx,e2);
size--;
return e1;
}
}
}
| 1 | 40,990 | what about the line below, forceUpdateCollection? | apache-lucene-solr | java |